From 43980a9c0be81add89554f516017a8164cba8bbd Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Thu, 27 Jun 2024 18:40:25 +0200 Subject: [PATCH 001/170] Adds additional gamma rewrites to merge-gamma (#527) --- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 177 ++++++++++++++++++++- 1 file changed, 176 insertions(+), 1 deletion(-) diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index a555ddeb0..5cdcc72d7 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -21,6 +21,176 @@ merge_gamma(llvm::RvsdgModule & rm) merge_gamma(root); } +bool +eliminate_gamma_ctl(rvsdg::gamma_node * gamma) +{ + // eliminates gammas that just replicate the ctl input + bool changed = false; + for (size_t i = 0; i < gamma->noutputs(); ++i) + { + auto o = gamma->output(i); + if (dynamic_cast(&o->type())) + { + bool eliminate = true; + for (size_t j = 0; j < gamma->nsubregions(); ++j) + { + auto r = gamma->subregion(j)->result(i); + if (auto so = dynamic_cast(r->origin())) + { + if (auto ctl = dynamic_cast(&so->node()->operation())) + { + if (j == ctl->value().alternative()) + { + continue; + } + } + } + eliminate = false; + } + if (eliminate) + { + if (o->nusers()) + { + o->divert_users(gamma->predicate()->origin()); + changed = true; + } + } + } + } + return changed; +} + +bool +fix_match_inversion(rvsdg::gamma_node * old_gamma) +{ + // inverts match and swaps regions for gammas that contain swapped control constants + if (old_gamma->nsubregions() != 2) + { + return false; + } + bool swapped = false; + size_t ctl_cnt = 0; + for (size_t i = 0; i < old_gamma->noutputs(); ++i) + { + auto o = old_gamma->output(i); + if (dynamic_cast(&o->type())) + { + ctl_cnt++; + swapped = true; + for (size_t j = 0; j < old_gamma->nsubregions(); ++j) + { + auto r = old_gamma->subregion(j)->result(i); + if (auto so = dynamic_cast(r->origin())) + { + if (auto ctl = dynamic_cast(&so->node()->operation())) + 
{ + if (j != ctl->value().alternative()) + { + continue; + } + } + } + swapped = false; + } + } + } + if (ctl_cnt != 1 || !swapped) + { + return false; + } + if (auto no = dynamic_cast(old_gamma->predicate()->origin())) + { + if (no->nusers() != 1) + { + return false; + } + if (auto match = dynamic_cast(&no->node()->operation())) + { + if (match->nalternatives() == 2) + { + uint64_t default_alternative = match->default_alternative() ? 0 : 1; + rvsdg::match_op op( + match->nbits(), + { { 0, match->alternative(1) }, { 1, match->alternative(0) } }, + default_alternative, + match->nalternatives()); + auto new_match = rvsdg::simple_node::create_normalized( + no->region(), + op, + { no->node()->input(0)->origin() })[0]; + auto new_gamma = rvsdg::gamma_node::create(new_match, match->nalternatives()); + rvsdg::substitution_map rmap0; // subregion 0 of the new gamma - 1 of the old + rvsdg::substitution_map rmap1; + for (auto oev = old_gamma->begin_entryvar(); oev != old_gamma->end_entryvar(); oev++) + { + auto nev = new_gamma->add_entryvar(oev->origin()); + rmap0.insert(oev->argument(1), nev->argument(0)); + rmap1.insert(oev->argument(0), nev->argument(1)); + } + /* copy subregions */ + old_gamma->subregion(0)->copy(new_gamma->subregion(1), rmap1, false, false); + old_gamma->subregion(1)->copy(new_gamma->subregion(0), rmap0, false, false); + + for (auto oex = old_gamma->begin_exitvar(); oex != old_gamma->end_exitvar(); oex++) + { + std::vector operands; + operands.push_back(rmap0.lookup(oex->result(1)->origin())); + operands.push_back(rmap1.lookup(oex->result(0)->origin())); + auto nex = new_gamma->add_exitvar(operands); + oex.output()->divert_users(nex); + } + remove(old_gamma); + remove(no->node()); + return true; + } + } + } + return false; +} + +bool +eliminate_gamma_eol(rvsdg::gamma_node * gamma) +{ + // eliminates gammas that are only active at the end of the loop and have unused outputs + // seems to be mostly loop variables + auto theta = 
dynamic_cast(gamma->region()->node()); + if (!theta || theta->predicate()->origin() != gamma->predicate()->origin()) + { + return false; + } + if (gamma->nsubregions() != 2) + { + return false; + } + bool changed = false; + for (size_t i = 0; i < gamma->noutputs(); ++i) + { + auto o = gamma->output(i); + if (o->nusers() != 1) + { + continue; + } + auto user = *o->begin(); + if (auto res = dynamic_cast(user)) + { + if (res->output() && res->output()->nusers() == 0) + { + // continue loop subregion + if (auto arg = dynamic_cast(gamma->subregion(1)->result(i)->origin())) + { + // value is just passed through + if (o->nusers()) + { + o->divert_users(arg->input()->origin()); + changed = true; + } + } + } + } + } + return changed; +} + void merge_gamma(jlm::rvsdg::region * region) { @@ -36,7 +206,12 @@ merge_gamma(jlm::rvsdg::region * region) merge_gamma(structnode->subregion(n)); if (auto gamma = dynamic_cast(node)) { - changed = changed != merge_gamma(gamma); + if (fix_match_inversion(gamma) || eliminate_gamma_ctl(gamma) || eliminate_gamma_eol(gamma) + || merge_gamma(gamma)) + { + changed = true; + break; + } } } } From 2ff5ec38c636b2a2084b25cdf62ccdd0c8447dc4 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 30 Jun 2024 10:26:38 +0200 Subject: [PATCH 002/170] Make state gate independent of load user (#532) For the distributed memory disambiguation --- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index bfed06a3e..8384093b0 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -362,7 +362,7 @@ separate_load_edge( auto mem_sg_out = jlm::hls::state_gate_op::create(*new_load_outputs[0], { mem_edge }); mem_edge = mem_sg_out[1]; - sn->output(0)->divert_users(mem_sg_out[0]); + sn->output(0)->divert_users(new_load_outputs[0]); si->divert_to(addr_edge); 
sn->output(1)->divert_users(mem_edge); remove(sn); From 2d1606c57f3afbcf1e7b66cc8ea106e64a0aa3ce Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 30 Jun 2024 10:48:23 +0200 Subject: [PATCH 003/170] Removed dumping RVSDG as XML to file (#530) Removed statements that have been used for debugging. --- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 ---- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 2 -- 2 files changed, 6 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index fcd349609..67585a3c3 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -705,10 +705,6 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) // Remove imports for decouple_ function pointers dne(newLambda->subregion()); - auto xmlFile = fopen("before_remove_unused_state.rvsdg", "w"); - jlm::rvsdg::view_xml(root, xmlFile); - fclose(xmlFile); - // // TODO // Remove unused state also creates a new lambda, which we have already done above. 
diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index a08bff26c..942726afc 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -338,9 +338,7 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) continue; } inline_calls(ln->subregion()); - dump_xml(rm, "post_inline.rvsdg"); split_opt(rm); - dump_xml(rm, "post_opt.rvsdg"); // convert_alloca(ln->subregion()); jlm::rvsdg::substitution_map smap; for (size_t i = 0; i < ln->ninputs(); ++i) From bb36b2cc2c74657bff1eead3dbe4b16ce8b5b95b Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 30 Jun 2024 13:55:57 +0200 Subject: [PATCH 004/170] Checking loop variable in branch_op == (#531) --- jlm/hls/ir/hls.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index d9e4aebb2..700a4a744 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -39,7 +39,7 @@ class branch_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(0).type() == argument(0).type() + return ot && ot->loop == loop && ot->argument(0).type() == argument(0).type() && ot->result(0).type() == result(0).type(); } From feb7a6c60627a49bff98b42c5540eabb420a83fa Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 1 Jul 2024 10:17:32 +0200 Subject: [PATCH 005/170] Add parallel threads configuration to run-hls-test.sh (#534) The test now runs in parallel by default. Also bumps the hls-test-suite to the latest commit. 
--- scripts/run-hls-test.sh | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index dbeacdfcf..e8d8149a5 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=ffd3191dc3046cc54618fc4a43cc656eda755a69 +GIT_COMMIT=806876decd60a91cc6ec4773b6edeca20156f528 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" @@ -17,6 +17,9 @@ BENCHMARK_RUN_TARGET=run # We assume that the firtool is in the PATH FIRTOOL=firtool +# Execute benchmarks in parallel by default +PARALLEL_THREADS=`nproc` + function commit() { echo ${GIT_COMMIT} @@ -27,9 +30,11 @@ function usage() echo "Usage: ./run-hls-test.sh [OPTION] [VAR=VALUE]" echo "" echo " --benchmark-path PATH The path where to place the HLS test suite." - echo " [${BENCHMARK_DIR}]" + echo " Default=[${BENCHMARK_DIR}]" echo " --firtool COMMAND The command for running firtool, which can include a path." - echo " [${FIRTOOL}]" + echo " Default=[${FIRTOOL}]" + echo " --parallel #THREADS The number of threads to run in parallel." + echo " Default=[${PARALLEL_THREADS}]" echo " --get-commit-hash Prints the commit hash used for the build." echo " --help Prints this message and stops." 
} @@ -46,6 +51,11 @@ while [[ "$#" -ge 1 ]] ; do FIRTOOL=$(readlink -m "$1") shift ;; + --parallel) + shift + PARALLEL_THREADS=$1 + shift + ;; --get-commit-hash) commit >&2 exit 1 @@ -83,4 +93,5 @@ export PATH=${JLM_BIN_DIR}:${PATH} cd ${BENCHMARK_DIR} git checkout ${GIT_COMMIT} make clean -make FIRTOOL=${FIRTOOL} ${BENCHMARK_RUN_TARGET} +echo "make -j ${PARALLEL_THREADS} -O FIRTOOL=${FIRTOOL} ${BENCHMARK_RUN_TARGET}" +make -j ${PARALLEL_THREADS} -O FIRTOOL=${FIRTOOL} ${BENCHMARK_RUN_TARGET} From 7d8e20f46bbf03635df9c7f6dfbd695b079cd1f8 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 9 Jul 2024 21:27:15 +0200 Subject: [PATCH 006/170] Add hash method for types (#535) The PR does the following: 1. Adds an abstract `ComputeHash()` method to the type class. 2. Implements the method for all subclasses of the type class. 3. Moves hashing related classes and methods from `HashSet.hpp` to `Hash.hpp` 4. Adds support functions for hashing to `Hash.hpp` --- jlm/hls/ir/hls.cpp | 20 ++++++++ jlm/hls/ir/hls.hpp | 8 +++- jlm/llvm/ir/types.cpp | 88 ++++++++++++++++++++++++++++++++++++ jlm/llvm/ir/types.hpp | 30 ++++++++++++ jlm/rvsdg/bitstring/type.cpp | 9 ++++ jlm/rvsdg/bitstring/type.hpp | 3 ++ jlm/rvsdg/control.cpp | 9 ++++ jlm/rvsdg/control.hpp | 3 ++ jlm/rvsdg/type.hpp | 8 ++++ jlm/util/Hash.hpp | 72 +++++++++++++++++++++++++++++ jlm/util/HashSet.hpp | 23 ++-------- jlm/util/Makefile.sub | 1 + tests/test-types.cpp | 14 ++++++ tests/test-types.hpp | 6 +++ 14 files changed, 272 insertions(+), 22 deletions(-) create mode 100644 jlm/util/Hash.hpp diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 141827a61..528aab089 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -4,10 +4,17 @@ */ #include +#include namespace jlm::hls { +std::size_t +triggertype::ComputeHash() const noexcept +{ + return typeid(triggertype).hash_code(); +} + std::shared_ptr triggertype::Create() { @@ -15,6 +22,19 @@ triggertype::Create() return std::shared_ptr(std::shared_ptr(), 
&instance); } +std::size_t +bundletype::ComputeHash() const noexcept +{ + std::size_t seed = typeid(bundletype).hash_code(); + for (auto & element : elements_) + { + auto firstHash = std::hash()(element.first); + util::CombineHashesWithSeed(seed, firstHash, element.second->ComputeHash()); + } + + return seed; +} + jlm::rvsdg::structural_output * loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 700a4a744..20a6e7466 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -419,10 +419,11 @@ class triggertype final : public jlm::rvsdg::statetype return type; }; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + static std::shared_ptr Create(); - -private: }; class trigger_op final : public jlm::rvsdg::simple_op @@ -705,6 +706,9 @@ class bundletype final : public jlm::rvsdg::valuetype return true; }; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + std::shared_ptr get_element_type(std::string element) const { diff --git a/jlm/llvm/ir/types.cpp b/jlm/llvm/ir/types.cpp index bdd07b015..5a87fae8c 100644 --- a/jlm/llvm/ir/types.cpp +++ b/jlm/llvm/ir/types.cpp @@ -4,6 +4,7 @@ */ #include +#include #include #include @@ -78,6 +79,26 @@ FunctionType::operator==(const jlm::rvsdg::type & _other) const noexcept return true; } +std::size_t +FunctionType::ComputeHash() const noexcept +{ + std::size_t seed = typeid(FunctionType).hash_code(); + + util::CombineHashesWithSeed(seed, NumArguments()); + for (auto argumentType : ArgumentTypes_) + { + util::CombineHashesWithSeed(seed, argumentType->ComputeHash()); + } + + util::CombineHashesWithSeed(seed, NumResults()); + for (auto resultType : ResultTypes_) + { + util::CombineHashesWithSeed(seed, resultType->ComputeHash()); + } + + return seed; +} + FunctionType & FunctionType::operator=(const FunctionType & rhs) = default; @@ -111,6 +132,12 @@ PointerType::operator==(const jlm::rvsdg::type & other) 
const noexcept return jlm::rvsdg::is(other); } +std::size_t +PointerType::ComputeHash() const noexcept +{ + return typeid(PointerType).hash_code(); +} + std::shared_ptr PointerType::Create() { @@ -136,6 +163,14 @@ arraytype::operator==(const jlm::rvsdg::type & other) const noexcept return type && type->element_type() == element_type() && type->nelements() == nelements(); } +std::size_t +arraytype::ComputeHash() const noexcept +{ + auto typeHash = typeid(arraytype).hash_code(); + auto numElementsHash = std::hash()(nelements_); + return util::CombineHashes(typeHash, type_->ComputeHash(), numElementsHash); +} + /* floating point type */ fptype::~fptype() @@ -160,6 +195,15 @@ fptype::operator==(const jlm::rvsdg::type & other) const noexcept return type && type->size() == size(); } +std::size_t +fptype::ComputeHash() const noexcept +{ + auto typeHash = typeid(fptype).hash_code(); + auto sizeHash = std::hash()(size_); + + return util::CombineHashes(typeHash, sizeHash); +} + std::shared_ptr fptype::Create(fpsize size) { @@ -203,6 +247,12 @@ varargtype::operator==(const jlm::rvsdg::type & other) const noexcept return dynamic_cast(&other) != nullptr; } +std::size_t +varargtype::ComputeHash() const noexcept +{ + return typeid(varargtype).hash_code(); +} + std::string varargtype::debug_string() const { @@ -226,6 +276,16 @@ StructType::operator==(const jlm::rvsdg::type & other) const noexcept && &type->Declaration_ == &Declaration_; } +std::size_t +StructType::ComputeHash() const noexcept +{ + auto typeHash = typeid(StructType).hash_code(); + auto isPackedHash = std::hash()(IsPacked_); + auto nameHash = std::hash()(Name_); + auto declarationHash = std::hash()(&Declaration_); + return util::CombineHashes(typeHash, isPackedHash, nameHash, declarationHash); +} + std::string StructType::debug_string() const { @@ -252,6 +312,14 @@ fixedvectortype::operator==(const jlm::rvsdg::type & other) const noexcept return vectortype::operator==(other); } +std::size_t 
+fixedvectortype::ComputeHash() const noexcept +{ + auto typeHash = typeid(fixedvectortype).hash_code(); + auto sizeHash = std::hash()(size()); + return util::CombineHashes(typeHash, sizeHash, Type()->ComputeHash()); +} + std::string fixedvectortype::debug_string() const { @@ -269,6 +337,14 @@ scalablevectortype::operator==(const jlm::rvsdg::type & other) const noexcept return vectortype::operator==(other); } +std::size_t +scalablevectortype::ComputeHash() const noexcept +{ + auto typeHash = typeid(scalablevectortype).hash_code(); + auto sizeHash = std::hash()(size()); + return util::CombineHashes(typeHash, sizeHash, Type()->ComputeHash()); +} + std::string scalablevectortype::debug_string() const { @@ -286,6 +362,12 @@ iostatetype::operator==(const jlm::rvsdg::type & other) const noexcept return jlm::rvsdg::is(other); } +std::size_t +iostatetype::ComputeHash() const noexcept +{ + return typeid(iostatetype).hash_code(); +} + std::string iostatetype::debug_string() const { @@ -316,6 +398,12 @@ MemoryStateType::operator==(const jlm::rvsdg::type & other) const noexcept return jlm::rvsdg::is(other); } +std::size_t +MemoryStateType::ComputeHash() const noexcept +{ + return typeid(MemoryStateType).hash_code(); +} + std::shared_ptr MemoryStateType::Create() { diff --git a/jlm/llvm/ir/types.hpp b/jlm/llvm/ir/types.hpp index 37af0a320..12f985a1f 100644 --- a/jlm/llvm/ir/types.hpp +++ b/jlm/llvm/ir/types.hpp @@ -76,6 +76,9 @@ class FunctionType final : public jlm::rvsdg::valuetype bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + static std::shared_ptr Create( std::vector> argumentTypes, @@ -103,6 +106,9 @@ class PointerType final : public jlm::rvsdg::valuetype bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + static std::shared_ptr Create(); }; @@ -136,6 +142,9 @@ class arraytype final : 
public jlm::rvsdg::valuetype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + inline size_t nelements() const noexcept { @@ -191,6 +200,9 @@ class fptype final : public jlm::rvsdg::valuetype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + inline const fpsize & size() const noexcept { @@ -218,6 +230,9 @@ class varargtype final : public jlm::rvsdg::statetype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + virtual std::string debug_string() const override; @@ -298,6 +313,9 @@ class StructType final : public jlm::rvsdg::valuetype bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + [[nodiscard]] std::string debug_string() const override; @@ -437,6 +455,9 @@ class fixedvectortype final : public vectortype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + virtual std::string debug_string() const override; @@ -459,6 +480,9 @@ class scalablevectortype final : public vectortype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + virtual std::string debug_string() const override; @@ -484,6 +508,9 @@ class iostatetype final : public jlm::rvsdg::statetype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + virtual std::string debug_string() const override; @@ -511,6 +538,9 @@ class MemoryStateType final : public jlm::rvsdg::statetype bool operator==(const jlm::rvsdg::type & other) const 
noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + static std::shared_ptr Create(); }; diff --git a/jlm/rvsdg/bitstring/type.cpp b/jlm/rvsdg/bitstring/type.cpp index e29f91d83..5bc2483e8 100644 --- a/jlm/rvsdg/bitstring/type.cpp +++ b/jlm/rvsdg/bitstring/type.cpp @@ -6,6 +6,7 @@ #include #include +#include namespace jlm::rvsdg { @@ -28,6 +29,14 @@ bittype::operator==(const jlm::rvsdg::type & other) const noexcept return type != nullptr && this->nbits() == type->nbits(); } +std::size_t +bittype::ComputeHash() const noexcept +{ + auto typeHash = typeid(bittype).hash_code(); + auto numBitsHash = std::hash()(nbits_); + return util::CombineHashes(typeHash, numBitsHash); +} + std::shared_ptr bittype::Create(std::size_t nbits) { diff --git a/jlm/rvsdg/bitstring/type.hpp b/jlm/rvsdg/bitstring/type.hpp index 4fcf19da8..22be1f218 100644 --- a/jlm/rvsdg/bitstring/type.hpp +++ b/jlm/rvsdg/bitstring/type.hpp @@ -36,6 +36,9 @@ class bittype final : public jlm::rvsdg::valuetype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + /** * \brief Creates bit type of specified width * diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index cae5fcee2..da9e6a789 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -6,6 +6,7 @@ #include #include +#include namespace jlm::rvsdg { @@ -38,6 +39,14 @@ ctltype::operator==(const jlm::rvsdg::type & other) const noexcept return type && type->nalternatives_ == nalternatives_; } +std::size_t +ctltype::ComputeHash() const noexcept +{ + auto typeHash = typeid(ctltype).hash_code(); + auto numAlternativesHash = std::hash()(nalternatives_); + return util::CombineHashes(typeHash, numAlternativesHash); +} + std::shared_ptr ctltype::Create(std::size_t nalternatives) { diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index c9f772c16..7cef57853 100644 --- a/jlm/rvsdg/control.hpp +++ 
b/jlm/rvsdg/control.hpp @@ -35,6 +35,9 @@ class ctltype final : public jlm::rvsdg::statetype virtual bool operator==(const jlm::rvsdg::type & other) const noexcept override; + std::size_t + ComputeHash() const noexcept override; + inline size_t nalternatives() const noexcept { diff --git a/jlm/rvsdg/type.hpp b/jlm/rvsdg/type.hpp index bd1e978e8..06e452095 100644 --- a/jlm/rvsdg/type.hpp +++ b/jlm/rvsdg/type.hpp @@ -34,6 +34,14 @@ class type virtual std::string debug_string() const = 0; + + /** + * Computes a hash value for the instance of the type. + * + * @return A hash value. + */ + [[nodiscard]] virtual std::size_t + ComputeHash() const noexcept = 0; }; class valuetype : public jlm::rvsdg::type diff --git a/jlm/util/Hash.hpp b/jlm/util/Hash.hpp new file mode 100644 index 000000000..5c3398ec0 --- /dev/null +++ b/jlm/util/Hash.hpp @@ -0,0 +1,72 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#ifndef JLM_UTIL_HASH_HPP +#define JLM_UTIL_HASH_HPP + +#include +#include + +namespace jlm::util +{ + +/** + * Our own version of std::hash that also supports hashing std::pair + */ +template +struct Hash : std::hash +{ +}; + +template +struct Hash> +{ + std::size_t + operator()(const std::pair & value) const noexcept + { + return Hash()(value.first) ^ Hash()(value.second) << 1; + } +}; + +/** + * Combines multiple hash values given a seed value. + * + * @tparam Args The type of the hash values, i.e., std::size_t. + * @param seed The seed value. It contains the combined hash values after the function invocation. + * @param hash The first hash value. + * @param args The other hash values. + * + * @see CombineHashes + */ +template +void +CombineHashesWithSeed(std::size_t & seed, std::size_t hash, Args... args) +{ + seed ^= hash + 0x9e3779b9 + (seed << 6) + (seed >> 2); + (CombineHashesWithSeed(seed, args), ...); +} + +/** + * Combines multiple hash values with the seed value 0. 
+ * + * @tparam Args The type of the hash values, i.e, std::size_t. + * @param hash The first hash value. + * @param args The other hash values. + * @return The combined hash values. + * + * @see CombineHashesWithSeed + */ +template +std::size_t +CombineHashes(std::size_t hash, Args... args) +{ + std::size_t seed = 0; + CombineHashesWithSeed(seed, hash, std::forward(args)...); + return seed; +} + +} + +#endif diff --git a/jlm/util/HashSet.hpp b/jlm/util/HashSet.hpp index a061e95cb..06abaeb89 100644 --- a/jlm/util/HashSet.hpp +++ b/jlm/util/HashSet.hpp @@ -6,6 +6,7 @@ #ifndef JLM_UTIL_HASHSET_HPP #define JLM_UTIL_HASHSET_HPP +#include #include #include @@ -14,26 +15,8 @@ namespace jlm::util { /** - * Our own version of std::hash that also supports hashing std::pair - */ -template -struct Hash : std::hash -{ -}; - -template -struct Hash> -{ - std::size_t - operator()(const std::pair & value) const noexcept - { - return std::hash()(value.first) ^ std::hash()(value.second) << 1; - } -}; - -/** - * Represents a set of values. A set is a collection that contains no duplicate elements, and whose - * elements are in no particular order. + * Represents a set of values. A set is a collection that contains no duplicate elements, and + * whose elements are in no particular order. * @tparam ItemType The type of the items in the hash set. 
*/ template> diff --git a/jlm/util/Makefile.sub b/jlm/util/Makefile.sub index 8b3a97cd9..2e1148337 100644 --- a/jlm/util/Makefile.sub +++ b/jlm/util/Makefile.sub @@ -11,6 +11,7 @@ libutil_HEADERS = \ jlm/util/disjointset.hpp \ jlm/util/file.hpp \ jlm/util/GraphWriter.hpp \ + jlm/util/Hash.hpp \ jlm/util/HashSet.hpp \ jlm/util/intrusive-hash.hpp \ jlm/util/intrusive-list.hpp \ diff --git a/tests/test-types.cpp b/tests/test-types.cpp index acf03fba9..9ef6319b0 100644 --- a/tests/test-types.cpp +++ b/tests/test-types.cpp @@ -5,6 +5,8 @@ #include "test-types.hpp" +#include + namespace jlm::tests { @@ -25,6 +27,12 @@ valuetype::operator==(const rvsdg::type & other) const noexcept return dynamic_cast(&other) != nullptr; } +std::size_t +valuetype::ComputeHash() const noexcept +{ + return typeid(valuetype).hash_code(); +} + std::shared_ptr valuetype::Create() { @@ -49,6 +57,12 @@ statetype::operator==(const rvsdg::type & other) const noexcept return dynamic_cast(&other) != nullptr; } +std::size_t +statetype::ComputeHash() const noexcept +{ + return typeid(statetype).hash_code(); +} + std::shared_ptr statetype::Create() { diff --git a/tests/test-types.hpp b/tests/test-types.hpp index 4d4abbac7..7fbb02dab 100644 --- a/tests/test-types.hpp +++ b/tests/test-types.hpp @@ -26,6 +26,9 @@ class valuetype final : public rvsdg::valuetype virtual bool operator==(const rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + static std::shared_ptr Create(); }; @@ -45,6 +48,9 @@ class statetype final : public rvsdg::statetype virtual bool operator==(const rvsdg::type & other) const noexcept override; + [[nodiscard]] std::size_t + ComputeHash() const noexcept override; + static std::shared_ptr Create(); }; From b35a9f013c248aae7e2d06e358132a9c430a732b Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 9 Jul 2024 21:56:55 +0200 Subject: [PATCH 007/170] Remove outdated is_gamma_output() function (#538) --- 
jlm/llvm/ir/operators/call.cpp | 4 ++-- jlm/llvm/ir/operators/gamma.hpp | 9 --------- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/rvsdg/gamma.hpp | 6 ------ 5 files changed, 4 insertions(+), 19 deletions(-) diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index e7bd76c18..ab9195829 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -179,9 +179,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) continue; } - if (auto output = is_gamma_output(origin)) + if (auto gammaOutput = dynamic_cast(origin)) { - if (auto input = invariantInput(*output)) + if (auto input = invariantInput(*gammaOutput)) { origin = input->origin(); continue; diff --git a/jlm/llvm/ir/operators/gamma.hpp b/jlm/llvm/ir/operators/gamma.hpp index a8445f31e..9935fb348 100644 --- a/jlm/llvm/ir/operators/gamma.hpp +++ b/jlm/llvm/ir/operators/gamma.hpp @@ -26,15 +26,6 @@ is_gamma_argument(const rvsdg::output * output) return nullptr; } -/* - FIXME: This function exists in librvsdg, but is currently (2020-05-19) broken. -*/ -static inline const rvsdg::gamma_output * -is_gamma_output(const rvsdg::output * output) -{ - return dynamic_cast(output); -} - /* FIXME: This should be defined in librvsdg. 
*/ diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 750b3daf4..9f82515df 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -203,7 +203,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto gammaOutput = is_gamma_output(&output)) + if (auto gammaOutput = dynamic_cast(&output)) { MarkOutput(*gammaOutput->node()->predicate()->origin()); for (const auto & result : gammaOutput->results) diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 2ba1aa916..2bf4c9a95 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -243,7 +243,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":out", index); } - if (is_gamma_output(Output_)) + if (is(Output_)) { auto dbgstr = jlm::rvsdg::node_output::node(Output_)->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index daaa8cb96..2fb419f51 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -456,12 +456,6 @@ class gamma_output final : public structural_output IsInvariant(rvsdg::output ** invariantOrigin = nullptr) const noexcept; }; -static inline bool -is_gamma_output(const jlm::rvsdg::input * input) noexcept -{ - return dynamic_cast(input) != nullptr; -} - /* gamma node method definitions */ inline gamma_node::gamma_node(jlm::rvsdg::output * predicate, size_t nalternatives) From 3cb440c28fc615aeb2794eaa66ce0fa0707a2dd4 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 10 Jul 2024 09:49:19 +0200 Subject: [PATCH 008/170] Build scripts fix for --get-commit-hash (#542) Changed --get-commit-hash to use 'exit 0' and print to stdout instead of stderr. 
--- scripts/build-circt.sh | 4 ++-- scripts/build-mlir.sh | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/build-circt.sh b/scripts/build-circt.sh index 7a113835f..47d77bfdb 100755 --- a/scripts/build-circt.sh +++ b/scripts/build-circt.sh @@ -52,8 +52,8 @@ while [[ "$#" -ge 1 ]] ; do shift ;; --get-commit-hash) - commit >&2 - exit 1 + commit >&1 + exit 0 ;; --help) usage >&2 diff --git a/scripts/build-mlir.sh b/scripts/build-mlir.sh index f60115d7f..80b453ec8 100755 --- a/scripts/build-mlir.sh +++ b/scripts/build-mlir.sh @@ -44,8 +44,8 @@ while [[ "$#" -ge 1 ]] ; do shift ;; --get-commit-hash) - commit >&2 - exit 1 + commit >&1 + exit 0 ;; --help) usage >&2 From 35f3d7eca232f8b26cc9a178efcddaf20f16067e Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 10 Jul 2024 10:25:01 +0200 Subject: [PATCH 009/170] Build reduced version of CIRCT (#540) jlm relies on the FIRRTL to Verilog Dialects in CIRCT. So switched to a CIRCT version that compiles a reduced set of Dialects. 
--- scripts/build-circt.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/scripts/build-circt.sh b/scripts/build-circt.sh index 47d77bfdb..dee85a7ff 100755 --- a/scripts/build-circt.sh +++ b/scripts/build-circt.sh @@ -1,7 +1,7 @@ #!/bin/bash set -eu -GIT_COMMIT=cc32a6c6702a5fe0ade64ee906b8d80c45d48f94 +GIT_COMMIT=debf1ed774c2bbdbfc8e7bc987a21f72e8f08f65 # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" @@ -84,6 +84,4 @@ cmake -G Ninja \ -DVERILATOR_DISABLE=ON \ -DCMAKE_INSTALL_PREFIX=${CIRCT_INSTALL} ninja -C ${CIRCT_BUILD_DIR} -# Dialects other than FIRRTL fail the check -#ninja -C ${CIRCT_BUILD_DIR} check-circt ninja -C ${CIRCT_BUILD_DIR} install From ca926bc8504b37d242a878e24498b165a3def6bd Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 10 Jul 2024 10:47:15 +0200 Subject: [PATCH 010/170] Always save CIRCT and MLIR builds (#543) Enables CIRCT and MLIR builds to always be saved if their build succeeds but the workflow they are part of fails. This eliminates the need to rebuild CIRCT and MLIR for CIs that fails. 
--- .github/actions/BuildCirct/action.yml | 1 + .github/actions/BuildMlirDialect/action.yml | 1 + 2 files changed, 2 insertions(+) diff --git a/.github/actions/BuildCirct/action.yml b/.github/actions/BuildCirct/action.yml index f02c89ada..397d9d244 100644 --- a/.github/actions/BuildCirct/action.yml +++ b/.github/actions/BuildCirct/action.yml @@ -14,6 +14,7 @@ runs: id: cache-circt uses: actions/cache@v4 with: + save-always: true path: | ${{ github.workspace }}/build-circt/circt key: ${{ runner.os }}-circt-${{ steps.get-circt-hash.outputs.hash }} diff --git a/.github/actions/BuildMlirDialect/action.yml b/.github/actions/BuildMlirDialect/action.yml index 71ad124ad..2110878a2 100644 --- a/.github/actions/BuildMlirDialect/action.yml +++ b/.github/actions/BuildMlirDialect/action.yml @@ -19,6 +19,7 @@ runs: id: cache-mlir uses: actions/cache@v4 with: + save-always: true path: | ${{ github.workspace }}/lib/mlir-rvsdg key: ${{ runner.os }}-mlir-${{ steps.get-mlir-hash.outputs.hash }} From 39b3b73ca25e85057c1de8f662167c17955e45ec Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 10 Jul 2024 13:35:03 +0200 Subject: [PATCH 011/170] HLS: Special fork for constants (#478) Adds support for forks that have a constant as input, provides unit tests, and documentation. 
--- jlm/hls/Makefile.sub | 1 + .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 142 ++++++++------ jlm/hls/backend/rvsdg2rhls/add-forks.cpp | 12 +- jlm/hls/backend/rvsdg2rhls/add-forks.hpp | 14 +- jlm/hls/ir/hls.hpp | 72 +++++++- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 173 ++++++++++++++++++ 6 files changed, 344 insertions(+), 70 deletions(-) create mode 100644 tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index 8e5f6f519..880992779 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -74,6 +74,7 @@ libhls_HEADERS = \ libhls_TESTS += \ tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests \ + tests/jlm/hls/backend/rvsdg2rhls/TestFork \ tests/jlm/hls/backend/rvsdg2rhls/TestGamma \ tests/jlm/hls/backend/rvsdg2rhls/TestTheta \ tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests \ diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 17c865ede..702c92acf 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -275,6 +275,11 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto input0 = GetSubfield(body, inBundles[0], "data"); Connect(body, outData, input0); } + else if (dynamic_cast(&(node->operation()))) + { + auto input0 = GetSubfield(body, inBundles[0], "data"); + Connect(body, outData, input0); + } else if (auto op = dynamic_cast(&(node->operation()))) { auto inData = GetSubfield(body, inBundles[0], "data"); @@ -462,6 +467,8 @@ RhlsToFirrtlConverter::MlirGenLoopConstBuffer(const jlm::rvsdg::simple_node * no circt::firrtl::FModuleOp RhlsToFirrtlConverter::MlirGenFork(const jlm::rvsdg::simple_node * node) { + auto op = dynamic_cast(&node->operation()); + bool isConstant = op->IsConstant(); // Create the module and its input/output ports auto module = nodeToModule(node); auto body = module.getBodyBlock(); @@ 
-472,71 +479,88 @@ RhlsToFirrtlConverter::MlirGenFork(const jlm::rvsdg::simple_node * node) auto inValid = GetSubfield(body, inBundle, "valid"); auto inData = GetSubfield(body, inBundle, "data"); - // - // Output registers - // - auto clock = GetClockSignal(module); - auto reset = GetResetSignal(module); - ::llvm::SmallVector firedRegs; - ::llvm::SmallVector whenConditions; auto oneBitValue = GetConstant(body, 1, 1); auto zeroBitValue = GetConstant(body, 1, 0); - // outputs can only fire if input is valid. This should not be necessary, unless other components - // misbehave - mlir::Value allFired = inValid; - for (size_t i = 0; i < node->noutputs(); ++i) - { - std::string validName("out"); - validName.append(std::to_string(i)); - validName.append("_fired_reg"); - auto firedReg = Builder_->create( - Builder_->getUnknownLoc(), - GetIntType(1), - clock, - reset, - zeroBitValue, - Builder_->getStringAttr(validName)); - body->push_back(firedReg); - firedRegs.push_back(firedReg); - // Get the bundle - auto port = GetOutPort(module, i); - auto portReady = GetSubfield(body, port, "ready"); - auto portValid = GetSubfield(body, port, "valid"); - auto portData = GetSubfield(body, port, "data"); - - auto notFiredReg = AddNotOp(body, firedReg.getResult()); - auto andOp = AddAndOp(body, inValid, notFiredReg); - Connect(body, portValid, andOp); - Connect(body, portData, inData); - - auto orOp = AddOrOp(body, portReady, firedReg.getResult()); - allFired = AddAndOp(body, allFired, orOp); - - // Conditions needed for the when statements - whenConditions.push_back(AddAndOp(body, portReady, portValid)); - } - allFired = AddNodeOp(body, allFired, "all_fired")->getResult(0); - Connect(body, inReady, allFired); - - // When statement - auto condition = AddNotOp(body, allFired); - auto whenOp = AddWhenOp(body, condition, true); - // getThenBlock() cause an error during commpilation - // So we first get the builder and then its associated body - auto thenBody = 
whenOp.getThenBodyBuilder().getBlock(); - // Then region - for (size_t i = 0; i < node->noutputs(); i++) + // + // Output registers + // + if (isConstant) { - auto nestedWhen = AddWhenOp(thenBody, whenConditions[i], false); - auto nestedBody = nestedWhen.getThenBodyBuilder().getBlock(); - Connect(nestedBody, firedRegs[i].getResult(), oneBitValue); + Connect(body, inReady, oneBitValue); + for (size_t i = 0; i < node->noutputs(); ++i) + { + // Get the bundle + auto port = GetOutPort(module, i); + auto portValid = GetSubfield(body, port, "valid"); + auto portData = GetSubfield(body, port, "data"); + Connect(body, portValid, inValid); + Connect(body, portData, inData); + } } - // Else region - auto elseBody = whenOp.getElseBodyBuilder().getBlock(); - for (size_t i = 0; i < node->noutputs(); i++) + else { - Connect(elseBody, firedRegs[i].getResult(), zeroBitValue); + auto clock = GetClockSignal(module); + auto reset = GetResetSignal(module); + ::llvm::SmallVector firedRegs; + ::llvm::SmallVector whenConditions; + // outputs can only fire if input is valid. 
This should not be necessary, unless other + // components misbehave + mlir::Value allFired = inValid; + for (size_t i = 0; i < node->noutputs(); ++i) + { + std::string validName("out"); + validName.append(std::to_string(i)); + validName.append("_fired_reg"); + auto firedReg = Builder_->create( + Builder_->getUnknownLoc(), + GetIntType(1), + clock, + reset, + zeroBitValue, + Builder_->getStringAttr(validName)); + body->push_back(firedReg); + firedRegs.push_back(firedReg); + + // Get the bundle + auto port = GetOutPort(module, i); + auto portReady = GetSubfield(body, port, "ready"); + auto portValid = GetSubfield(body, port, "valid"); + auto portData = GetSubfield(body, port, "data"); + + auto notFiredReg = AddNotOp(body, firedReg.getResult()); + auto andOp = AddAndOp(body, inValid, notFiredReg.getResult()); + Connect(body, portValid, andOp); + Connect(body, portData, inData); + + auto orOp = AddOrOp(body, portReady, firedReg.getResult()); + allFired = AddAndOp(body, allFired, orOp); + + // Conditions needed for the when statements + whenConditions.push_back(AddAndOp(body, portReady, portValid)); + } + allFired = AddNodeOp(body, allFired, "all_fired").getResult(); + Connect(body, inReady, allFired); + + // When statement + auto condition = AddNotOp(body, allFired); + auto whenOp = AddWhenOp(body, condition, true); + // getThenBlock() cause an error during commpilation + // So we first get the builder and then its associated body + auto thenBody = whenOp.getThenBodyBuilder().getBlock(); + // Then region + for (size_t i = 0; i < node->noutputs(); i++) + { + auto nestedWhen = AddWhenOp(thenBody, whenConditions[i], false); + auto nestedBody = nestedWhen.getThenBodyBuilder().getBlock(); + Connect(nestedBody, firedRegs[i].getResult(), oneBitValue); + } + // Else region + auto elseBody = whenOp.getElseBodyBuilder().getBlock(); + for (size_t i = 0; i < node->noutputs(); i++) + { + Connect(elseBody, firedRegs[i].getResult(), zeroBitValue); + } } return module; diff --git 
a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp index 88618b256..39087dd84 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp @@ -5,6 +5,7 @@ #include #include +#include #include namespace jlm::hls @@ -36,14 +37,15 @@ add_forks(jlm::rvsdg::region * region) add_forks(structnode->subregion(n)); } } + // If a node has no inputs it is a constant + bool isConstant = node->ninputs() == 0; for (size_t i = 0; i < node->noutputs(); ++i) { auto out = node->output(i); if (out->nusers() > 1) { - std::vector users; - users.insert(users.begin(), out->begin(), out->end()); - auto fork = hls::fork_op::create(out->nusers(), *out); + std::vector users(out->begin(), out->end()); + auto fork = hls::fork_op::create(out->nusers(), *out, isConstant); for (size_t j = 0; j < users.size(); j++) { users[j]->divert_to(fork[j]); @@ -54,9 +56,9 @@ add_forks(jlm::rvsdg::region * region) } void -add_forks(llvm::RvsdgModule & rm) +add_forks(llvm::RvsdgModule & rvsdgModule) { - auto & graph = rm.Rvsdg(); + auto & graph = rvsdgModule.Rvsdg(); auto root = graph.root(); add_forks(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-forks.hpp b/jlm/hls/backend/rvsdg2rhls/add-forks.hpp index eb5dc1b6d..39d8847cb 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-forks.hpp +++ b/jlm/hls/backend/rvsdg2rhls/add-forks.hpp @@ -12,11 +12,23 @@ namespace jlm::hls { +/** + * Adds a fork for every output that has multiple consumers (node inputs). The original output is + * connected to the fork's input and each consumer is connected to one of the fork's outputs. + * + * /param region The region for which to insert forks. + */ void add_forks(rvsdg::region * region); +/** + * Adds a fork for every output that has multiple consumers (node inputs). The original output is + * connected to the fork's input and each consumer is connected to one of the fork's outputs. + * + * /param rvsdgModule The RVSDG module for which to insert forks. 
+ */ void -add_forks(llvm::RvsdgModule & rm); +add_forks(llvm::RvsdgModule & rvsdgModule); } #endif // JLM_HLS_BACKEND_RVSDG2RHLS_ADD_FORKS_HPP diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 20a6e7466..00d07eefa 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -70,28 +70,66 @@ class branch_op final : public jlm::rvsdg::simple_op bool loop; // only used for dot output }; +/** + * Forks ensures 1-to-1 connections between producers and consumers, i.e., they handle fanout of + * signals. Normal forks have a register inside to ensure that a token consumed on one output is not + * repeated. The fork only creates an acknowledge on its single input once all outputs have been + * consumed. + * + * CFORK (constant fork): + * Handles the special case when the same constant is used as input for multiple nodes. It would be + * possible to have a constant for each input, but deduplication replaces the constants with a + * single constant fork. Since the input of the fork is always the same value and is always valid. + * No handshaking is necessary and the outputs of the fork is always valid. + */ class fork_op final : public jlm::rvsdg::simple_op { public: virtual ~fork_op() {} + /** + * Create a fork operation that is not a constant fork. + * + * /param nalternatives Number of outputs. + * /param value The signal type, which is the same for the input and all outputs. + */ fork_op(size_t nalternatives, const std::shared_ptr & type) : jlm::rvsdg::simple_op({ type }, { nalternatives, type }) {} + /** + * Create a fork operation. + * + * /param nalternatives Number of outputs. + * /param value The signal type, which is the same for the input and all outputs. + * /param isConstant If true, the fork is a constant fork. 
+ */ + fork_op( + size_t nalternatives, + const std::shared_ptr & type, + bool isConstant) + : rvsdg::simple_op({ type }, { nalternatives, type }), + IsConstant_(isConstant) + {} + bool operator==(const jlm::rvsdg::operation & other) const noexcept override { - auto ot = dynamic_cast(&other); + auto forkOp = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(0).type() == argument(0).type() && ot->nresults() == nresults(); + return forkOp && forkOp->argument(0).type() == argument(0).type() + && forkOp->nresults() == nresults() && forkOp->IsConstant() == IsConstant_; } + /** + * Debug string for the fork operation. + * /return HLS_CFORK if the fork is a constant fork, else HLS_FORK. + */ std::string debug_string() const override { - return "HLS_FORK"; + return IsConstant() ? "HLS_CFORK" : "HLS_FORK"; } std::unique_ptr @@ -100,14 +138,38 @@ class fork_op final : public jlm::rvsdg::simple_op return std::unique_ptr(new fork_op(*this)); } + /** + * Create a fork operation with a single input and multiple outputs. + * + * /param nalternatives Number of outputs. + * /param value The signal type, which is the same for the input and all outputs. + * /param isConstant If true, the fork is a constant fork. + * + * /return A vector of outputs. + */ static std::vector - create(size_t nalternatives, jlm::rvsdg::output & value) + create(size_t nalternatives, jlm::rvsdg::output & value, bool isConstant = false) { auto region = value.region(); - fork_op op(nalternatives, value.Type()); + fork_op op(nalternatives, value.Type(), isConstant); return jlm::rvsdg::simple_node::create_normalized(region, op, { &value }); } + + /** + * Cechk if a fork is a constant fork (CFORK). + * + * /return True if the fork is a constant fork, i.e., the input of the fork is a constant, else + * false. 
+ */ + [[nodiscard]] bool + IsConstant() const noexcept + { + return IsConstant_; + } + +private: + bool IsConstant_ = false; }; class merge_op final : public jlm::rvsdg::simple_op diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp new file mode 100644 index 000000000..9e0e4197b --- /dev/null +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -0,0 +1,173 @@ +/* + * Copyright 2024 Magnus Sjalander + * See COPYING for terms of redistribution. + */ + +#include "test-registry.hpp" + +#include +#include +#include +#include +#include + +static inline void +TestFork() +{ + using namespace jlm; + using namespace jlm::llvm; + + // Arrange + auto b32 = rvsdg::bittype::Create(32); + auto ft = FunctionType::Create({ b32, b32, b32 }, { b32, b32, b32 }); + + RvsdgModule rm(util::filepath(""), "", ""); + auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::operation)); + nf->set_mutable(false); + + auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + + rvsdg::bitult_op ult(32); + rvsdg::bitadd_op add(32); + + auto loop = hls::loop_node::create(lambda->subregion()); + auto subregion = loop->subregion(); + rvsdg::output * idvBuffer; + loop->add_loopvar(lambda->fctargument(0), &idvBuffer); + rvsdg::output * lvsBuffer; + loop->add_loopvar(lambda->fctargument(1), &lvsBuffer); + rvsdg::output * lveBuffer; + loop->add_loopvar(lambda->fctargument(2), &lveBuffer); + + auto arm = rvsdg::simple_node::create_normalized(subregion, add, { idvBuffer, lvsBuffer })[0]; + auto cmp = rvsdg::simple_node::create_normalized(subregion, ult, { arm, lveBuffer })[0]; + auto match = rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); + + loop->set_predicate(match); + + auto f = lambda->finalize({ loop->output(0), loop->output(1), loop->output(2) }); + rm.Rvsdg().add_export(f, { f->Type(), "" }); + + rvsdg::view(rm.Rvsdg(), stdout); + + // Act + hls::add_forks(rm); + rvsdg::view(rm.Rvsdg(), stdout); + + // Assert + { + 
auto omegaRegion = rm.Rvsdg().root(); + assert(omegaRegion->nnodes() == 1); + auto lambda = util::AssertedCast(omegaRegion->nodes.first()); + assert(is(lambda)); + + auto lambdaRegion = lambda->subregion(); + assert(lambdaRegion->nnodes() == 1); + auto loop = util::AssertedCast(lambdaRegion->nodes.first()); + assert(is(loop)); + + // Traverse the rvsgd graph upwards to check connections + rvsdg::node_output * forkNodeOutput; + assert( + forkNodeOutput = + dynamic_cast(loop->subregion()->result(0)->origin())); + auto forkNode = forkNodeOutput->node(); + auto forkOp = util::AssertedCast(&forkNode->operation()); + assert(forkNode->ninputs() == 1); + assert(forkNode->noutputs() == 4); + assert(forkOp->IsConstant() == false); + } +} + +static inline void +TestConstantFork() +{ + using namespace jlm; + using namespace jlm::llvm; + + // Arrange + auto b32 = rvsdg::bittype::Create(32); + auto ft = FunctionType::Create({ b32 }, { b32 }); + + RvsdgModule rm(util::filepath(""), "", ""); + auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::operation)); + nf->set_mutable(false); + + auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + auto lambdaRegion = lambda->subregion(); + + rvsdg::bitult_op ult(32); + rvsdg::bitadd_op add(32); + + auto loop = hls::loop_node::create(lambdaRegion); + auto subregion = loop->subregion(); + rvsdg::output * idvBuffer; + loop->add_loopvar(lambda->fctargument(0), &idvBuffer); + auto bitConstant1 = rvsdg::create_bitconstant(subregion, 32, 1); + + auto arm = rvsdg::simple_node::create_normalized(subregion, add, { idvBuffer, bitConstant1 })[0]; + auto cmp = rvsdg::simple_node::create_normalized(subregion, ult, { arm, bitConstant1 })[0]; + auto match = rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); + + loop->set_predicate(match); + + auto f = lambda->finalize({ loop->output(0) }); + rm.Rvsdg().add_export(f, { f->Type(), "" }); + + rvsdg::view(rm.Rvsdg(), stdout); + + // Act + hls::add_forks(rm); + 
rvsdg::view(rm.Rvsdg(), stdout); + + // Assert + { + auto omegaRegion = rm.Rvsdg().root(); + assert(omegaRegion->nnodes() == 1); + auto lambda = util::AssertedCast(omegaRegion->nodes.first()); + assert(is(lambda)); + + auto lambdaRegion = lambda->subregion(); + assert(lambdaRegion->nnodes() == 1); + + rvsdg::node_output * loopOutput; + assert(loopOutput = dynamic_cast(lambdaRegion->result(0)->origin())); + auto loopNode = loopOutput->node(); + assert(is(loopNode)); + auto loop = util::AssertedCast(loopNode); + + // Traverse the rvsgd graph upwards to check connections + rvsdg::node_output * forkNodeOutput; + assert( + forkNodeOutput = + dynamic_cast(loop->subregion()->result(0)->origin())); + auto forkNode = forkNodeOutput->node(); + auto forkOp = util::AssertedCast(&forkNode->operation()); + assert(forkNode->ninputs() == 1); + assert(forkNode->noutputs() == 2); + assert(forkOp->IsConstant() == false); + auto matchNodeOutput = dynamic_cast(forkNode->input(0)->origin()); + auto matchNode = matchNodeOutput->node(); + auto bitsUltNodeOutput = dynamic_cast(matchNode->input(0)->origin()); + auto bitsUltNode = bitsUltNodeOutput->node(); + auto cforkNodeOutput = dynamic_cast(bitsUltNode->input(1)->origin()); + auto cforkNode = cforkNodeOutput->node(); + auto cforkOp = util::AssertedCast(&cforkNode->operation()); + assert(cforkNode->ninputs() == 1); + assert(cforkNode->noutputs() == 2); + assert(cforkOp->IsConstant() == true); + } +} + +static int +Test() +{ + std::cout << std::endl << "### Test fork ###" << std::endl << std::endl; + TestFork(); + std::cout << std::endl << "### Test constant ###" << std::endl << std::endl; + TestConstantFork(); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/TestFork", Test) From c8eb355a9344350585a9e7c73d19c16c60519f71 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 14 Jul 2024 10:13:00 +0200 Subject: [PATCH 012/170] New firrtl2verilog HLS backend (#539) Enables jlm-hls to directly generate Verilog as 
output. --- configure.sh | 41 ++++++- jlm/hls/Makefile.sub | 4 + .../FirrtlToVerilogConverter.cpp | 111 ++++++++++++++++++ .../FirrtlToVerilogConverter.hpp | 34 ++++++ .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 12 ++ tools/jlm-hls/jlm-hls.cpp | 20 +++- 6 files changed, 216 insertions(+), 6 deletions(-) create mode 100644 jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp create mode 100644 jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.hpp diff --git a/configure.sh b/configure.sh index bc1535a33..492dfe302 100755 --- a/configure.sh +++ b/configure.sh @@ -112,7 +112,44 @@ CXXFLAGS_NO_COMMENT="" if [ "${ENABLE_HLS}" == "yes" ] ; then CPPFLAGS_CIRCT="-I${CIRCT_PATH}/include" CXXFLAGS_NO_COMMENT="-Wno-error=comment" - CIRCT_LDFLAGS="-L${CIRCT_PATH}/lib -lMLIR -lCIRCTAnalysisTestPasses -lCIRCTDependenceAnalysis -lCIRCTExportFIRRTL -lCIRCTFIRRTL -lCIRCTFIRRTLTransforms -lCIRCTScheduling -lCIRCTSchedulingAnalysis -lCIRCTSeq -lCIRCTSupport -lCIRCTTransforms -lCIRCTHW -lCIRCTOM" + CIRCT_LDFLAGS_ARRAY=( + "-L${CIRCT_PATH}/lib" + "-lMLIR" + "-lMLIRBytecodeReader" + "-lMLIRBytecodeWriter" + "-lMLIRParser" + "-lMLIRSupport" + "-lMLIRIR" + "-lMLIROptLib" + "-lMLIRFuncDialect" + "-lMLIRTransforms" + "-lCIRCTAnalysisTestPasses" + "-lCIRCTDependenceAnalysis" + "-lCIRCTExportFIRRTL" + "-lCIRCTScheduling" + "-lCIRCTSchedulingAnalysis" + "-lCIRCTFirtool" + "-lCIRCTFIRRTLReductions" + "-lCIRCTFIRRTLToHW" + "-lCIRCTExportVerilog" + "-lCIRCTImportFIRFile" + "-lCIRCTFIRRTLTransforms" + "-lCIRCTHWTransforms" + "-lCIRCTSVTransforms" + "-lCIRCTTransforms" + "-lCIRCTSV" + "-lCIRCTComb" + "-lCIRCTLTL" + "-lCIRCTVerif" + "-lCIRCTFIRRTL" + "-lCIRCTSeq" + "-lCIRCTSeqTransforms" + "-lCIRCTHW" + "-lCIRCTVerifToSV" + "-lCIRCTExportChiselInterface" + "-lCIRCTOM" + "-lCIRCTSupport" + ) fi CPPFLAGS_MLIR="" @@ -137,7 +174,7 @@ CXXFLAGS=${CXXFLAGS-} ${CXXFLAGS_COMMON} ${CXXFLAGS_TARGET} ${CXXFLAGS_NO_COMMEN CPPFLAGS=${CPPFLAGS-} ${CPPFLAGS_COMMON} ${CPPFLAGS_LLVM} ${CPPFLAGS_ASSERTS} 
${CPPFLAGS_CIRCT} ${CPPFLAGS_MLIR} ENABLE_HLS=${ENABLE_HLS} CIRCT_PATH=${CIRCT_PATH} -CIRCT_LDFLAGS=${CIRCT_LDFLAGS} +CIRCT_LDFLAGS=${CIRCT_LDFLAGS_ARRAY[*]} ENABLE_MLIR=${ENABLE_MLIR} MLIR_PATH=${MLIR_PATH} MLIR_LDFLAGS=${MLIR_LDFLAGS} diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index 880992779..c3d4e8f4b 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -2,6 +2,8 @@ # See COPYING for terms of redistribution. libhls_SOURCES = \ + jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp \ + \ jlm/hls/backend/rhls2firrtl/base-hls.cpp \ jlm/hls/backend/rhls2firrtl/dot-hls.cpp \ jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp \ @@ -38,6 +40,8 @@ libhls_SOURCES = \ jlm/hls/util/view.cpp \ libhls_HEADERS = \ + jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.hpp \ + \ jlm/hls/backend/rhls2firrtl/base-hls.hpp \ jlm/hls/backend/rhls2firrtl/dot-hls.hpp \ jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp \ diff --git a/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp b/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp new file mode 100644 index 000000000..badc6af0e --- /dev/null +++ b/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp @@ -0,0 +1,111 @@ +/* + * Copyright 2024 Magnus Sjalander + * See COPYING for terms of redistribution. 
+ */ + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace jlm::hls +{ + +using namespace circt; +using namespace llvm; + +bool +FirrtlToVerilogConverter::Convert( + const util::filepath inputFirrtlFile, + const util::filepath outputVerilogFile) +{ + mlir::MLIRContext context; + mlir::TimingScope ts; + + // Set up and read the input FIRRTL file + std::string errorMessage; + auto input = mlir::openInputFile(inputFirrtlFile.to_str(), &errorMessage); + if (!input) + { + std::cerr << errorMessage << std::endl; + return false; + } + llvm::SourceMgr sourceMgr; + sourceMgr.AddNewSourceBuffer(std::move(input), llvm::SMLoc()); + mlir::SourceMgrDiagnosticVerifierHandler sourceMgrHandler(sourceMgr, &context); + + firrtl::FIRParserOptions options; + options.infoLocatorHandling = firrtl::FIRParserOptions::InfoLocHandling::IgnoreInfo; + options.numAnnotationFiles = 0; + options.scalarizeExtModules = true; + auto module = importFIRFile(sourceMgr, &context, ts, options); + if (!module) + { + std::cerr << "Failed to parse FIRRTL input" << std::endl; + return false; + } + + // Manually set up the options for the firtool + cl::OptionCategory mainCategory("firtool Options"); + firtool::FirtoolOptions firtoolOptions(mainCategory); + firtoolOptions.preserveAggregate = firrtl::PreserveAggregate::PreserveMode::None; + firtoolOptions.preserveMode = firrtl::PreserveValues::PreserveMode::None; + firtoolOptions.buildMode = firtool::FirtoolOptions::BuildModeRelease; + firtoolOptions.exportChiselInterface = false; + + // Populate the pass manager and apply them to the module + mlir::PassManager pm(&context); + + // Firtool sets a blackBoxRoot based on the inputFilename path, but this functionality is not used + // so we set it to an empty string (the final argument) + if (failed(firtool::populateCHIRRTLToLowFIRRTL(pm, firtoolOptions, *module, ""))) + { + std::cerr << "Failed to populate CHIRRTL to LowFIRRTL" << std::endl; + return false; + } 
+ if (failed(firtool::populateLowFIRRTLToHW(pm, firtoolOptions))) + { + std::cerr << "Failed to populate LowFIRRTL to HW" << std::endl; + return false; + } + if (failed(firtool::populateHWToSV(pm, firtoolOptions))) + { + std::cerr << "Failed to populate HW to SV" << std::endl; + return false; + } + + if (failed(pm.run(module.get()))) + { + std::cerr << "Failed to run pass manager" << std::endl; + return false; + } + + mlir::PassManager exportPm(&context); + + // Legalize unsupported operations within the modules. + exportPm.nest().addPass(sv::createHWLegalizeModulesPass()); + + // Tidy up the IR to improve verilog emission quality. + exportPm.nest().addPass(sv::createPrettifyVerilogPass()); + + std::error_code errorCode; + llvm::raw_fd_ostream os(outputVerilogFile.to_str(), errorCode); + exportPm.addPass(createExportVerilogPass(os)); + + if (failed(exportPm.run(module.get()))) + { + std::cerr << "Failed to run export pass manager" << std::endl; + return false; + } + + (void)module.release(); + return true; +} + +} // namespace jlm::hls diff --git a/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.hpp b/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.hpp new file mode 100644 index 000000000..14748becc --- /dev/null +++ b/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.hpp @@ -0,0 +1,34 @@ +/* + * Copyright 2024 Magnus Sjalander + * See COPYING for terms of redistribution. + */ + +#ifndef JLM_HLS_BACKEND_FIRRTL2VERILOG_FIRRTLTOVERILOGCONVERTER_HPP +#define JLM_HLS_BACKEND_FIRRTL2VERILOG_FIRRTLTOVERILOGCONVERTER_HPP + +#include + +namespace jlm::hls +{ + +class FirrtlToVerilogConverter +{ +public: + FirrtlToVerilogConverter() = delete; + + /** + * Converts FIRRTL to Verilog by reading FIRRTL from a file and writing the converted Verilog to + * another file. The functionality is heavily inspired by the processBuffer() function of the + * firtool in the CIRCT project. 
+ * + * \param inputFirrtlFile The complete path to the FIRRTL file to convert to Verilog. + * \param outputVerilogFile The complete path to the Verilog file to write the converted Verilog + * to. \return True if the conversion was successful, false otherwise. + */ + static bool + Convert(const util::filepath inputFirrtlFile, const util::filepath outputVerilogFile); +}; + +} // namespace jlm::hls + +#endif // JLM_HLS_BACKEND_FIRRTL2VERILOG_FIRRTLTOVERILOGCONVERTER_HPP diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index c14b0e31a..6cb46845c 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -84,6 +84,18 @@ class RhlsToFirrtlConverter : public BaseHLS return mlirGen.toString(circuit); } + std::unique_ptr + ConvertToMduleOp(llvm::RvsdgModule & rvsdgModule) + { + auto lambdaNode = get_hls_lambda(rvsdgModule); + auto mlirGen = RhlsToFirrtlConverter(); + auto circuit = mlirGen.MlirGen(lambdaNode); + std::unique_ptr module = + std::make_unique(mlir::ModuleOp::create(Builder_->getUnknownLoc())); + module->push_back(circuit); + return module; + } + private: std::string toString(const circt::firrtl::CircuitOp circuit); diff --git a/tools/jlm-hls/jlm-hls.cpp b/tools/jlm-hls/jlm-hls.cpp index f51caa26a..ffa1007e4 100644 --- a/tools/jlm-hls/jlm-hls.cpp +++ b/tools/jlm-hls/jlm-hls.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. */ +#include #include #include #include @@ -77,18 +78,28 @@ main(int argc, char ** argv) jlm::hls::rvsdg2ref(*rvsdgModule, commandLineOptions.OutputFiles_.to_str() + ".ref.ll"); jlm::hls::rvsdg2rhls(*rvsdgModule); - std::string output; + jlm::util::filepath firrtlFile(commandLineOptions.OutputFiles_.to_str() + ".fir"); if (commandLineOptions.UseCirct_) { + // Writing the FIRRTL to a file and then reading it back in to convert to Verilog. 
+ // Could potentially change to pass the FIRRTL directly to the converter, but the converter + // is based on CIRCT's Firtool library, which assumes that the FIRRTL is read from a file. jlm::hls::RhlsToFirrtlConverter hls; - output = hls.ToString(*rvsdgModule); + auto output = hls.ToString(*rvsdgModule); + stringToFile(output, firrtlFile.to_str()); + jlm::util::filepath outputVerilogFile(commandLineOptions.OutputFiles_.to_str() + ".v"); + if (!jlm::hls::FirrtlToVerilogConverter::Convert(firrtlFile, outputVerilogFile)) + { + std::cerr << "The FIRRTL to Verilog conversion failed.\n" << std::endl; + exit(1); + } } else { jlm::hls::FirrtlHLS hls; - output = hls.run(*rvsdgModule); + auto output = hls.run(*rvsdgModule); + stringToFile(output, firrtlFile.to_str()); } - stringToFile(output, commandLineOptions.OutputFiles_.to_str() + ".fir"); jlm::hls::VerilatorHarnessHLS vhls; stringToFile(vhls.run(*rvsdgModule), commandLineOptions.OutputFiles_.to_str() + ".harness.cpp"); @@ -109,5 +120,6 @@ main(int argc, char ** argv) { JLM_UNREACHABLE("Format not supported.\n"); } + return 0; } From 154d50fed97746263ac0d8a1752fad793f0f4c17 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 14 Jul 2024 21:53:23 +0200 Subject: [PATCH 013/170] jhls links llvm-IR files and creates single object file (#545) This simplifies how HLS benchmarks are compiled and reduce their dependencies to only jlm binaries (jhls and jlm-hls) and Verilator. 
--- jlm/tooling/CommandGraphGenerator.cpp | 19 +++++++++++++------ 1 file changed, 13 insertions(+), 6 deletions(-) diff --git a/jlm/tooling/CommandGraphGenerator.cpp b/jlm/tooling/CommandGraphGenerator.cpp index 48fac1d0c..a73677af5 100644 --- a/jlm/tooling/CommandGraphGenerator.cpp +++ b/jlm/tooling/CommandGraphGenerator.cpp @@ -360,12 +360,19 @@ JhlsCommandGraphGenerator::GenerateCommandGraph(const JhlsCommandLineOptions & c commandLineOptions.UseCirct_); m2r2.AddEdge(hls); - std::vector lnkifiles; - for (const auto & c : commandLineOptions.Compilations_) - { - if (c.RequiresLinking()) - lnkifiles.push_back(c.OutputFile()); - } + auto linkerInputFiles = util::filepath(commandLineOptions.OutputFile_.to_str() + ".re*.ll"); + auto mergedFile = util::filepath(commandLineOptions.OutputFile_.to_str() + ".merged.ll"); + auto & llvmLink = + LlvmLinkCommand::Create(*commandGraph, { linkerInputFiles }, mergedFile, true, false); + hls.AddEdge(llvmLink); + + auto & compileMerged = LlcCommand::Create( + *commandGraph, + mergedFile, + util::filepath(commandLineOptions.OutputFile_.to_str() + ".o"), + LlcCommand::OptimizationLevel::O3, + LlcCommand::RelocationModel::Pic); + llvmLink.AddEdge(compileMerged); for (const auto & leave : leaves) leave->AddEdge(commandGraph->GetExitNode()); From 929b49ee89eaeab084bb1cb83b273e648df2ae31 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 14 Jul 2024 22:26:13 +0200 Subject: [PATCH 014/170] Removes --firtool option from run-hls-test.sh (#546) With jlm-hls being able to generate Verilog there is no longer a need to use the firtool to convert FIRRTL to Verilog. 
--- .github/workflows/hls.yml | 2 +- scripts/run-hls-test.sh | 25 +++---------------------- 2 files changed, 4 insertions(+), 23 deletions(-) diff --git a/.github/workflows/hls.yml b/.github/workflows/hls.yml index 24bc5b9cc..52c91fc99 100644 --- a/.github/workflows/hls.yml +++ b/.github/workflows/hls.yml @@ -21,4 +21,4 @@ jobs: with: install-verilator: true - name: "Run hls-test-suite" - run: ./scripts/run-hls-test.sh --firtool ${{ github.workspace }}/build-circt/circt/bin/firtool + run: ./scripts/run-hls-test.sh diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index e8d8149a5..88c4a749e 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=806876decd60a91cc6ec4773b6edeca20156f528 +GIT_COMMIT=f90cce18a8868185a31e4a69902835d1df60846a # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" @@ -14,9 +14,6 @@ JLM_BIN_DIR=${JLM_ROOT_DIR}/build BENCHMARK_DIR=${JLM_ROOT_DIR}/usr/hls-test-suite BENCHMARK_RUN_TARGET=run -# We assume that the firtool is in the PATH -FIRTOOL=firtool - # Execute benchmarks in parallel by default PARALLEL_THREADS=`nproc` @@ -31,8 +28,6 @@ function usage() echo "" echo " --benchmark-path PATH The path where to place the HLS test suite." echo " Default=[${BENCHMARK_DIR}]" - echo " --firtool COMMAND The command for running firtool, which can include a path." - echo " Default=[${FIRTOOL}]" echo " --parallel #THREADS The number of threads to run in parallel." echo " Default=[${PARALLEL_THREADS}]" echo " --get-commit-hash Prints the commit hash used for the build." 
@@ -46,11 +41,6 @@ while [[ "$#" -ge 1 ]] ; do BENCHMARK_DIR=$(readlink -m "$1") shift ;; - --firtool) - shift - FIRTOOL=$(readlink -m "$1") - shift - ;; --parallel) shift PARALLEL_THREADS=$1 @@ -67,15 +57,6 @@ while [[ "$#" -ge 1 ]] ; do esac done -# Check if firtool exists -if ! command -v ${FIRTOOL} &> /dev/null -then - echo "${FIRTOOL} is not found." - echo "Make sure to use '--firtool COMMAND' to specify which firtool to use." - echo "You can use './scripts/build-circt.sh' to build it, if needed." - exit 1 -fi - # Check if verilator exists if ! command -v verilator &> /dev/null then @@ -93,5 +74,5 @@ export PATH=${JLM_BIN_DIR}:${PATH} cd ${BENCHMARK_DIR} git checkout ${GIT_COMMIT} make clean -echo "make -j ${PARALLEL_THREADS} -O FIRTOOL=${FIRTOOL} ${BENCHMARK_RUN_TARGET}" -make -j ${PARALLEL_THREADS} -O FIRTOOL=${FIRTOOL} ${BENCHMARK_RUN_TARGET} +echo "make -j ${PARALLEL_THREADS} -O ${BENCHMARK_RUN_TARGET}" +make -j ${PARALLEL_THREADS} -O ${BENCHMARK_RUN_TARGET} From 03769ad01ed3362efa1028a3440db0b8ff3326e2 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 15 Jul 2024 16:16:27 +0200 Subject: [PATCH 015/170] Using llvm-version variable for CI InstallPackages (#548) This avoids having to change the version number in multiple places when updating LLVM version. --- .github/actions/InstallPackages/action.yml | 23 +++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) diff --git a/.github/actions/InstallPackages/action.yml b/.github/actions/InstallPackages/action.yml index 026551c23..0faf3409a 100644 --- a/.github/actions/InstallPackages/action.yml +++ b/.github/actions/InstallPackages/action.yml @@ -2,6 +2,11 @@ name: "Install packages" description: "Installs packages that the jlm framework depends on." inputs: + llvm-version: + description: "LLVM/MLIR version that is installed" + default: 17 + required: false + install-llvm: description: "Install LLVM package. Default is 'false'." 
default: "false" @@ -51,10 +56,10 @@ runs: || inputs.install-mlir == 'true' || inputs.install-clang-format == 'true'}} run: | - export HAS_LLVM_REPOSITORY=$(find /etc/apt/ -name *.list | xargs cat | grep llvm-toolchain-jammy-17) + export HAS_LLVM_REPOSITORY=$(find /etc/apt/ -name *.list | xargs cat | grep llvm-toolchain-jammy-${{inputs.llvm-version}}) if [[ -z $HAS_LLVM_REPOSITORY ]]; then wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | sudo tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc - sudo add-apt-repository --no-update deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-17 main + sudo add-apt-repository --no-update deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-${{inputs.llvm-version}} main fi shell: bash @@ -65,29 +70,29 @@ runs: - name: "Install LLVM package" if: ${{inputs.install-llvm == 'true'}} run: | - sudo apt-get install llvm-17-dev - pip install "lit~=17.0" + sudo apt-get install llvm-${{inputs.llvm-version}}-dev + pip install "lit~=${{inputs.llvm-version}}.0" pip show lit shell: bash - name: "Install clang package" if: ${{inputs.install-clang == 'true'}} - run: sudo apt-get install clang-17 + run: sudo apt-get install clang-${{inputs.llvm-version}} shell: bash - name: "Install MLIR packages" if: ${{inputs.install-mlir == 'true'}} run: | - sudo apt-get install libmlir-17-dev mlir-17-tools + sudo apt-get install libmlir-${{inputs.llvm-version}}-dev mlir-${{inputs.llvm-version}}-tools if ! 
[ -f /usr/lib/x86_64-linux-gnu/libMLIR.so ]; then - sudo ln -s /usr/lib/llvm-17/lib/libMLIR.so.17 /usr/lib/x86_64-linux-gnu/ - sudo ln -s /usr/lib/llvm-17/lib/libMLIR.so.17 /usr/lib/x86_64-linux-gnu/libMLIR.so + sudo ln -s /usr/lib/llvm-${{inputs.llvm-version}}/lib/libMLIR.so.${{inputs.llvm-version}} /usr/lib/x86_64-linux-gnu/ + sudo ln -s /usr/lib/llvm-${{inputs.llvm-version}}/lib/libMLIR.so.${{inputs.llvm-version}} /usr/lib/x86_64-linux-gnu/libMLIR.so fi shell: bash - name: "Install clang-format package" if: ${{inputs.install-clang-format == 'true'}} - run: sudo apt-get install clang-format-17 + run: sudo apt-get install clang-format-${{inputs.llvm-version}} shell: bash - name: "Install ninja package" From b82f21c5a02c9a6dd03f0ef214f0b74dd4d29214 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 17 Jul 2024 09:12:32 +0200 Subject: [PATCH 016/170] Removed deprecated FIRRTL generation (#550) The FIRRTL generation based on printing ASCII has not been kept up to date with the CIRCT version, and no longer serves a purpose for the HLS backend. The '--circt' argument for jhls has been deprecated but kept to not break dependent projects, i.e., the hls-test-suite, until they have removed the use of the argument. 
--- jlm/hls/Makefile.sub | 2 - jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp | 1146 -------------------- jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp | 199 ---- jlm/tooling/Command.cpp | 1 - jlm/tooling/Command.hpp | 11 +- jlm/tooling/CommandGraphGenerator.cpp | 3 +- jlm/tooling/CommandLine.cpp | 7 +- jlm/tooling/CommandLine.hpp | 6 +- tools/jlm-hls/jlm-hls.cpp | 30 +- 9 files changed, 17 insertions(+), 1388 deletions(-) delete mode 100644 jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp delete mode 100644 jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index c3d4e8f4b..3e1e1de41 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -6,7 +6,6 @@ libhls_SOURCES = \ \ jlm/hls/backend/rhls2firrtl/base-hls.cpp \ jlm/hls/backend/rhls2firrtl/dot-hls.cpp \ - jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp \ jlm/hls/backend/rhls2firrtl/json-hls.cpp \ jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp \ jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp \ @@ -44,7 +43,6 @@ libhls_HEADERS = \ \ jlm/hls/backend/rhls2firrtl/base-hls.hpp \ jlm/hls/backend/rhls2firrtl/dot-hls.hpp \ - jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp \ jlm/hls/backend/rhls2firrtl/json-hls.hpp \ jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp \ jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp \ diff --git a/jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp b/jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp deleted file mode 100644 index 5d8ff7369..000000000 --- a/jlm/hls/backend/rhls2firrtl/firrtl-hls.cpp +++ /dev/null @@ -1,1146 +0,0 @@ -/* - * Copyright 2021 David Metz - * See COPYING for terms of redistribution. 
- */ - -#include - -#include - -namespace jlm::hls -{ - -bool -is_identity_mapping(const jlm::rvsdg::match_op & op) -{ - for (const auto & pair : op) - { - if (pair.first != pair.second) - return false; - } - - return true; -} - -std::string -FirrtlHLS::get_text(llvm::RvsdgModule & rm) -{ - std::ostringstream firrtl; - auto module = lambda_node_to_firrtl(get_hls_lambda(rm)); - firrtl << indent(0) << "circuit " << module.name << ":\n"; - for (auto module : modules) - { - firrtl << module.second.firrtl; - } - return firrtl.str(); -} - -std::string -FirrtlHLS::to_firrtl_type(const jlm::rvsdg::type * type) -{ - return util::strfmt("UInt<", jlm_sizeof(type), ">"); -} - -std::string -FirrtlHLS::mem_io() -{ - std::ostringstream module; - module << indent(2) - << "output mem_req: {flip ready: UInt<1>, valid: UInt<1>, addr: UInt<64>, data: UInt<64>, " - "write: UInt<1>, width: UInt<3>}\n"; - module << indent(2) << "input mem_res: {valid: UInt<1>, data: UInt<64>}\n"; - module << indent(2) << "mem_req.valid <= " << UInt(1, 0) << "\n"; - module << indent(2) << "mem_req.addr is invalid\n"; - module << indent(2) << "mem_req.write is invalid\n"; - module << indent(2) << "mem_req.data is invalid\n"; - module << indent(2) << "mem_req.width is invalid\n"; - return module.str(); -} - -std::string -FirrtlHLS::mux_mem(const std::vector & mem_nodes) const -{ - std::ostringstream mem; - std::string previous_granted = UInt(1, 0); - for (auto node_name : mem_nodes) - { - mem << indent(2) << node_name << ".mem_res.valid <= mem_res.valid\n"; - mem << indent(2) << node_name << ".mem_res.data <= mem_res.data\n"; - mem << indent(2) << node_name << ".mem_req.ready <= " << UInt(1, 0) << "\n"; - mem << indent(2) << "when and(not(" << previous_granted << ")," << node_name - << ".mem_req.valid):\n"; - mem << indent(3) << node_name << ".mem_req.ready <= " << UInt(1, 1) << "\n"; - mem << indent(3) << "mem_req.addr <= " << node_name << ".mem_req.addr\n"; - mem << indent(3) << "mem_req.write <= " << 
node_name << ".mem_req.write\n"; - mem << indent(3) << "mem_req.valid <= " << UInt(1, 1) << "\n"; - mem << indent(3) << "mem_req.data <= " << node_name << ".mem_req.data\n"; - mem << indent(3) << "mem_req.width <= " << node_name << ".mem_req.width\n"; - mem << indent(2) << "node previous_granted_" << node_name << " = or(" << previous_granted - << ", " << node_name << ".mem_req.ready)\n"; - previous_granted = "previous_granted_" + node_name; - } - return mem.str(); -} - -std::string -FirrtlHLS::module_header(const jlm::rvsdg::node * node, bool has_mem_io) -{ - std::ostringstream module; - - module << indent(1) << "module " << get_module_name(node) << ":\n"; - // io - module << indent(2) << "; io\n"; - module << indent(2) << "input clk: Clock\n"; - module << indent(2) << "input reset: UInt<1>\n"; - for (size_t i = 0; i < node->ninputs(); ++i) - { - module << indent(2) << "input i" << (i) << ": {flip ready: UInt<1>, valid: UInt<1>, data: " - << to_firrtl_type(&node->input(i)->type()) << "}\n"; - } - for (size_t i = 0; i < node->noutputs(); ++i) - { - module << indent(2) << "output o" << i << ": {flip ready: UInt<1>, valid: UInt<1>, data: " - << to_firrtl_type(&node->output(i)->type()) << "}\n"; - } - if (has_mem_io) - { - module << mem_io(); - } - - return module.str(); -} - -FirrtlModule & -FirrtlHLS::mem_node_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n, true); - - bool store = dynamic_cast(&(n->operation())); - // registers - module << indent(2) << "; registers\n"; - for (size_t i = 0; i < n->noutputs(); ++i) - { - module << indent(2) << "reg o" << i << "_valid_reg: UInt<1>, clk with: (reset => (reset, " - << UInt(1, 0) << "))\n"; - module << indent(2) << "reg o" << i << "_data_reg: " << to_firrtl_type(&n->output(i)->type()) - << ", clk\n"; - } - module << indent(2) << "reg sent_reg: UInt<1>, clk with: (reset => (reset, " << UInt(1, 0) - << "))\n"; - 
std::string can_request = "not(sent_reg)"; // only request again once previous request is finished - for (size_t i = 0; i < n->ninputs(); ++i) - { - can_request = "and(" + can_request + ", " + valid(n->input(i)) + ")"; - } - for (size_t i = 0; i < n->noutputs(); ++i) - { - can_request = "and(" + can_request + ", not(o" + util::strfmt(i) + "_valid_reg))"; - } - // block until all inputs and no outputs are valid - module << indent(2) << "node can_request = " << can_request << "\n"; - - module << indent(2) << "; mem request\n"; - module << indent(2) << "mem_req.valid <= can_request\n"; - module << indent(2) << "mem_req.addr <= " << data(n->input(0)) << "\n"; - int bit_width; - if (store) - { - module << indent(2) << "mem_req.write <= " << UInt(1, 1) << "\n"; - module << indent(2) << "mem_req.data <= " << data(n->input(1)) << "\n"; - bit_width = dynamic_cast(&n->input(1)->type())->nbits(); - } - else - { - module << indent(2) << "mem_req.write <= " << UInt(1, 0) << "\n"; - module << indent(2) << "mem_req.data is invalid\n"; - if (auto bt = dynamic_cast(&n->output(0)->type())) - { - bit_width = bt->nbits(); - } - else if (dynamic_cast(&n->output(0)->type())) - { - bit_width = 64; - } - else - { - throw util::error("unknown width for mem request"); - } - } - int log2_bytes = log2(bit_width / 8); - module << indent(2) << "mem_req.width <= " << UInt(4, log2_bytes) << "\n"; - module << indent(2) << "when mem_req.ready:\n"; - module << indent(3) << "sent_reg <= " << UInt(1, 1) << "\n"; - // set memstate - if (store) - { - module << indent(3) << "o0_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(3) << "o0_data_reg <= " << data(n->input(2)) << "\n"; - } - else - { - module << indent(3) << "o1_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(3) << "o1_data_reg <= " << data(n->input(1)) << "\n"; - } - module << indent(2) << "; mem response\n"; - module << indent(2) << "when and(sent_reg, mem_res.valid):\n"; - module << indent(3) << "sent_reg <= " << UInt(1, 0) 
<< "\n"; - if (!store) - { - module << indent(3) << "o0_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(3) << "o0_data_reg <= mem_res.data\n"; - } - // handshaking - module << indent(2) << "; handshaking\n"; - // inputs are ready when mem interface accepts request (is ready) - for (size_t i = 0; i < n->ninputs(); ++i) - { - module << indent(2) << ready(n->input(i)) << " <= mem_req.ready\n"; - } - for (size_t i = 0; i < n->noutputs(); ++i) - { - auto out = n->output(i); - module << indent(2) << valid(out) << " <= o" << i << "_valid_reg,\n"; - module << indent(2) << data(out) << " <= o" << i << "_data_reg\n"; - module << indent(2) << "when " << fire(out) << ":\n"; - module << indent(3) << "o" << i << "_valid_reg <= " << UInt(1, 0) << "\n"; - } - - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), true }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::pred_buffer_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - module << indent(2) << "; registers\n"; - // start initialized with a valid pred 0 - module << indent(2) << "reg buf_valid_reg: UInt<1>, clk with: (reset => (reset, " << UInt(1, 1) - << "))\n"; - module << indent(2) << "reg buf_data_reg: " << to_firrtl_type(&n->output(0)->type()) - << ", clk with: (reset => (reset, " << UInt(1, 0) << "))\n"; - auto o0 = n->output(0); - auto i0 = n->input(0); - module << indent(2) << valid(o0) << " <= or(buf_valid_reg, " << valid(i0) << ")\n"; - module << indent(2) << data(o0) << " <= mux(buf_valid_reg, buf_data_reg, " << data(i0) << ")\n"; - module << indent(2) << ready(i0) << " <= not(buf_valid_reg)\n"; - module << indent(2) << "when " << fire(i0) << ":\n"; - module << indent(3) << "buf_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(3) << "buf_data_reg <= " << data(i0) << "\n"; - module << indent(2) << "when " << fire(o0) << ":\n"; - module << indent(3) 
<< "buf_valid_reg <= " << UInt(1, 0) << "\n"; - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::buffer_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - module << indent(2) << "; registers\n"; - auto o = dynamic_cast(&(n->operation())); - auto capacity = o->capacity; - for (size_t i = 0; i < capacity; ++i) - { - module << indent(2) << "reg buf" << i << "_valid_reg: UInt<1>, clk with: (reset => (reset, " - << UInt(1, 0) << "))\n"; - module << indent(2) << "reg buf" << i << "_data_reg: " << to_firrtl_type(&n->output(0)->type()) - << ", clk\n"; - } - for (size_t i = 0; i <= capacity; ++i) - { - module << indent(2) << "wire in_consumed" << i << ": UInt<1>\n"; - module << indent(2) << "wire shift_out" << i << ": UInt<1>\n"; - } - auto o0 = n->output(0); - // connect out to buf0 - module << indent(2) << valid(o0) << " <= buf0_valid_reg\n"; - module << indent(2) << data(o0) << " <= buf0_data_reg\n"; - auto i0 = n->input(0); - // buf is ready if last one is empty - module << indent(2) << ready(i0) << " <= not(buf" << capacity - 1 << "_valid_reg)\n"; - - // shift register with insertion into earliest free slot - module << indent(2) << "shift_out0 <= " << fire(o0) << "\n"; - module << indent(2) << "in_consumed0 <= " << UInt(1, 0) << "\n"; - if (o->pass_through) - { - module << indent(2) << valid(o0) << " <= or(" << valid(i0) << ", buf0_valid_reg)\n"; - module << indent(2) << "in_consumed0 <= and(not(buf0_valid_reg), " << ready(o0) << ")\n"; - module << indent(2) << "when not(buf0_valid_reg):\n"; - module << indent(3) << data(o0) << " <= " << data(i0) << "\n"; - } - // invalid pseudo slot so we can use the same logic for the last slot - module << indent(2) << "node buf" << capacity << "_valid_reg = " << UInt(1, 0) << "\n"; - module << indent(2) << "node buf" << 
capacity << "_data_reg = " << UInt(1, 0) << "\n"; - for (size_t i = 0; i < capacity; ++i) - { - module << indent(2) << "node will_be_empty" << i << " = or(shift_out" << i << ", not(buf" << i - << "_valid_reg))\n"; - module << indent(2) << "in_consumed" << i + 1 << " <= in_consumed" << i << "\n"; - module << indent(2) << "node in_available" << i << " = and(" << fire(i0) << ", not(in_consumed" - << i << "))\n"; - module << indent(2) << "shift_out" << i + 1 << " <= " << UInt(1, 0) << "\n"; - module << indent(2) << "when shift_out" << i << ":\n"; - module << indent(3) << "buf" << i << "_valid_reg <= " << UInt(1, 0) << "\n"; - module << indent(2) << "when will_be_empty" << i << ":\n"; - module << indent(3) << "when buf" << i + 1 << "_valid_reg:\n"; - module << indent(4) << "buf" << i << "_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(4) << "buf" << i << "_data_reg <= buf" << i + 1 << "_data_reg\n"; - module << indent(4) << "shift_out" << i + 1 << " <= " << UInt(1, 1) << "\n"; - module << indent(3) << "else when in_available" << i << ":\n"; - module << indent(4) << "in_consumed" << i + 1 << " <= " << UInt(1, 1) << "\n"; - module << indent(4) << "buf" << i << "_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(4) << "buf" << i << "_data_reg <= " << data(i0) << "\n"; - } - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::ndmux_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - - auto ipred = n->input(0); - auto o0 = n->output(0); - - module << indent(2) << valid(o0) << " <= " << UInt(1, 0) << "\n"; - module << indent(2) << data(o0) << " is invalid\n"; - module << indent(2) << ready(ipred) << " <= " << UInt(1, 0) << "\n"; - - for (size_t i = 1; i < n->ninputs(); ++i) - { - auto in = n->input(i); - module << indent(2) << ready(in) << " <= " << 
UInt(1, 0) << "\n"; - module << indent(2) << "when and(" << valid(ipred) << ", eq(" << data(ipred) << ", " - << UInt(64, i - 1) << ")):\n"; - module << indent(3) << valid(o0) << " <= " << valid(in) << "\n"; - module << indent(3) << data(o0) << " <= " << data(in) << "\n"; - module << indent(3) << ready(in) << " <= " << ready(o0) << "\n"; - module << indent(3) << ready(ipred) << " <= and(" << ready(o0) << "," << valid(in) << ")\n"; - } - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::dmux_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - - auto ipred = n->input(0); - auto o0 = n->output(0); - - std::string any_discard_reg = UInt(1, 0); - for (size_t i = 1; i < n->ninputs(); ++i) - { - // discard and discard_reg are separate in order to allow tokens to be discarded in the same - // cycle. 
- module << indent(2) << "reg i" << i << "_discard_reg: UInt<1>, clk with: (reset => (reset, " - << UInt(1, 0) << "))\n"; - module << indent(2) << "wire i" << i << "_discard: UInt<1>\n"; - module << indent(2) << "i" << i << "_discard <= i" << i << "_discard_reg\n"; - module << indent(2) << "i" << i << "_discard_reg <= i" << i << "_discard\n"; - any_discard_reg = util::strfmt("or(", any_discard_reg, ", i", i, "_discard_reg)"); - } - module << indent(2) << "node any_discard_reg = " << any_discard_reg << "\n"; - module << indent(2) << "reg processed_reg: UInt<1>, clk with: (reset => (reset, " << UInt(1, 0) - << "))\n"; - module << indent(2) << valid(o0) << " <= " << UInt(1, 0) << "\n"; - module << indent(2) << data(o0) << " is invalid\n"; - module << indent(2) << ready(ipred) << " <= " << UInt(1, 0) << "\n"; - - for (size_t i = 1; i < n->ninputs(); ++i) - { - auto in = n->input(i); - module << indent(2) << ready(in) << " <= i" << i << "_discard\n"; - // clear discard reg on fire - module << indent(2) << "when " << fire(in) << ":\n"; - module << indent(3) << "i" << i << "_discard_reg <= " << UInt(1, 0) << "\n"; - // pred match and no outstanding discards - module << indent(2) << "when and(and(" << valid(ipred) << ", eq(" << data(ipred) << ", " - << UInt(64, i - 1) << ")), not(any_discard_reg)):\n"; - module << indent(3) << valid(o0) << " <= " << valid(in) << "\n"; - module << indent(3) << data(o0) << " <= " << data(in) << "\n"; - module << indent(3) << ready(in) << " <= " << ready(o0) << "\n"; - module << indent(3) << ready(ipred) << " <= and(" << ready(o0) << "," << valid(in) << ")\n"; - module << indent(3) << "when not(processed_reg):\n"; - for (size_t j = 1; j < n->ninputs(); ++j) - { - if (i != j) - { - module << indent(4) << "i" << j << "_discard <= " << UInt(1, 1) << "\n"; - } - } - module << indent(4) << "processed_reg <= " << UInt(1, 1) << "\n"; - } - module << indent(2) << "when " << fire(o0) << ":\n"; - module << indent(3) << "processed_reg <= " << UInt(1, 0) 
<< "\n"; - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::merge_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - auto o0 = n->output(0); - - module << indent(2) << valid(o0) << " <= " << UInt(1, 0) << "\n"; - module << indent(2) << data(o0) << " is invalid\n"; - - for (size_t i = 0; i < n->ninputs(); ++i) - { - auto in = n->input(i); - module << indent(2) << ready(in) << " <= " << ready(o0) << "\n"; - module << indent(2) << "when " << valid(in) << ":\n"; - module << indent(3) << valid(o0) << " <= " << valid(in) << "\n"; - module << indent(3) << data(o0) << " <= " << data(in) << "\n"; - } - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::fork_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - - module << indent(2) << "; registers\n"; - for (size_t i = 0; i < n->noutputs(); ++i) - { - module << indent(2) << "reg out" << i << "_fired: UInt<1>, clk with: (reset => (reset, " - << UInt(1, 0) << "))\n"; - } - - auto i0 = n->input(0); - std::string all_fired = UInt(1, 1); // True by default - for (size_t i = 0; i < n->noutputs(); ++i) - { - auto out = n->output(i); - module << indent(2) << valid(out) << " <= and(" << valid(i0) << ", not(out" << i - << "_fired))\n"; - module << indent(2) << data(out) << " <= " << data(i0) << "\n"; - all_fired = "and(" + all_fired + ", or(" + ready(out) + ", out" + util::strfmt(i) + "_fired))"; - } - module << indent(2) << "node all_fired = " << all_fired << "\n"; - module << indent(2) << ready(i0) << " <= all_fired\n"; - module << indent(2) << "when not(all_fired):\n"; - for (size_t i = 0; i < n->noutputs(); ++i) - { - 
module << indent(3) << "when " << fire(n->output(i)) << ":\n"; - module << indent(4) << "out" << i << "_fired <= " << UInt(1, 1) << "\n"; - } - module << indent(2) << "else:\n"; - for (size_t i = 0; i < n->noutputs(); ++i) - { - module << indent(3) << "out" << i << "_fired <= " << UInt(1, 0) << "\n"; - } - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::sink_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - auto i0 = n->input(0); - module << indent(2) << ready(i0) << " <= " << UInt(1, 1) << "\n"; - - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::print_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - auto pn = dynamic_cast(&n->operation()); - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - auto i0 = n->input(0); - auto o0 = n->output(0); - module << indent(2) << " ; " << n->operation().debug_string() << "\n"; - module << indent(2) << data(o0) << " <= " << data(i0); - // handshaking - module << indent(2) + "; handshaking" + "\n"; - - module << indent(2) << valid(o0) << " <= " << valid(i0) << "\n"; - module << indent(2) << ready(i0) << " <= " << ready(o0) << "\n"; - module << indent(2) << "printf(clk, and(" << fire(i0) << ", not(reset)), \"print node " - << pn->id() << ": %x\\n\", pad(" << data(i0) << ", 64))\n"; - - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::branch_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - - auto ipred = n->input(0); - auto ival = n->input(1); - - module 
<< indent(2) << ready(ival) << " <= " << UInt(1, 0) << "\n"; - module << indent(2) << ready(ipred) << " <= " << UInt(1, 0) << "\n"; - for (size_t i = 0; i < n->noutputs(); ++i) - { - auto out = n->output(i); - module << indent(2) << valid(out) << " <= " << UInt(1, 0) << "\n"; - module << indent(2) << data(out) << " is invalid\n"; - module << indent(2) << "when and(" << valid(ipred) << ", eq(" << data(ipred) - << ", " + UInt(64, i) << ")):\n"; - module << indent(3) << ready(ival) << " <= " << ready(out) << "\n"; - module << indent(3) << ready(ipred) << " <= and(" << ready(out) << "," << valid(ival) << ")\n"; - module << indent(3) << valid(out) << " <= " << valid(ival) << "\n"; - module << indent(3) << data(out) << " <= " << data(ival) << "\n"; - } - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule & -FirrtlHLS::trigger_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - module << module_header(n); - - auto itrig = n->input(0); - auto ival = n->input(1); - auto out = n->output(0); - // inputs have to both fire at the same time - module << indent(2) << ready(itrig) << " <= and(" << ready(out) << "," << valid(ival) << ")\n"; - module << indent(2) << ready(ival) << " <= and(" << ready(out) << "," << valid(itrig) << ")\n"; - module << indent(2) << valid(out) << " <= and(" << valid(ival) << "," << valid(itrig) << ")\n"; - module << indent(2) << data(out) << " <= " << data(ival) << "\n"; - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -int -jlm_sizeof(const jlm::rvsdg::type * t) -{ - if (auto bt = dynamic_cast(t)) - { - return bt->nbits(); - } - else if (auto at = dynamic_cast(t)) - { - return jlm_sizeof(&at->element_type()) * at->nelements(); - } - else if (dynamic_cast(t)) - { - return 64; - } - else if (auto ct = 
dynamic_cast(t)) - { - return ceil(log2(ct->nalternatives())); - } - else if (dynamic_cast(t)) - { - return 1; - } - else - { - throw std::logic_error(t->debug_string() + " size of not implemented!"); - } -} - -std::string -FirrtlHLS::gep_op_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - auto o = dynamic_cast(&(n->operation())); - std::string result = "cvt(" + data(n->input(0)) + ")"; // start of with base pointer - // TODO: support structs - const jlm::rvsdg::type * pt = &o->GetPointeeType(); - for (size_t i = 1; i < n->ninputs(); ++i) - { - int bits = jlm_sizeof(pt); - if (dynamic_cast(pt)) - { - ; - } - else if (auto at = dynamic_cast(pt)) - { - pt = &at->element_type(); - } - else - { - throw std::logic_error(pt->debug_string() + " pointer not implemented!"); - } - int bytes = bits / 8; - // gep inputs are signed - auto offset = "mul(asSInt(" + data(n->input(i)) + "), cvt(" + UInt(64, bytes) + "))"; - result = "add(" + result + ", " + offset + ")"; - } - return "asUInt(" + result + ")"; -} - -std::string -FirrtlHLS::match_op_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string result; - auto o = dynamic_cast(&(n->operation())); - JLM_ASSERT(o); - if (is_identity_mapping(*o)) - { - return data(n->input(0)); - } - else - { - result = UInt(64, o->default_alternative()); - // TODO: optimize? 
- for (auto it = o->begin(); it != o->end(); it++) - { - result = "mux(eq(" + UInt(64, it->first) + ", " + data(n->input(0)) + "), " - + UInt(64, it->second) + ", " + result + ")"; - } - } - return result; -} - -std::string -FirrtlHLS::simple_op_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - if (dynamic_cast(&(n->operation()))) - { - return match_op_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return "gt(asSInt(" + data(n->input(0)) + "), asSInt(" + data(n->input(1)) + "))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "geq(asSInt(" + data(n->input(0)) + "), asSInt(" + data(n->input(1)) + "))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "leq(asSInt(" + data(n->input(0)) + "), asSInt(" + data(n->input(1)) + "))"; - } - else if (auto o = dynamic_cast(&(n->operation()))) - { - return "asUInt(pad(asSInt(" + data(n->input(0)) + "), " + util::strfmt(o->ndstbits()) + "))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return data(n->input(0)); - } - else if (dynamic_cast(&(n->operation()))) - { - return data(n->input(0)); - } - else if (dynamic_cast(&(n->operation()))) - { - return data(n->input(0)); - } - else if (dynamic_cast(&(n->operation()))) - { - return "gt(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "lt(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "lt(asSInt(" + data(n->input(0)) + "), asSInt(" + data(n->input(1)) + "))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "eq(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "add(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "mul(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "sub(" + 
data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "and(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "xor(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "or(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - // TODO: automatic conversion to static shift? - return "dshr(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (dynamic_cast(&(n->operation()))) - { - // TODO: automatic conversion to static shift? - return "asUInt(dshr(asSInt(" + data(n->input(0)) + "), " + data(n->input(1)) + "))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "asUInt(div(asSInt(" + data(n->input(0)) + "), asSInt(" + data(n->input(1)) + ")))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "asUInt(rem(asSInt(" + data(n->input(0)) + "), asSInt(" + data(n->input(1)) + ")))"; - } - else if (dynamic_cast(&(n->operation()))) - { - // TODO: automatic conversion to static shift? - // TODO: adjust shift limit (bits) - return "dshl(" + data(n->input(0)) + ", bits(" + data(n->input(1)) + ", 7, 0))"; - } - else if (dynamic_cast(&(n->operation()))) - { - return "neq(" + data(n->input(0)) + ", " + data(n->input(1)) + ")"; - } - else if (auto o = dynamic_cast(&(n->operation()))) - { - auto value = o->value(); - return util::strfmt("UInt<", value.nbits(), ">(", value.to_uint(), ")"); - } - else if (dynamic_cast(&(n->operation()))) - { - return UInt(1, 0); // TODO: Fix? 
- } - else if (auto o = dynamic_cast(&(n->operation()))) - { - return UInt(ceil(log2(o->value().nalternatives())), o->value().alternative()); - } - else if (dynamic_cast(&(n->operation()))) - { - return gep_op_to_firrtl(n); - } - else - { - throw std::logic_error(n->operation().debug_string() + " not implemented!"); - } -} - -FirrtlModule & -FirrtlHLS::single_out_simple_node_to_firrtl(const jlm::rvsdg::simple_node * n) -{ - std::string module_name = get_module_name(n); - std::ostringstream module; - - module << module_header(n); - - if (n->noutputs() != 1) - { - throw std::logic_error(n->operation().debug_string() + " has more than 1 output"); - } - module << indent(2) << "; logic\n"; - module << indent(2) << " ; " << n->operation().debug_string() << "\n"; - auto op = simple_op_to_firrtl(n); - module << indent(2) << data(n->output(0)) << " <= " << op; - // handshaking - module << indent(2) + "; handshaking" + "\n"; - - auto out = n->output(0); - - std::string inputs_valids = UInt(1, 1); // True by default - for (size_t i = 0; i < n->ninputs(); ++i) - { - inputs_valids = "and(" + inputs_valids + ", " + valid(n->input(i)) + ")"; - } - module << indent(2) << "node inputs_valid = " << inputs_valids << "\n"; - for (size_t i = 0; i < n->noutputs(); ++i) - { - module << indent(2) << valid(out) << " <= inputs_valid\n"; - } - for (size_t i = 0; i < n->ninputs(); ++i) - { - module << indent(2) + ready(n->input(i)) + "<= and(" << ready(n->output(0)) - << ", inputs_valid)\n"; - } - - modules.emplace_back(&n->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -FirrtlModule -FirrtlHLS::node_to_firrtl(const jlm::rvsdg::node * node, const int depth) -{ - // check if module for operation was already generated - for (auto pair : modules) - { - if (pair.first != nullptr && *pair.first == node->operation()) - { - return pair.second; - } - } - if (auto n = dynamic_cast(node)) - { - if (dynamic_cast(&(n->operation()))) - { - return 
mem_node_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return mem_node_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return pred_buffer_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return buffer_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return branch_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return trigger_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return sink_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return print_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return fork_to_firrtl(n); - } - else if (dynamic_cast(&(n->operation()))) - { - return merge_to_firrtl(n); - } - else if (auto o = dynamic_cast(&(n->operation()))) - { - if (o->discarding) - { - return dmux_to_firrtl(n); - } - else - { - return ndmux_to_firrtl(n); - } - } - return single_out_simple_node_to_firrtl(n); - } - else - { - throw std::logic_error(node->operation().debug_string() + " not implemented!"); - } -} - -std::string -FirrtlHLS::create_loop_instances(loop_node * ln) -{ - std::ostringstream firrtl; - auto sr = ln->subregion(); - for (const auto node : jlm::rvsdg::topdown_traverser(sr)) - { - if (dynamic_cast(node)) - { - auto node_module = node_to_firrtl(node, 2); - std::string inst_name = get_node_name(node); - if (node_module.has_mem) - { - mem_nodes.push_back(inst_name); - } - firrtl << indent(2) << "inst " << inst_name << " of " << node_module.name << "\n"; - for (size_t i = 0; i < node->noutputs(); ++i) - { - output_map[node->output(i)] = inst_name + "." 
+ get_port_name(node->output(i)); - } - } - else if (auto oln = dynamic_cast(node)) - { - firrtl << create_loop_instances(oln); - } - else - { - throw util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); - } - } - for (size_t i = 0; i < sr->narguments(); ++i) - { - auto arg = sr->argument(i); - auto ba = dynamic_cast(arg); - if (!ba) - { - JLM_ASSERT(arg->input() != nullptr); - // map to input of loop - output_map[arg] = output_map[arg->input()->origin()]; - } - else - { - auto result = ba->result(); - JLM_ASSERT(result->type() == arg->type()); - // map to end of loop (origin of associated result) - output_map[arg] = output_map[result->origin()]; - } - } - for (size_t i = 0; i < ln->noutputs(); ++i) - { - auto out = ln->output(i); - JLM_ASSERT(out->results.size() == 1); - output_map[out] = output_map[out->results.begin()->origin()]; - } - return firrtl.str(); -} - -std::string -FirrtlHLS::connect(jlm::rvsdg::region * sr) -{ - std::ostringstream firrtl; - for (const auto & node : jlm::rvsdg::topdown_traverser(sr)) - { - if (dynamic_cast(node)) - { - auto inst_name = get_node_name(node); - firrtl << indent(2) << inst_name << ".clk <= clk\n"; - firrtl << indent(2) << inst_name << ".reset <= reset\n"; - for (size_t i = 0; i < node->ninputs(); ++i) - { - auto in_name = inst_name + "." 
+ get_port_name(node->input(i)); - JLM_ASSERT(output_map.count(node->input(i)->origin())); - auto origin = output_map[node->input(i)->origin()]; - firrtl << indent(2) << origin << ".ready <= " << in_name << ".ready\n"; - firrtl << indent(2) << in_name << ".data <= " << origin << ".data\n"; - firrtl << indent(2) << in_name << ".valid <= " << origin << ".valid\n"; - } - } - else if (auto oln = dynamic_cast(node)) - { - firrtl << connect(oln->subregion()); - } - else - { - throw util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); - } - } - return firrtl.str(); -} - -FirrtlModule -FirrtlHLS::subregion_to_firrtl(jlm::rvsdg::region * sr) -{ - auto module_name = "subregion_mod" + util::strfmt(modules.size()); - std::ostringstream module; - module << indent(1) << "module " << module_name << ":\n"; - // io - module << indent(2) << "; io\n"; - module << indent(2) << "input clk: Clock\n"; - module << indent(2) << "input reset: UInt<1>\n"; - for (size_t i = 0; i < sr->narguments(); ++i) - { - module << indent(2) << "input " << get_port_name(sr->argument(i)) - << ": {flip ready: UInt<1>, valid: UInt<1>, data: " - << to_firrtl_type(&sr->argument(i)->type()) << "}\n"; - } - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << indent(2) << "output " << get_port_name(sr->result(i)) - << ": {flip ready: UInt<1>, valid: UInt<1>, data: " - << to_firrtl_type(&sr->result(i)->type()) << "}\n"; - } - module << mem_io(); - module << indent(2) << "; instances\n"; - for (size_t i = 0; i < sr->narguments(); ++i) - { - output_map[sr->argument(i)] = get_port_name(sr->argument(i)); - } - // create node modules and ios first - for (const auto node : jlm::rvsdg::topdown_traverser(sr)) - { - if (dynamic_cast(node)) - { - auto node_module = node_to_firrtl(node, 2); - std::string inst_name = get_node_name(node); - if (node_module.has_mem) - { - mem_nodes.push_back(inst_name); - } - module << indent(2) << "inst " << inst_name << " of " << 
node_module.name << "\n"; - for (size_t i = 0; i < node->noutputs(); ++i) - { - output_map[node->output(i)] = inst_name + "." + get_port_name(node->output(i)); - } - } - else if (auto oln = dynamic_cast(node)) - { - module << create_loop_instances(oln); - } - else - { - throw util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); - } - } - module << connect(sr); - for (size_t i = 0; i < sr->nresults(); ++i) - { - auto origin = output_map[sr->result(i)->origin()]; - auto result = get_port_name(sr->result(i)); - module << indent(2) << origin << ".ready <= " << result << ".ready\n"; - module << indent(2) << result << ".data <= " << origin << ".data\n"; - module << indent(2) << result << ".valid <= " << origin << ".valid\n"; - } - - module << mux_mem(mem_nodes); - - modules.emplace_back(nullptr, FirrtlModule{ module_name, module.str(), true }); - return modules.back().second; -} - -FirrtlModule -FirrtlHLS::lambda_node_to_firrtl(const llvm::lambda::node * ln) -{ - auto module_name = ln->name() + "_lambda_mod"; - auto sr = ln->subregion(); - create_node_names(sr); - std::ostringstream module; - module << indent(1) << "module " << module_name << ":\n"; - // io - module << indent(2) << "; io" - << "\n"; - module << indent(2) << "input clk: Clock" - << "\n"; - module << indent(2) << "input reset: UInt<1>" - << "\n"; - module << indent(2) << "input i: {flip ready: UInt<1>, valid: UInt<1>"; - for (size_t i = 0; i < sr->narguments(); ++i) - { - module << ", data" << i << ": " << to_firrtl_type(&sr->argument(i)->type()); - } - module << "}\n"; - module << indent(2) << "output o: {flip ready: UInt<1>, valid: UInt<1>"; - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << ", data" << i << ": " << to_firrtl_type(&sr->result(i)->type()); - } - module << "}\n"; - module << mem_io(); - // registers - module << indent(2) << "; registers" - << "\n"; - for (size_t i = 0; i < sr->narguments(); ++i) - { - module << indent(2) << "reg 
i" << i << "_valid_reg: UInt<1>, clk with: (reset => (reset, " - << UInt(1, 0) << "))\n"; - module << indent(2) << "reg i" << i - << "_data_reg: " << to_firrtl_type(&sr->argument(i)->type()) + ", clk" + "\n"; - } - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << indent(2) << "reg o" << i << "_valid_reg: UInt<1>, clk with: (reset => (reset, " - << UInt(1, 0) << "))\n"; - module << indent(2) << "reg o" << i - << "_data_reg: " << to_firrtl_type(&sr->result(i)->type()) + ", clk" + "\n"; - } - module << indent(2) << "; instances" - << "\n"; - auto sr_firrtl = subregion_to_firrtl(sr); - module << indent(2) << "inst sr of " << sr_firrtl.name << "\n"; - module << indent(2) << "sr.clk <= clk\n"; - module << indent(2) << "sr.reset <= reset\n"; - for (size_t i = 0; i < sr->narguments(); ++i) - { - module << indent(2) << "sr." << valid(sr->argument(i)) << " <= i" << i << "_valid_reg\n"; - module << indent(2) << "sr." << data(sr->argument(i)) << " <= i" << i << "_data_reg\n"; - module << indent(2) << "when and(sr." << valid(sr->argument(i)) << ", sr." - << ready(sr->argument(i)) << "):\n"; - module << indent(3) << "i" << i << "_valid_reg <= " << UInt(1, 0) << "\n"; - } - // logic - module << indent(2) << "; logic" - << "\n"; - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << indent(2) << "sr." << ready(sr->result(i)) << " <= not(o" << i << "_valid_reg)\n"; - } - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << indent(2) << "when and(sr." << valid(sr->result(i)) << ", sr." << ready(sr->result(i)) - << "):\n"; - module << indent(3) << "o" << i << "_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(3) << "o" << i << "_data_reg <= sr." 
<< get_port_name(sr->result(i)) - << ".data\n"; - } - std::string outputs_valids = UInt(1, 1); // True by default - for (size_t i = 0; i < sr->nresults(); ++i) - { - outputs_valids = "and(" + outputs_valids + ", o" + util::strfmt(i) + "_valid_reg)"; - } - module << indent(2) << "node outputs_valid = " << outputs_valids << "\n"; - module << indent(2) << "o.valid <= outputs_valid\n"; - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << indent(2) << "o.data" << i << " <= o" << i << "_data_reg\n"; - } - module << indent(2) << "when and(o.valid, o.ready):\n"; - for (size_t i = 0; i < sr->nresults(); ++i) - { - module << indent(3) << "o" << i << "_valid_reg <= " << UInt(1, 0) << "\n"; - } - std::string inputs_ready = UInt(1, 1); // True by default - for (size_t i = 0; i < sr->narguments(); ++i) - { - inputs_ready = util::strfmt("and(", inputs_ready, ", not(i", i, "_valid_reg))"); - } - module << indent(2) << "node inputs_ready = " << inputs_ready << "\n"; - module << indent(2) << "i.ready <= inputs_ready\n"; - - module << indent(2) << "when and(i.ready, i.valid):\n"; - for (size_t i = 0; i < sr->narguments(); ++i) - { - module << indent(3) << "i" << i << "_valid_reg <= " << UInt(1, 1) << "\n"; - module << indent(3) << "i" << i << "_data_reg <= i.data" << i << "\n"; - } - - module << mux_mem({ - "sr", - }); - modules.emplace_back(&ln->operation(), FirrtlModule{ module_name, module.str(), false }); - return modules.back().second; -} - -std::string -FirrtlHLS::get_module_name(const jlm::rvsdg::node * node) -{ - auto new_name = util::strfmt("op_", node->operation().debug_string(), "_", modules.size()); - // remove chars that are not valid in firrtl module names - std::replace_if(new_name.begin(), new_name.end(), isForbiddenChar, '_'); - return new_name; -} - -} diff --git a/jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp b/jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp deleted file mode 100644 index 3dff8ca21..000000000 --- a/jlm/hls/backend/rhls2firrtl/firrtl-hls.hpp +++ 
/dev/null @@ -1,199 +0,0 @@ -/* - * Copyright 2021 David Metz - * See COPYING for terms of redistribution. - */ - -#ifndef JLM_HLS_BACKEND_RHLS2FIRRTL_FIRRTL_HLS_HPP -#define JLM_HLS_BACKEND_RHLS2FIRRTL_FIRRTL_HLS_HPP - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -namespace jlm::hls -{ - -class FirrtlModule -{ -public: - FirrtlModule(const std::string & name, const std::string & firrtl, bool hasMem) - : has_mem(hasMem), - name(name), - firrtl(firrtl) - {} - -public: - bool has_mem; - std::string name; - std::string firrtl; -}; - -inline bool -is_identity_mapping(const jlm::rvsdg::match_op & op); - -int -jlm_sizeof(const jlm::rvsdg::type * t); - -class FirrtlHLS : public BaseHLS -{ - std::string - extension() override - { - return ".fir"; - } - - std::string - get_text(llvm::RvsdgModule & rm) override; - -private: - std::vector> modules; - std::vector mem_nodes; - - std::string - get_module_name(const jlm::rvsdg::node * node); - - static inline std::string - indent(size_t depth) - { - return std::string(depth * 4, ' '); - } - - static std::string - ready(jlm::rvsdg::output * port) - { - return get_port_name(port) + ".ready"; - } - - static std::string - ready(jlm::rvsdg::input * port) - { - return get_port_name(port) + ".ready"; - } - - static std::string - valid(jlm::rvsdg::output * port) - { - return get_port_name(port) + ".valid"; - } - - static std::string - valid(jlm::rvsdg::input * port) - { - return get_port_name(port) + ".valid"; - } - - static std::string - data(jlm::rvsdg::output * port) - { - return get_port_name(port) + ".data"; - } - - static std::string - data(jlm::rvsdg::input * port) - { - return get_port_name(port) + ".data"; - } - - static std::string - fire(jlm::rvsdg::output * port) - { - return "and(" + valid(port) + ", " + ready(port) + ")"; - } - - static std::string - fire(jlm::rvsdg::input * port) - { - return "and(" + valid(port) + ", " + ready(port) + ")"; - } - - static 
std::string - UInt(size_t width, size_t value) - { - return util::strfmt("UInt<", width, ">(", value, ")"); - } - - static std::string - to_firrtl_type(const jlm::rvsdg::type * type); - - std::string - mem_io(); - - std::string - mux_mem(const std::vector & mem_nodes) const; - - std::string - module_header(const jlm::rvsdg::node * node, bool has_mem_io = false); - - FirrtlModule & - mem_node_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - pred_buffer_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - buffer_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - ndmux_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - dmux_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - merge_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - fork_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - sink_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - print_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - branch_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - trigger_to_firrtl(const jlm::rvsdg::simple_node * n); - - static std::string - gep_op_to_firrtl(const jlm::rvsdg::simple_node * n); - - static std::string - match_op_to_firrtl(const jlm::rvsdg::simple_node * n); - - static std::string - simple_op_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule & - single_out_simple_node_to_firrtl(const jlm::rvsdg::simple_node * n); - - FirrtlModule - node_to_firrtl(const jlm::rvsdg::node * node, const int depth); - - std::string - create_loop_instances(hls::loop_node * ln); - - std::string - connect(jlm::rvsdg::region * sr); - - FirrtlModule - subregion_to_firrtl(jlm::rvsdg::region * sr); - - FirrtlModule - lambda_node_to_firrtl(const llvm::lambda::node * ln); -}; - -} - -#endif // JLM_HLS_BACKEND_RHLS2FIRRTL_FIRRTL_HLS_HPP diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index bb5f5fc50..b0d26d82d 
100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -603,7 +603,6 @@ JlmHlsCommand::ToString() const "-o ", OutputFolder_.to_str(), " ", - UseCirct_ ? "--circt " : "", InputFile_.to_str()); } diff --git a/jlm/tooling/Command.hpp b/jlm/tooling/Command.hpp index 1e49faff4..1244c0bc8 100644 --- a/jlm/tooling/Command.hpp +++ b/jlm/tooling/Command.hpp @@ -575,10 +575,9 @@ class JlmHlsCommand final : public Command public: ~JlmHlsCommand() noexcept override; - JlmHlsCommand(util::filepath inputFile, util::filepath outputFolder, bool useCirct) + JlmHlsCommand(util::filepath inputFile, util::filepath outputFolder) : InputFile_(std::move(inputFile)), - OutputFolder_(std::move(outputFolder)), - UseCirct_(useCirct) + OutputFolder_(std::move(outputFolder)) {} [[nodiscard]] std::string @@ -621,17 +620,15 @@ class JlmHlsCommand final : public Command Create( CommandGraph & commandGraph, const util::filepath & inputFile, - const util::filepath & outputFolder, - bool useCirct) + const util::filepath & outputFolder) { - std::unique_ptr command(new JlmHlsCommand(inputFile, outputFolder, useCirct)); + std::unique_ptr command(new JlmHlsCommand(inputFile, outputFolder)); return CommandGraph::Node::Create(commandGraph, std::move(command)); } private: util::filepath InputFile_; util::filepath OutputFolder_; - bool UseCirct_; }; /** diff --git a/jlm/tooling/CommandGraphGenerator.cpp b/jlm/tooling/CommandGraphGenerator.cpp index a73677af5..0753c67e7 100644 --- a/jlm/tooling/CommandGraphGenerator.cpp +++ b/jlm/tooling/CommandGraphGenerator.cpp @@ -356,8 +356,7 @@ JhlsCommandGraphGenerator::GenerateCommandGraph(const JhlsCommandLineOptions & c auto & hls = JlmHlsCommand::Create( *commandGraph, dynamic_cast(&m2r2.GetCommand())->OutputFile(), - commandLineOptions.OutputFile_, - commandLineOptions.UseCirct_); + commandLineOptions.OutputFile_); m2r2.AddEdge(hls); auto linkerInputFiles = util::filepath(commandLineOptions.OutputFile_.to_str() + ".re*.ll"); diff --git 
a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 19fce001c..aa6a43856 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -325,7 +325,6 @@ JlmHlsCommandLineOptions::Reset() noexcept OutputFormat_ = OutputFormat::Firrtl; HlsFunction_ = ""; ExtractHlsFunction_ = false; - UseCirct_ = false; } void @@ -995,8 +994,6 @@ JlmHlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) cl::Prefix, cl::desc("Extracts function specified by hls-function")); - cl::opt useCirct("circt", cl::Prefix, cl::desc("Use CIRCT to generate FIRRTL")); - cl::opt format( cl::values( ::clEnumValN( @@ -1019,7 +1016,6 @@ JlmHlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) CommandLineOptions_.HlsFunction_ = std::move(hlsFunction); CommandLineOptions_.OutputFiles_ = outputFolder; CommandLineOptions_.ExtractHlsFunction_ = extractHlsFunction; - CommandLineOptions_.UseCirct_ = useCirct; CommandLineOptions_.OutputFormat_ = format; return CommandLineOptions_; @@ -1159,7 +1155,7 @@ JhlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) cl::opt generateFirrtl("firrtl", cl::ValueDisallowed, cl::desc("Generate firrtl")); - cl::opt useCirct("circt", cl::Prefix, cl::desc("Use CIRCT to generate FIRRTL")); + cl::opt useCirct("circt", cl::Prefix, cl::desc("DEPRACATED - CIRCT is always used to generate FIRRTL")); cl::ParseCommandLineOptions(argc, argv); @@ -1243,7 +1239,6 @@ JhlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) CommandLineOptions_.UsePthreads_ = usePthreads; CommandLineOptions_.Md_ = mD; CommandLineOptions_.GenerateFirrtl_ = generateFirrtl; - CommandLineOptions_.UseCirct_ = useCirct; for (auto & inputFile : inputFiles) { diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index b008adb71..ea5af00d2 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -385,8 +385,7 @@ class JlmHlsCommandLineOptions final : public CommandLineOptions : 
InputFile_(""), OutputFiles_(""), OutputFormat_(OutputFormat::Firrtl), - ExtractHlsFunction_(false), - UseCirct_(false) + ExtractHlsFunction_(false) {} void @@ -397,7 +396,6 @@ class JlmHlsCommandLineOptions final : public CommandLineOptions OutputFormat OutputFormat_; std::string HlsFunction_; bool ExtractHlsFunction_; - bool UseCirct_; }; /** @@ -438,7 +436,6 @@ class JhlsCommandLineOptions final : public CommandLineOptions Suppress_(false), UsePthreads_(false), GenerateFirrtl_(false), - UseCirct_(false), Hls_(false), Md_(false), OptimizationLevel_(OptimizationLevel::O0), @@ -456,7 +453,6 @@ class JhlsCommandLineOptions final : public CommandLineOptions bool Suppress_; bool UsePthreads_; bool GenerateFirrtl_; - bool UseCirct_; bool Hls_; bool Md_; diff --git a/tools/jlm-hls/jlm-hls.cpp b/tools/jlm-hls/jlm-hls.cpp index ffa1007e4..b68b5e9b7 100644 --- a/tools/jlm-hls/jlm-hls.cpp +++ b/tools/jlm-hls/jlm-hls.cpp @@ -5,7 +5,6 @@ #include #include -#include #include #include #include @@ -78,27 +77,18 @@ main(int argc, char ** argv) jlm::hls::rvsdg2ref(*rvsdgModule, commandLineOptions.OutputFiles_.to_str() + ".ref.ll"); jlm::hls::rvsdg2rhls(*rvsdgModule); + // Writing the FIRRTL to a file and then reading it back in to convert to Verilog. + // Could potentially change to pass the FIRRTL directly to the converter, but the converter + // is based on CIRCT's Firtool library, which assumes that the FIRRTL is read from a file. + jlm::hls::RhlsToFirrtlConverter hls; + auto output = hls.ToString(*rvsdgModule); jlm::util::filepath firrtlFile(commandLineOptions.OutputFiles_.to_str() + ".fir"); - if (commandLineOptions.UseCirct_) + stringToFile(output, firrtlFile.to_str()); + jlm::util::filepath outputVerilogFile(commandLineOptions.OutputFiles_.to_str() + ".v"); + if (!jlm::hls::FirrtlToVerilogConverter::Convert(firrtlFile, outputVerilogFile)) { - // Writing the FIRRTL to a file and then reading it back in to convert to Verilog. 
- // Could potentially change to pass the FIRRTL directly to the converter, but the converter - // is based on CIRCT's Firtool library, which assumes that the FIRRTL is read from a file. - jlm::hls::RhlsToFirrtlConverter hls; - auto output = hls.ToString(*rvsdgModule); - stringToFile(output, firrtlFile.to_str()); - jlm::util::filepath outputVerilogFile(commandLineOptions.OutputFiles_.to_str() + ".v"); - if (!jlm::hls::FirrtlToVerilogConverter::Convert(firrtlFile, outputVerilogFile)) - { - std::cerr << "The FIRRTL to Verilog conversion failed.\n" << std::endl; - exit(1); - } - } - else - { - jlm::hls::FirrtlHLS hls; - auto output = hls.run(*rvsdgModule); - stringToFile(output, firrtlFile.to_str()); + std::cerr << "The FIRRTL to Verilog conversion failed.\n" << std::endl; + exit(1); } jlm::hls::VerilatorHarnessHLS vhls; From bfcad0a96ca18d0d4af6f392924d862fb78516cb Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 17 Jul 2024 10:16:29 +0200 Subject: [PATCH 017/170] Support for naming include file in HLS harness (#547) The test harness inserts an include file, which name depends on the Verilog filename passed to Verilator, and to make it possible to speficy the Verilog file name the test harness has been updated. 
--- .../rhls2firrtl/verilator-harness-hls.cpp | 6 ++++-- .../rhls2firrtl/verilator-harness-hls.hpp | 20 +++++++++++++++++++ scripts/run-hls-test.sh | 2 +- tools/jlm-hls/jlm-hls.cpp | 2 +- 4 files changed, 26 insertions(+), 4 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index 2e4bbafed..b9032b421 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -39,9 +39,11 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) "#else\n" "#include \"verilated_vcd_c.h\"\n" "#endif\n" + // Include the Verilator generated header, which provides access to Verilog signals + // The name of the header is based on the Verilog filename used as input to Verilator "#include \"V" - << file_name << ".h\"\n" - << "#define V_NAME V" << file_name << "\n" + << GetVerilogFileName().base() << ".h\"\n" + << "#define V_NAME V" << GetVerilogFileName().base() << "\n" << "#define TIMEOUT 10000000\n" "#define xstr(s) str(s)\n" "#define str(s) #s\n" diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp index 3a7b63b9f..5e0822c17 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp @@ -23,7 +23,27 @@ class VerilatorHarnessHLS : public BaseHLS std::string get_text(llvm::RvsdgModule & rm) override; +public: + /** + * Construct a Verilator harness generator. + * + * /param verilogFile The filename to the Verilog file that is to be used together with the generated harness as input to Verilator. + */ + VerilatorHarnessHLS(const util::filepath verilogFile) + : VerilogFile_(std::move(verilogFile)){}; + private: + const util::filepath VerilogFile_; + + /** + * \return The Verilog filename that is to be used together with the generated harness as input to Verilator. 
+ */ + [[nodiscard]] const util::filepath & + GetVerilogFileName() const noexcept + { + return VerilogFile_; + } + std::string convert_to_c_type(const jlm::rvsdg::type * type); diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index 88c4a749e..a685dbcd4 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=f90cce18a8868185a31e4a69902835d1df60846a +GIT_COMMIT=effbe0bff96b396fb41e7c95bb74c7c772567136 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" diff --git a/tools/jlm-hls/jlm-hls.cpp b/tools/jlm-hls/jlm-hls.cpp index b68b5e9b7..58004ad35 100644 --- a/tools/jlm-hls/jlm-hls.cpp +++ b/tools/jlm-hls/jlm-hls.cpp @@ -91,7 +91,7 @@ main(int argc, char ** argv) exit(1); } - jlm::hls::VerilatorHarnessHLS vhls; + jlm::hls::VerilatorHarnessHLS vhls(outputVerilogFile); stringToFile(vhls.run(*rvsdgModule), commandLineOptions.OutputFiles_.to_str() + ".harness.cpp"); // TODO: hide behind flag From c196e4b70590067f11ac94a2516ca55286d75fa6 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 17 Jul 2024 10:38:00 +0200 Subject: [PATCH 018/170] Move printing of region tree to rvsdg::region class (#549) This PR overhauls the printing of the region tree. It moves it to the rvsdg::region class. The plan going forward is: 1. To be able to conveniently output the tree from jlm-opt. 2. 
To annotate the tree with valuable information and use it to gather better statistics about an RVSDG (after a transformation) --- jlm/rvsdg/region.cpp | 44 +++++++++++++++++++++++++++++ jlm/rvsdg/region.hpp | 13 +++++++++ jlm/rvsdg/view.cpp | 44 ----------------------------- jlm/rvsdg/view.hpp | 6 ---- tests/jlm/rvsdg/TestRegion.cpp | 51 ++++++++++++++++++++++++++++++++++ 5 files changed, 108 insertions(+), 50 deletions(-) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 0fa575805..7ba59867e 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -359,6 +359,50 @@ region::NumRegions(const jlm::rvsdg::region & region) noexcept return numRegions; } +std::string +region::ToTree(const rvsdg::region & region) noexcept +{ + return ToTree(region, 0); +} + +std::string +region::ToTree(const rvsdg::region & region, size_t identationDepth) noexcept +{ + std::string subTree; + auto identationChar = '-'; + + // Convert current region to a string + if (region.IsRootRegion()) + { + subTree = "RootRegion\n"; + identationDepth += 1; + } + else if (region.node()->nsubregions() != 1) + { + auto indentationString = std::string(identationDepth, identationChar); + subTree += util::strfmt(indentationString, "Region[", region.index(), "]\n"); + identationDepth += 1; + } + + // Convert the region's structural nodes with their subregions to a string + for (const auto & node : region.nodes) + { + if (auto structuralNode = dynamic_cast(&node)) + { + auto identationString = std::string(identationDepth, identationChar); + auto nodeString = structuralNode->operation().debug_string(); + subTree += util::strfmt(identationString, nodeString, '\n'); + + for (size_t n = 0; n < structuralNode->nsubregions(); n++) + { + subTree += ToTree(*structuralNode->subregion(n), identationDepth + 1); + } + } + } + + return subTree; +} + size_t nnodes(const jlm::rvsdg::region * region) noexcept { diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 960945c69..4522d4f57 100644 --- 
a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -390,6 +390,16 @@ class region [[nodiscard]] static size_t NumRegions(const jlm::rvsdg::region & region) noexcept; + /** + * Converts \p region and all of its contained structural nodes with subregions to a tree in + * ASCII format. + * + * @param region The top-level region that is converted + * @return A string containing the ASCII tree of \p region + */ + [[nodiscard]] static std::string + ToTree(const rvsdg::region & region) noexcept; + region_nodes_list nodes; region_top_node_list top_nodes; @@ -397,6 +407,9 @@ class region region_bottom_node_list bottom_nodes; private: + [[nodiscard]] static std::string + ToTree(const rvsdg::region & region, size_t identationDepth) noexcept; + size_t index_; jlm::rvsdg::graph * graph_; jlm::rvsdg::structural_node * node_; diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 005d2fb1b..fde1e968a 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -170,50 +170,6 @@ view(const jlm::rvsdg::region * region, FILE * out) fflush(out); } -std::string -region_tree(const jlm::rvsdg::region * region) -{ - std::function f = - [&](const jlm::rvsdg::region * region, size_t depth) - { - std::string subtree; - if (region->node()) - { - if (region->node()->nsubregions() != 1) - { - subtree += std::string(depth, '-') + jlm::util::strfmt(region) + "\n"; - depth += 1; - } - } - else - { - subtree = "ROOT\n"; - depth += 1; - } - - for (const auto & node : region->nodes) - { - if (auto snode = dynamic_cast(&node)) - { - subtree += std::string(depth, '-') + snode->operation().debug_string() + "\n"; - for (size_t n = 0; n < snode->nsubregions(); n++) - subtree += f(snode->subregion(n), depth + 1); - } - } - - return subtree; - }; - - return f(region, 0); -} - -void -region_tree(const jlm::rvsdg::region * region, FILE * out) -{ - fputs(region_tree(region).c_str(), out); - fflush(out); -} - /* xml */ static inline std::string diff --git a/jlm/rvsdg/view.hpp b/jlm/rvsdg/view.hpp index 
07a95ee7a..76ccdb63f 100644 --- a/jlm/rvsdg/view.hpp +++ b/jlm/rvsdg/view.hpp @@ -56,12 +56,6 @@ view(const jlm::rvsdg::graph & graph, FILE * out) return view(graph.root(), out); } -std::string -region_tree(const jlm::rvsdg::region * region); - -void -region_tree(const jlm::rvsdg::region * region, FILE * out); - std::string to_xml(const jlm::rvsdg::region * region); diff --git a/tests/jlm/rvsdg/TestRegion.cpp b/tests/jlm/rvsdg/TestRegion.cpp index 2525e6fcf..ff02055cd 100644 --- a/tests/jlm/rvsdg/TestRegion.cpp +++ b/tests/jlm/rvsdg/TestRegion.cpp @@ -294,3 +294,54 @@ Test() } JLM_UNIT_TEST_REGISTER("jlm/rvsdg/TestRegion", Test) + +static int +TestToTree_EmptyRvsdg() +{ + using namespace jlm::rvsdg; + + // Arrange + graph rvsdg; + + // Act + auto tree = region::ToTree(*rvsdg.root()); + std::cout << tree << std::flush; + + // Assert + assert(tree == "RootRegion\n"); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/TestRegion-TestToTree_EmptyRvsdg", TestToTree_EmptyRvsdg) + +static int +TestToTree_RvsdgWithStructuralNodes() +{ + using namespace jlm::rvsdg; + + // Arrange + graph rvsdg; + auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 2); + jlm::tests::structural_node::create(structuralNode->subregion(1), 3); + + // Act + auto tree = region::ToTree(*rvsdg.root()); + std::cout << tree << std::flush; + + // Assert + auto numLines = std::count(tree.begin(), tree.end(), '\n'); + + // We should find '\n' 8 times: 1 root region + 2 structural nodes + 5 subregions + assert(numLines == 8); + + // Check that the last line printed looks accordingly + auto lastLine = std::string("----Region[2]\n"); + assert(tree.compare(tree.size() - lastLine.size(), lastLine.size(), lastLine) == 0); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/TestRegion-TestToTree_RvsdgWithStructuralNodes", + TestToTree_RvsdgWithStructuralNodes) From e2ed20ff8f7a502b8ded597a40f6f0a8a041745f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 18 Jul 2024 11:02:22 
+0200 Subject: [PATCH 019/170] Add printing of rvsdg tree to jlm-opt (#551) This PR adds the capability to print the region tree to `jlm-opt`. Here is an example output: ``` RootRegion -DELTA[.str.3] -DELTA[array2] -DELTA[.str.2] -DELTA[array] -DELTA[__PRETTY_FUNCTION__.main] -DELTA[.str.1] -DELTA[.str] -DELTA[points] -LAMBDA[main] --THETA ---GAMMA ----Region[0] ----Region[1] --GAMMA ---Region[0] ---Region[1] ----GAMMA -----Region[0] -----Region[1] ------GAMMA -------Region[0] -------Region[1] --------GAMMA ---------Region[0] ---------Region[1] --------GAMMA ---------Region[0] ---------Region[1] --GAMMA ---Region[0] ---Region[1] ``` --- jlm/tooling/Command.cpp | 26 +++++++ jlm/tooling/Command.hpp | 20 +++-- jlm/tooling/CommandLine.cpp | 4 + jlm/tooling/CommandLine.hpp | 1 + tests/jlm/tooling/TestJlmOptCommand.cpp | 33 ++++++++ .../tooling/TestJlmOptCommandLineParser.cpp | 78 +++++++++++++++++++ 6 files changed, 155 insertions(+), 7 deletions(-) diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index b0d26d82d..6c1802772 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -470,6 +470,28 @@ JlmOptCommand::PrintAsMlir( #endif } +void +JlmOptCommand::PrintAsRvsdgTree( + const llvm::RvsdgModule & rvsdgModule, + const util::filepath & outputFile, + util::StatisticsCollector &) +{ + auto & rootRegion = *rvsdgModule.Rvsdg().root(); + auto tree = rvsdg::region::ToTree(rootRegion); + + if (outputFile == "") + { + std::cout << tree << std::flush; + } + else + { + std::ofstream fs; + fs.open(outputFile.to_str()); + fs << tree; + fs.close(); + } +} + void JlmOptCommand::PrintRvsdgModule( llvm::RvsdgModule & rvsdgModule, @@ -493,6 +515,10 @@ JlmOptCommand::PrintRvsdgModule( { PrintAsMlir(rvsdgModule, outputFile, statisticsCollector); } + else if (outputFormat == tooling::JlmOptCommandLineOptions::OutputFormat::Tree) + { + PrintAsRvsdgTree(rvsdgModule, outputFile, statisticsCollector); + } else { JLM_UNREACHABLE("Unhandled output format."); diff 
--git a/jlm/tooling/Command.hpp b/jlm/tooling/Command.hpp index 1244c0bc8..78f3f6ee1 100644 --- a/jlm/tooling/Command.hpp +++ b/jlm/tooling/Command.hpp @@ -369,6 +369,13 @@ class JlmOptCommand final : public Command return CommandLineOptions_; } + static void + PrintRvsdgModule( + llvm::RvsdgModule & rvsdgModule, + const util::filepath & outputFile, + const JlmOptCommandLineOptions::OutputFormat & outputFormat, + util::StatisticsCollector & statisticsCollector); + private: std::unique_ptr ParseInputFile( @@ -384,13 +391,6 @@ class JlmOptCommand final : public Command ParseMlirIrFile(const util::filepath & inputFile, util::StatisticsCollector & statisticsCollector) const; - static void - PrintRvsdgModule( - llvm::RvsdgModule & rvsdgModule, - const util::filepath & outputFile, - const JlmOptCommandLineOptions::OutputFormat & outputFormat, - util::StatisticsCollector & statisticsCollector); - static void PrintAsAscii( const llvm::RvsdgModule & rvsdgModule, @@ -415,6 +415,12 @@ class JlmOptCommand final : public Command const util::filepath & outputFile, util::StatisticsCollector & statisticsCollector); + static void + PrintAsRvsdgTree( + const llvm::RvsdgModule & rvsdgModule, + const util::filepath & outputFile, + util::StatisticsCollector & statisticsCollector); + std::string ProgramName_; JlmOptCommandLineOptions CommandLineOptions_; }; diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index aa6a43856..497f598e1 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -308,6 +308,7 @@ JlmOptCommandLineOptions::GetOutputFormatCommandLineArguments() { OutputFormat::Ascii, "ascii" }, { OutputFormat::Llvm, "llvm" }, { OutputFormat::Mlir, "mlir" }, + { OutputFormat::Tree, "tree" }, { OutputFormat::Xml, "xml" } }; @@ -843,6 +844,9 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) #ifdef ENABLE_MLIR CreateOutputFormatOption(JlmOptCommandLineOptions::OutputFormat::Mlir, "Output MLIR"), #endif + 
CreateOutputFormatOption( + JlmOptCommandLineOptions::OutputFormat::Tree, + "Output Rvsdg Tree"), CreateOutputFormatOption(JlmOptCommandLineOptions::OutputFormat::Xml, "Output XML")), cl::init(JlmOptCommandLineOptions::OutputFormat::Llvm)); diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index ea5af00d2..ce3ef827c 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -54,6 +54,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions Ascii, Llvm, Mlir, + Tree, Xml, LastEnumValue // must always be the last enum value, used for iteration diff --git a/tests/jlm/tooling/TestJlmOptCommand.cpp b/tests/jlm/tooling/TestJlmOptCommand.cpp index 648933d27..613fa8131 100644 --- a/tests/jlm/tooling/TestJlmOptCommand.cpp +++ b/tests/jlm/tooling/TestJlmOptCommand.cpp @@ -5,9 +5,12 @@ #include +#include #include #include +#include + static void TestStatistics() { @@ -56,3 +59,33 @@ TestJlmOptCommand() } JLM_UNIT_TEST_REGISTER("jlm/tooling/TestJlmOptCommand", TestJlmOptCommand) + +static int +PrintRvsdgTreeToFile() +{ + using namespace jlm; + + // Arrange + util::filepath outputFile("/tmp/RvsdgTree"); + + jlm::llvm::RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); + util::StatisticsCollector statisticsCollector; + + // Act + tooling::JlmOptCommand::PrintRvsdgModule( + rvsdgModule, + outputFile, + tooling::JlmOptCommandLineOptions::OutputFormat::Tree, + statisticsCollector); + + // Assert + std::stringstream buffer; + std::ifstream istream(outputFile.to_str()); + buffer << istream.rdbuf(); + + assert(buffer.str() == "RootRegion\n"); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/tooling/TestJlmOptCommand-PrintRvsdgTreeToFile", PrintRvsdgTreeToFile) diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index 082172eda..83971e3c5 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -7,6 
+7,46 @@ #include +#include + +// FIXME: We have a similar function in TestJlcCommandLineParser.cpp. We need to clean up. +static const jlm::tooling::JlmOptCommandLineOptions & +ParseCommandLineArguments(const std::vector & commandLineArguments) +{ + auto cleanUp = [](const std::vector & array) + { + for (const auto & ptr : array) + { + delete[] ptr; + } + }; + + std::vector array; + for (const auto & commandLineArgument : commandLineArguments) + { + array.push_back(new char[commandLineArgument.size() + 1]); + strncpy(array.back(), commandLineArgument.data(), commandLineArgument.size()); + array.back()[commandLineArgument.size()] = '\0'; + } + + static jlm::tooling::JlmOptCommandLineParser commandLineParser; + const jlm::tooling::JlmOptCommandLineOptions * commandLineOptions; + try + { + commandLineOptions = + &commandLineParser.ParseCommandLineArguments(static_cast(array.size()), &array[0]); + } + catch (...) + { + cleanUp(array); + throw; + } + + cleanUp(array); + + return *commandLineOptions; +} + static void TestOptimizationCommandLineArgumentConversion() { @@ -97,3 +137,41 @@ Test() } JLM_UNIT_TEST_REGISTER("jlm/tooling/TestJlmOptCommandLineParser", Test) + +static int +OutputFormatParsing() +{ + using namespace jlm::tooling; + + auto testOutputFormatParsing = + [](const char * outputFormatString, + jlm::tooling::JlmOptCommandLineOptions::OutputFormat outputFormat) + { + // Arrange + std::vector commandLineArguments( + { "jlm-opt", "--output-format", outputFormatString, "foo.c" }); + + // Act + auto & commandLineOptions = ParseCommandLineArguments(commandLineArguments); + + // Assert + assert(commandLineOptions.GetOutputFormat() == outputFormat); + }; + + auto start = static_cast(JlmOptCommandLineOptions::OutputFormat::FirstEnumValue) + 1; + auto end = static_cast(JlmOptCommandLineOptions::OutputFormat::LastEnumValue); + + for (size_t n = start; n != end; n++) + { + auto outputFormat = static_cast(n); + auto outputFormatString = 
JlmOptCommandLineOptions::ToCommandLineArgument(outputFormat); + + testOutputFormatParsing(outputFormatString, outputFormat); + } + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/tooling/TestJlmOptCommandLineParser-OutputFormatParsing", + OutputFormatParsing) From d0449a966d9a8600642f5bb1315ba883efbebd18 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 19 Jul 2024 20:10:50 +0200 Subject: [PATCH 020/170] Implement equality operator for attributeset class (#528) This PR reimplements the internals of the attributeset class such that we can properly support `operator=()`. Closes #525 --- jlm/llvm/Makefile.sub | 1 + jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 69 +++--- jlm/llvm/frontend/LlvmModuleConversion.cpp | 109 ++++---- jlm/llvm/ir/attribute.cpp | 76 ++---- jlm/llvm/ir/attribute.hpp | 273 ++++++++------------- jlm/llvm/ir/cfg.hpp | 12 - jlm/llvm/ir/operators/lambda.hpp | 6 - jlm/util/HashSet.hpp | 2 +- tests/jlm/llvm/ir/AttributeSetTests.cpp | 64 +++++ 9 files changed, 277 insertions(+), 335 deletions(-) create mode 100644 tests/jlm/llvm/ir/AttributeSetTests.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 3a81e1760..81d878e2b 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -164,6 +164,7 @@ libllvm_TESTS += \ tests/jlm/llvm/ir/operators/TestPhi \ tests/jlm/llvm/ir/operators/test-sext \ tests/jlm/llvm/ir/operators/StoreTests \ + tests/jlm/llvm/ir/AttributeSetTests \ tests/jlm/llvm/ir/test-aggregation \ tests/jlm/llvm/ir/test-cfg \ tests/jlm/llvm/ir/test-cfg-node \ diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 8185e985c..5f8d1b7ab 100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -271,42 +271,53 @@ convert_attribute_kind(const attribute::kind & kind) return map[kind]; } -static ::llvm::AttributeSet -convert_attributes(const attributeset & as, context & ctx) +static ::llvm::Attribute +ConvertEnumAttribute(const 
llvm::enum_attribute & attribute, context & ctx) { - auto convert_attribute = [](const llvm::attribute & attribute, context & ctx) - { - auto & llvmctx = ctx.llvm_module().getContext(); + auto & llvmContext = ctx.llvm_module().getContext(); + auto kind = convert_attribute_kind(attribute.kind()); + return ::llvm::Attribute::get(llvmContext, kind); +} - if (auto sa = dynamic_cast(&attribute)) - return ::llvm::Attribute::get(llvmctx, sa->kind(), sa->value()); +static ::llvm::Attribute +ConvertIntAttribute(const llvm::int_attribute & attribute, context & ctx) +{ + auto & llvmContext = ctx.llvm_module().getContext(); + auto kind = convert_attribute_kind(attribute.kind()); + return ::llvm::Attribute::get(llvmContext, kind, attribute.value()); +} - if (typeid(attribute) == typeid(enum_attribute)) - { - auto ea = dynamic_cast(&attribute); - auto kind = convert_attribute_kind(ea->kind()); - return ::llvm::Attribute::get(llvmctx, kind); - } +static ::llvm::Attribute +ConvertTypeAttribute(const llvm::type_attribute & attribute, context & ctx) +{ + auto & llvmContext = ctx.llvm_module().getContext(); + auto kind = convert_attribute_kind(attribute.kind()); + auto type = convert_type(attribute.type(), ctx); + return ::llvm::Attribute::get(llvmContext, kind, type); +} - if (auto ia = dynamic_cast(&attribute)) - { - auto kind = convert_attribute_kind(ia->kind()); - return ::llvm::Attribute::get(llvmctx, kind, ia->value()); - } +static ::llvm::Attribute +ConvertStringAttribute(const llvm::string_attribute & attribute, context & ctx) +{ + auto & llvmContext = ctx.llvm_module().getContext(); + return ::llvm::Attribute::get(llvmContext, attribute.kind(), attribute.value()); +} - if (auto ta = dynamic_cast(&attribute)) - { - auto kind = convert_attribute_kind(ta->kind()); - auto type = convert_type(ta->type(), ctx); - return ::llvm::Attribute::get(llvmctx, kind, type); - } +static ::llvm::AttributeSet +convert_attributes(const attributeset & attributeSet, context & ctx) +{ + 
::llvm::AttrBuilder builder(ctx.llvm_module().getContext()); + for (auto & attribute : attributeSet.EnumAttributes()) + builder.addAttribute(ConvertEnumAttribute(attribute, ctx)); - JLM_UNREACHABLE("This should have never happened!"); - }; + for (auto & attribute : attributeSet.IntAttributes()) + builder.addAttribute(ConvertIntAttribute(attribute, ctx)); - ::llvm::AttrBuilder builder(ctx.llvm_module().getContext()); - for (auto & attribute : as) - builder.addAttribute(convert_attribute(attribute, ctx)); + for (auto & attribute : attributeSet.TypeAttributes()) + builder.addAttribute(ConvertTypeAttribute(attribute, ctx)); + + for (auto & attribute : attributeSet.StringAttributes()) + builder.addAttribute(ConvertStringAttribute(attribute, ctx)); return ::llvm::AttributeSet::get(ctx.llvm_module().getContext(), builder); } diff --git a/jlm/llvm/frontend/LlvmModuleConversion.cpp b/jlm/llvm/frontend/LlvmModuleConversion.cpp index 54e1eca9b..d137cfdf2 100644 --- a/jlm/llvm/frontend/LlvmModuleConversion.cpp +++ b/jlm/llvm/frontend/LlvmModuleConversion.cpp @@ -181,75 +181,78 @@ ConvertAttributeKind(const ::llvm::Attribute::AttrKind & kind) return map[kind]; } -static std::unique_ptr -convert_attribute(const ::llvm::Attribute & attribute, context & ctx) +static enum_attribute +ConvertEnumAttribute(const ::llvm::Attribute & attribute) { - auto convert_type_attribute = [](const ::llvm::Attribute & attribute, context & ctx) - { - JLM_ASSERT(attribute.isTypeAttribute()); - - if (attribute.getKindAsEnum() == ::llvm::Attribute::AttrKind::ByVal) - { - auto type = ConvertType(attribute.getValueAsType(), ctx); - return type_attribute::create_byval(std::move(type)); - } - - if (attribute.getKindAsEnum() == ::llvm::Attribute::AttrKind::StructRet) - { - auto type = ConvertType(attribute.getValueAsType(), ctx); - return type_attribute::CreateStructRetAttribute(std::move(type)); - } + JLM_ASSERT(attribute.isEnumAttribute()); + auto kind = ConvertAttributeKind(attribute.getKindAsEnum()); + 
return enum_attribute(kind); +} - JLM_UNREACHABLE("Unhandled attribute"); - }; +static int_attribute +ConvertIntAttribute(const ::llvm::Attribute & attribute) +{ + JLM_ASSERT(attribute.isIntAttribute()); + auto kind = ConvertAttributeKind(attribute.getKindAsEnum()); + return { kind, attribute.getValueAsInt() }; +} - auto convert_string_attribute = [](const ::llvm::Attribute & attribute) - { - JLM_ASSERT(attribute.isStringAttribute()); - return string_attribute::create( - attribute.getKindAsString().str(), - attribute.getValueAsString().str()); - }; +static type_attribute +ConvertTypeAttribute(const ::llvm::Attribute & attribute, context & ctx) +{ + JLM_ASSERT(attribute.isTypeAttribute()); - auto convert_enum_attribute = [](const ::llvm::Attribute & attribute) + if (attribute.getKindAsEnum() == ::llvm::Attribute::AttrKind::ByVal) { - JLM_ASSERT(attribute.isEnumAttribute()); - - auto kind = ConvertAttributeKind(attribute.getKindAsEnum()); - return enum_attribute::create(kind); - }; + auto type = ConvertType(attribute.getValueAsType(), ctx); + return { attribute::kind::ByVal, std::move(type) }; + } - auto convert_int_attribute = [](const ::llvm::Attribute & attribute) + if (attribute.getKindAsEnum() == ::llvm::Attribute::AttrKind::StructRet) { - JLM_ASSERT(attribute.isIntAttribute()); - - auto kind = ConvertAttributeKind(attribute.getKindAsEnum()); - return int_attribute::create(kind, attribute.getValueAsInt()); - }; - - if (attribute.isTypeAttribute()) - return convert_type_attribute(attribute, ctx); - - if (attribute.isStringAttribute()) - return convert_string_attribute(attribute); - - if (attribute.isEnumAttribute()) - return convert_enum_attribute(attribute); - - if (attribute.isIntAttribute()) - return convert_int_attribute(attribute); + auto type = ConvertType(attribute.getValueAsType(), ctx); + return { attribute::kind::StructRet, std::move(type) }; + } JLM_UNREACHABLE("Unhandled attribute"); } +static string_attribute +ConvertStringAttribute(const 
::llvm::Attribute & attribute) +{ + JLM_ASSERT(attribute.isStringAttribute()); + return { attribute.getKindAsString().str(), attribute.getValueAsString().str() }; +} + static attributeset convert_attributes(const ::llvm::AttributeSet & as, context & ctx) { - attributeset attributes; + attributeset attributeSet; for (auto & attribute : as) - attributes.insert(convert_attribute(attribute, ctx)); + { + if (attribute.isEnumAttribute()) + { + attributeSet.InsertEnumAttribute(ConvertEnumAttribute(attribute)); + } + else if (attribute.isIntAttribute()) + { + attributeSet.InsertIntAttribute(ConvertIntAttribute(attribute)); + } + else if (attribute.isTypeAttribute()) + { + attributeSet.InsertTypeAttribute(ConvertTypeAttribute(attribute, ctx)); + } + else if (attribute.isStringAttribute()) + { + attributeSet.InsertStringAttribute(ConvertStringAttribute(attribute)); + } + else + { + JLM_UNREACHABLE("Unhandled attribute"); + } + } - return attributes; + return attributeSet; } static std::unique_ptr diff --git a/jlm/llvm/ir/attribute.cpp b/jlm/llvm/ir/attribute.cpp index b6788599c..493f39b35 100644 --- a/jlm/llvm/ir/attribute.cpp +++ b/jlm/llvm/ir/attribute.cpp @@ -8,15 +8,9 @@ namespace jlm::llvm { -/* attribute class */ +attribute::~attribute() noexcept = default; -attribute::~attribute() -{} - -/* string attribute class */ - -string_attribute::~string_attribute() -{} +string_attribute::~string_attribute() noexcept = default; bool string_attribute::operator==(const attribute & other) const @@ -25,16 +19,7 @@ string_attribute::operator==(const attribute & other) const return sa && sa->kind() == kind() && sa->value() == value(); } -std::unique_ptr -string_attribute::copy() const -{ - return std::unique_ptr(new string_attribute(kind(), value())); -} - -/* enum attribute class */ - -enum_attribute::~enum_attribute() -{} +enum_attribute::~enum_attribute() noexcept = default; bool enum_attribute::operator==(const attribute & other) const @@ -43,16 +28,7 @@ 
enum_attribute::operator==(const attribute & other) const return ea && ea->kind() == kind(); } -std::unique_ptr -enum_attribute::copy() const -{ - return std::unique_ptr(new enum_attribute(kind())); -} - -/* integer attribute class */ - -int_attribute::~int_attribute() -{} +int_attribute::~int_attribute() noexcept = default; bool int_attribute::operator==(const attribute & other) const @@ -61,16 +37,7 @@ int_attribute::operator==(const attribute & other) const return ia && ia->kind() == kind() && ia->value() == value(); } -std::unique_ptr -int_attribute::copy() const -{ - return std::unique_ptr(new int_attribute(kind(), value())); -} - -/* type attribute class */ - -type_attribute::~type_attribute() -{} +type_attribute::~type_attribute() noexcept = default; bool type_attribute::operator==(const attribute & other) const @@ -79,37 +46,28 @@ type_attribute::operator==(const attribute & other) const return ta && ta->kind() == kind() && ta->type() == type(); } -std::unique_ptr -type_attribute::copy() const +attributeset::EnumAttributeRange +attributeset::EnumAttributes() const { - return std::make_unique(kind(), type_); + return EnumAttributes_.Items(); } -/* attribute set class */ - -attributeset & -attributeset::operator=(const attributeset & other) +attributeset::IntAttributeRange +attributeset::IntAttributes() const { - if (this == &other) - return *this; - - attributes_.clear(); - for (auto & attribute : other) - attributes_.push_back(attribute.copy()); - - return *this; + return IntAttributes_.Items(); } -attributeset::constiterator -attributeset::begin() const +attributeset::TypeAttributeRange +attributeset::TypeAttributes() const { - return constiterator(attributes_.begin()); + return TypeAttributes_.Items(); } -attributeset::constiterator -attributeset::end() const +attributeset::StringAttributeRange +attributeset::StringAttributes() const { - return constiterator(attributes_.end()); + return StringAttributes_.Items(); } } diff --git a/jlm/llvm/ir/attribute.hpp 
b/jlm/llvm/ir/attribute.hpp index fe2378819..6dab4a8c1 100644 --- a/jlm/llvm/ir/attribute.hpp +++ b/jlm/llvm/ir/attribute.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -121,19 +122,7 @@ class attribute EndAttrKinds ///< Sentinel value useful for loops }; - virtual ~attribute(); - - attribute() = default; - - attribute(const attribute &) = delete; - - attribute(attribute &&) = delete; - - attribute & - operator=(const attribute &) = delete; - - attribute & - operator=(attribute &&) = delete; + virtual ~attribute() noexcept; virtual bool operator==(const attribute &) const = 0; @@ -143,9 +132,6 @@ class attribute { return !operator==(other); } - - virtual std::unique_ptr - copy() const = 0; }; /** \brief String attribute @@ -153,39 +139,28 @@ class attribute class string_attribute final : public attribute { public: - ~string_attribute() override; + ~string_attribute() noexcept override; -private: string_attribute(const std::string & kind, const std::string & value) : kind_(kind), value_(value) {} -public: - const std::string & + [[nodiscard]] const std::string & kind() const noexcept { return kind_; } - const std::string & + [[nodiscard]] const std::string & value() const noexcept { return value_; } - virtual bool + bool operator==(const attribute &) const override; - virtual std::unique_ptr - copy() const override; - - static std::unique_ptr - create(const std::string & kind, const std::string & value) - { - return std::unique_ptr(new string_attribute(kind, value)); - } - private: std::string kind_; std::string value_; @@ -196,32 +171,21 @@ class string_attribute final : public attribute class enum_attribute : public attribute { public: - ~enum_attribute() override; + ~enum_attribute() noexcept override; -protected: - enum_attribute(const attribute::kind & kind) + explicit enum_attribute(const attribute::kind & kind) : kind_(kind) {} -public: - const attribute::kind & + [[nodiscard]] const attribute::kind & kind() const noexcept { return 
kind_; } - virtual bool + bool operator==(const attribute &) const override; - virtual std::unique_ptr - copy() const override; - - static std::unique_ptr - create(const attribute::kind & kind) - { - return std::unique_ptr(new enum_attribute(kind)); - } - private: attribute::kind kind_; }; @@ -231,33 +195,22 @@ class enum_attribute : public attribute class int_attribute final : public enum_attribute { public: - ~int_attribute() override; + ~int_attribute() noexcept override; -private: int_attribute(attribute::kind kind, uint64_t value) : enum_attribute(kind), value_(value) {} -public: - uint64_t + [[nodiscard]] uint64_t value() const noexcept { return value_; } - virtual bool + bool operator==(const attribute &) const override; - virtual std::unique_ptr - copy() const override; - - static std::unique_ptr - create(const attribute::kind & kind, uint64_t value) - { - return std::unique_ptr(new int_attribute(kind, value)); - } - private: uint64_t value_; }; @@ -267,181 +220,151 @@ class int_attribute final : public enum_attribute class type_attribute final : public enum_attribute { public: - ~type_attribute() override; + ~type_attribute() noexcept override; type_attribute(attribute::kind kind, std::shared_ptr type) : enum_attribute(kind), type_(std::move(type)) {} - const jlm::rvsdg::valuetype & + [[nodiscard]] const jlm::rvsdg::valuetype & type() const noexcept { return *type_; } - virtual bool + bool operator==(const attribute &) const override; - virtual std::unique_ptr - copy() const override; - - static std::unique_ptr - create_byval(std::shared_ptr type) - { - return std::make_unique(kind::ByVal, std::move(type)); - } - - static std::unique_ptr - CreateStructRetAttribute(std::shared_ptr type) - { - return std::make_unique(kind::StructRet, std::move(type)); - } - private: std::shared_ptr type_; }; -/** \brief Attribute set - */ -class attributeset final -{ - class constiterator; - -public: - ~attributeset() - {} - - attributeset() = default; - - 
attributeset(std::vector> attributes) - : attributes_(std::move(attributes)) - {} - - attributeset(const attributeset & other) - { - *this = other; - } - - attributeset(attributeset && other) - : attributes_(std::move(other.attributes_)) - {} +} - attributeset & - operator=(const attributeset & other); +namespace jlm::util +{ - attributeset & - operator=(attributeset && other) +template<> +struct Hash +{ + std::size_t + operator()(const jlm::llvm::enum_attribute & attribute) const noexcept { - if (this == &other) - return *this; - - attributes_ = std::move(other.attributes_); - - return *this; + return std::hash()(attribute.kind()); } +}; - constiterator - begin() const; - - constiterator - end() const; - - void - insert(const attribute & a) +template<> +struct Hash +{ + std::size_t + operator()(const jlm::llvm::int_attribute & attribute) const noexcept { - attributes_.push_back(a.copy()); + auto kindHash = std::hash()(attribute.kind()); + auto valueHash = std::hash()(attribute.value()); + return util::CombineHashes(kindHash, valueHash); } +}; - void - insert(std::unique_ptr a) +template<> +struct Hash +{ + std::size_t + operator()(const jlm::llvm::string_attribute & attribute) const noexcept { - attributes_.push_back(std::move(a)); + auto kindHash = std::hash()(attribute.kind()); + auto valueHash = std::hash()(attribute.value()); + return util::CombineHashes(kindHash, valueHash); } +}; - bool - operator==(const attributeset & other) const noexcept +template<> +struct Hash +{ + std::size_t + operator()(const jlm::llvm::type_attribute & attribute) const noexcept { - /* - FIXME: Ah, since this is not a real set, we cannot cheaply implement a comparison. 
- */ - return false; + auto kindHash = std::hash()(attribute.kind()); + auto typeHash = attribute.type().ComputeHash(); + return util::CombineHashes(kindHash, typeHash); } +}; - bool - operator!=(const attributeset & other) const noexcept - { - return !(*this == other); - } +} -private: - /* - FIXME: Implement a proper set. Elements are not unique here. - */ - std::vector> attributes_; -}; +namespace jlm::llvm +{ -/** \brief Attribute set const iterator +/** \brief Attribute set */ -class attributeset::constiterator final +class attributeset final { + using EnumAttributeHashSet = util::HashSet; + using IntAttributeHashSet = util::HashSet; + using TypeAttributeHashSet = util::HashSet; + using StringAttributeHashSet = util::HashSet; + + using EnumAttributeRange = util::iterator_range; + using IntAttributeRange = util::iterator_range; + using TypeAttributeRange = util::iterator_range; + using StringAttributeRange = util::iterator_range; + public: - using iterator_category = std::forward_iterator_tag; - using value_type = const attribute *; - using difference_type = std::ptrdiff_t; - using pointer = const attribute **; - using reference = const attribute *&; + [[nodiscard]] EnumAttributeRange + EnumAttributes() const; -private: - friend ::jlm::llvm::attributeset; + [[nodiscard]] IntAttributeRange + IntAttributes() const; -private: - constiterator(const std::vector>::const_iterator & it) - : it_(it) - {} + [[nodiscard]] TypeAttributeRange + TypeAttributes() const; -public: - const attribute * - operator->() const + [[nodiscard]] StringAttributeRange + StringAttributes() const; + + void + InsertEnumAttribute(const enum_attribute & attribute) { - return it_->get(); + EnumAttributes_.Insert(attribute); } - const attribute & - operator*() + void + InsertIntAttribute(const int_attribute & attribute) { - return *operator->(); + IntAttributes_.Insert(attribute); } - constiterator & - operator++() + void + InsertTypeAttribute(const type_attribute & attribute) { - it_++; - 
return *this; + TypeAttributes_.Insert(attribute); } - constiterator - operator++(int) + void + InsertStringAttribute(const string_attribute & attribute) { - constiterator tmp = *this; - ++*this; - return tmp; + StringAttributes_.Insert(attribute); } bool - operator==(const constiterator & other) const + operator==(const attributeset & other) const noexcept { - return it_ == other.it_; + return IntAttributes_ == other.IntAttributes_ && EnumAttributes_ == other.EnumAttributes_ + && TypeAttributes_ == other.TypeAttributes_ && StringAttributes_ == other.StringAttributes_; } bool - operator!=(const constiterator & other) const + operator!=(const attributeset & other) const noexcept { - return !operator==(other); + return !(*this == other); } private: - std::vector>::const_iterator it_; + EnumAttributeHashSet EnumAttributes_{}; + IntAttributeHashSet IntAttributes_{}; + TypeAttributeHashSet TypeAttributes_{}; + StringAttributeHashSet StringAttributes_{}; }; } diff --git a/jlm/llvm/ir/cfg.hpp b/jlm/llvm/ir/cfg.hpp index b9abc3d21..27f6d8ee9 100644 --- a/jlm/llvm/ir/cfg.hpp +++ b/jlm/llvm/ir/cfg.hpp @@ -54,18 +54,6 @@ class argument final : public variable return attributes_; } - void - add(const llvm::attribute & attribute) - { - attributes_.insert(attribute); - } - - void - add(std::unique_ptr attribute) - { - attributes_.insert(std::move(attribute)); - } - static std::unique_ptr create( const std::string & name, diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index ed4d1de4e..e702ac8c7 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -519,12 +519,6 @@ class fctargument final : public jlm::rvsdg::argument return attributes_; } - void - add(const jlm::llvm::attribute & attribute) - { - attributes_.insert(attribute); - } - void set_attributes(const jlm::llvm::attributeset & attributes) { diff --git a/jlm/util/HashSet.hpp b/jlm/util/HashSet.hpp index 06abaeb89..f2b57957d 100644 --- 
a/jlm/util/HashSet.hpp +++ b/jlm/util/HashSet.hpp @@ -24,6 +24,7 @@ class HashSet { using InternalSet = std::unordered_set; +public: class ItemConstIterator final { public: @@ -90,7 +91,6 @@ class HashSet typename InternalSet::const_iterator It_; }; -public: ~HashSet() noexcept = default; HashSet() = default; diff --git a/tests/jlm/llvm/ir/AttributeSetTests.cpp b/tests/jlm/llvm/ir/AttributeSetTests.cpp new file mode 100644 index 000000000..827d614b0 --- /dev/null +++ b/tests/jlm/llvm/ir/AttributeSetTests.cpp @@ -0,0 +1,64 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include + +#include + +#include + +static int +TestEquality() +{ + using namespace jlm::llvm; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + enum_attribute enumAttribute1(attribute::kind::AllocAlign); + enum_attribute enumAttribute2(attribute::kind::AlwaysInline); + + int_attribute intAttribute1(attribute::kind::Alignment, 4); + int_attribute intAttribute2(attribute::kind::AllocSize, 8); + + string_attribute stringAttribute1("myKind1", "myValue"); + string_attribute stringAttribute2("myKind2", "myValue"); + + type_attribute typeAttribute1(attribute::kind::ByRef, valueType); + type_attribute typeAttribute2(attribute::kind::ByVal, valueType); + + attributeset set1; + set1.InsertEnumAttribute(enumAttribute1); + set1.InsertIntAttribute(intAttribute1); + set1.InsertStringAttribute(stringAttribute1); + set1.InsertTypeAttribute(typeAttribute1); + + attributeset set2; + set2.InsertEnumAttribute(enumAttribute2); + set2.InsertIntAttribute(intAttribute2); + set2.InsertStringAttribute(stringAttribute2); + set2.InsertTypeAttribute(typeAttribute2); + + attributeset set3; + set3.InsertEnumAttribute(enumAttribute1); + set3.InsertIntAttribute(intAttribute1); + set3.InsertStringAttribute(stringAttribute1); + set3.InsertTypeAttribute(typeAttribute1); + + // Act & Assert + assert(set1 == set1); + assert(set1 != set2); + assert(set1 == set3); 
+ + assert(set2 == set2); + assert(set2 != set3); + + assert(set3 == set3); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/AttributeSetTests-TestEquality", TestEquality); From b69c33c12d31e001fca22a8dce76457aa75d6f29 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 19 Jul 2024 20:33:03 +0200 Subject: [PATCH 021/170] Remove is_gamma_argument() function (#553) This PR does the following: 1. Introduces the GammaArgument class, which represents a region argument in a gamma subregion. 2. Removes the is_gamma_argument() function. --- jlm/llvm/ir/operators/call.cpp | 4 +-- jlm/llvm/ir/operators/gamma.hpp | 15 --------- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/llvm/opt/cne.cpp | 2 +- jlm/rvsdg/gamma.cpp | 2 ++ jlm/rvsdg/gamma.hpp | 34 +++++++++++++++++++-- 7 files changed, 39 insertions(+), 24 deletions(-) diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index ab9195829..10655903c 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -190,9 +190,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) return origin; } - if (auto argument = is_gamma_argument(origin)) + if (auto gammaArgument = dynamic_cast(origin)) { - origin = argument->input()->origin(); + origin = gammaArgument->input()->origin(); continue; } diff --git a/jlm/llvm/ir/operators/gamma.hpp b/jlm/llvm/ir/operators/gamma.hpp index 9935fb348..21b1163fa 100644 --- a/jlm/llvm/ir/operators/gamma.hpp +++ b/jlm/llvm/ir/operators/gamma.hpp @@ -11,21 +11,6 @@ namespace jlm::llvm { -/* - FIXME: This should be defined in librvsdg. -*/ -static inline const rvsdg::argument * -is_gamma_argument(const rvsdg::output * output) -{ - using namespace rvsdg; - - auto a = dynamic_cast(output); - if (a && is(a->region()->node())) - return a; - - return nullptr; -} - /* FIXME: This should be defined in librvsdg. 
*/ diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 9f82515df..f910a5701 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -213,9 +213,9 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto argument = is_gamma_argument(&output)) + if (auto gammaArgument = dynamic_cast(&output)) { - MarkOutput(*argument->input()->origin()); + MarkOutput(*gammaArgument->input()->origin()); return; } diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 2bf4c9a95..8aeb5285e 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -225,7 +225,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":cv:", index); } - if (is_gamma_argument(Output_)) + if (is(Output_)) { auto dbgstr = Output_->region()->node()->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg", index); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 7ae66310b..1037af6b0 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -221,7 +221,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return true; } - if (is_gamma_argument(o1) && is_gamma_argument(o2)) + if (is(o1) && is(o2)) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); auto a1 = static_cast(o1); diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index b8bec05b7..8103424b8 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -375,6 +375,8 @@ gamma_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & sma return gamma; } +GammaArgument::~GammaArgument() noexcept = default; + } jlm::rvsdg::node_normal_form * diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 2fb419f51..20d42eb90 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -465,6 +465,30 @@ inline 
gamma_node::gamma_node(jlm::rvsdg::output * predicate, size_t nalternativ new gamma_input(this, predicate, ctltype::Create(nalternatives)))); } +/** + * Represents a region argument in a gamma subregion. + */ +class GammaArgument final : public argument +{ + friend gamma_node; + +public: + ~GammaArgument() noexcept override; + +private: + GammaArgument(rvsdg::region & region, gamma_input & input) + : argument(®ion, &input, input.Type()) + {} + + static GammaArgument & + Create(rvsdg::region & region, gamma_input & input) + { + auto gammaArgument = new GammaArgument(region, input); + region.append_argument(gammaArgument); + return *gammaArgument; + } +}; + inline jlm::rvsdg::gamma_input * gamma_node::predicate() const noexcept { @@ -486,12 +510,16 @@ gamma_node::exitvar(size_t index) const noexcept inline jlm::rvsdg::gamma_input * gamma_node::add_entryvar(jlm::rvsdg::output * origin) { - node::add_input(std::unique_ptr(new gamma_input(this, origin, origin->Type()))); + auto input = + node::add_input(std::unique_ptr(new gamma_input(this, origin, origin->Type()))); + auto gammaInput = static_cast(input); for (size_t n = 0; n < nsubregions(); n++) - argument::create(subregion(n), input(ninputs() - 1), origin->Type()); + { + GammaArgument::Create(*subregion(n), *gammaInput); + } - return static_cast(input(ninputs() - 1)); + return gammaInput; } inline jlm::rvsdg::gamma_output * From 08d2d6175b9f9198e5263c74b2e95eda01ec6526 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 20 Jul 2024 15:39:14 +0200 Subject: [PATCH 022/170] Remove is_gamma_result() function (#554) This PR does the following: 1. Introduces the GammaResult class, which represents a region result in a gamma subregion. 2. Removes the is_gamma_result() function. 
--- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 1 + jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- jlm/llvm/Makefile.sub | 1 - jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 1 + .../InterProceduralGraphConversion.cpp | 1 + jlm/llvm/ir/operators.hpp | 1 - jlm/llvm/ir/operators/call.cpp | 1 + jlm/llvm/ir/operators/gamma.hpp | 31 ------------------- jlm/llvm/ir/operators/lambda.cpp | 5 +-- jlm/llvm/opt/DeadNodeElimination.cpp | 1 + jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.hpp | 1 + jlm/llvm/opt/alias-analyses/Andersen.cpp | 1 + jlm/llvm/opt/alias-analyses/Andersen.hpp | 5 +++ .../alias-analyses/MemoryNodeProvisioning.hpp | 1 + jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 1 + jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 5 +++ jlm/llvm/opt/cne.cpp | 1 + jlm/llvm/opt/inlining.cpp | 1 + jlm/llvm/opt/inversion.cpp | 1 + jlm/llvm/opt/pull.cpp | 1 + jlm/llvm/opt/pull.hpp | 5 +++ jlm/llvm/opt/push.cpp | 1 + jlm/llvm/opt/push.hpp | 8 ++--- jlm/llvm/opt/reduction.cpp | 1 + jlm/llvm/opt/unroll.cpp | 1 + jlm/rvsdg/gamma.cpp | 2 ++ jlm/rvsdg/gamma.hpp | 28 ++++++++++++++++- tests/TestRvsdgs.hpp | 1 + tests/jlm/llvm/ir/operators/TestCall.cpp | 2 +- 30 files changed, 71 insertions(+), 43 deletions(-) delete mode 100644 jlm/llvm/ir/operators/gamma.hpp diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 0fab8065c..856a651b6 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include namespace jlm::hls diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index ead7aa339..acb696842 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -7,11 +7,11 @@ #include #include #include -#include #include #include #include #include +#include #include #include #include diff --git a/jlm/llvm/Makefile.sub 
b/jlm/llvm/Makefile.sub index 81d878e2b..224194445 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -111,7 +111,6 @@ libllvm_HEADERS = \ jlm/llvm/ir/operators/Load.hpp \ jlm/llvm/ir/operators/MemCpy.hpp \ jlm/llvm/ir/operators/MemoryStateOperations.hpp \ - jlm/llvm/ir/operators/gamma.hpp \ jlm/llvm/ir/operators/GetElementPtr.hpp \ jlm/llvm/ir/operators/theta.hpp \ jlm/llvm/ir/operators/delta.hpp \ diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index e617c8530..4cc9214f7 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 97266b7ac..fae7410d6 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/ir/operators.hpp b/jlm/llvm/ir/operators.hpp index 8c4a7c48b..2b3ec2a8e 100644 --- a/jlm/llvm/ir/operators.hpp +++ b/jlm/llvm/ir/operators.hpp @@ -9,7 +9,6 @@ #include #include #include -#include #include #include #include diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 10655903c..026730ef4 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -5,6 +5,7 @@ #include #include +#include namespace jlm::llvm { diff --git a/jlm/llvm/ir/operators/gamma.hpp b/jlm/llvm/ir/operators/gamma.hpp deleted file mode 100644 index 21b1163fa..000000000 --- a/jlm/llvm/ir/operators/gamma.hpp +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright 2020 Nico Reißmann - * See COPYING for terms of redistribution. 
- */ - -#ifndef JLM_LLVM_IR_OPERATORS_GAMMA_HPP -#define JLM_LLVM_IR_OPERATORS_GAMMA_HPP - -#include - -namespace jlm::llvm -{ - -/* - FIXME: This should be defined in librvsdg. -*/ -static inline const rvsdg::result * -is_gamma_result(const rvsdg::input * input) -{ - using namespace rvsdg; - - auto r = dynamic_cast(input); - if (r && is(r->region()->node())) - return r; - - return nullptr; -} - -} - -#endif diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 18652cfb0..b73aefbdd 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -5,6 +5,7 @@ #include #include +#include #include @@ -313,9 +314,9 @@ node::ComputeCallSummary() const continue; } - if (auto result = is_gamma_result(input)) + if (auto gammaResult = dynamic_cast(input)) { - auto output = result->output(); + auto output = gammaResult->output(); worklist.insert(worklist.end(), output->begin(), output->end()); continue; } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index f910a5701..2a9e8d493 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index fec9d2ca5..0789465e5 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -5,9 +5,9 @@ #include #include -#include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index 35c49ed97..a3b4a1ae1 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -10,6 +10,7 @@ namespace jlm::rvsdg { +class gamma_node; class theta_node; } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 
770f90e45..d723ca643 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index cbbcae521..c7e11d3a3 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -11,6 +11,11 @@ #include #include +namespace jlm::rvsdg +{ +class gamma_node; +} + namespace jlm::llvm::aa { diff --git a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp index 4fd557ce6..e32218dad 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp @@ -7,6 +7,7 @@ #define JLM_LLVM_OPT_ALIAS_ANALYSES_MEMORYNODEPROVISIONING_HPP #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 8aeb5285e..a3054aa86 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index b91a84570..6a0156b9a 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -9,6 +9,11 @@ #include #include +namespace jlm::rvsdg +{ +class gamma_node; +} + namespace jlm::llvm::aa { diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 1037af6b0..a34d28fdb 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 51445f369..d7e217864 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -6,6 +6,7 @@ #include #include 
#include +#include #include #include #include diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 0342abb4a..e1e1b237e 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 7db510944..481205936 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/pull.hpp b/jlm/llvm/opt/pull.hpp index 99c074679..259ee05cd 100644 --- a/jlm/llvm/opt/pull.hpp +++ b/jlm/llvm/opt/pull.hpp @@ -9,6 +9,11 @@ #include #include +namespace jlm::rvsdg +{ +class gamma_node; +} + namespace jlm::llvm { diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 0172a7e2e..d43df4367 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/push.hpp b/jlm/llvm/opt/push.hpp index de69728a1..7337c5779 100644 --- a/jlm/llvm/opt/push.hpp +++ b/jlm/llvm/opt/push.hpp @@ -8,15 +8,15 @@ #include -namespace jlm::llvm -{ - -namespace rvsdg +namespace jlm::rvsdg { class gamma_node; class theta_node; } +namespace jlm::llvm +{ + class RvsdgModule; /** diff --git a/jlm/llvm/opt/reduction.cpp b/jlm/llvm/opt/reduction.cpp index da4c8ed62..2e7421549 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 63d85e99b..3774eb622 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 8103424b8..9d6d92d1f 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -377,6 +377,8 @@ 
gamma_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & sma GammaArgument::~GammaArgument() noexcept = default; +GammaResult::~GammaResult() noexcept = default; + } jlm::rvsdg::node_normal_form * diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 20d42eb90..169f4b450 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -489,6 +489,30 @@ class GammaArgument final : public argument } }; +/** + * Represents a region result in a gamma subregion. + */ +class GammaResult final : public result +{ + friend gamma_node; + +public: + ~GammaResult() noexcept override; + +private: + GammaResult(rvsdg::region & region, rvsdg::output & origin, gamma_output & gammaOutput) + : result(®ion, &origin, &gammaOutput, origin.Type()) + {} + + static GammaResult & + Create(rvsdg::region & region, rvsdg::output & origin, gamma_output & gammaOutput) + { + auto gammaResult = new GammaResult(region, origin, gammaOutput); + origin.region()->append_result(gammaResult); + return *gammaResult; + } +}; + inline jlm::rvsdg::gamma_input * gamma_node::predicate() const noexcept { @@ -533,7 +557,9 @@ gamma_node::add_exitvar(const std::vector & values) auto output = exitvar(nexitvars() - 1); for (size_t n = 0; n < nsubregions(); n++) - result::create(subregion(n), values[n], output, type); + { + GammaResult::Create(*subregion(n), *values[n], *output); + } return output; } diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index e5d24787e..96a3e3956 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -5,6 +5,7 @@ #include #include +#include #include namespace jlm::tests diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index ef702ea23..b07647d80 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -9,7 +9,7 @@ #include #include - +#include #include static void From efdee1d3b820d3118f16f2ea2940451c4d9a08e2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann 
Date: Mon, 22 Jul 2024 08:24:59 +0200 Subject: [PATCH 023/170] Remove is_theta_output() function (#558) --- jlm/llvm/ir/operators/call.cpp | 4 ++-- jlm/llvm/ir/operators/theta.hpp | 9 --------- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/rvsdg/theta.hpp | 6 ------ 5 files changed, 4 insertions(+), 19 deletions(-) diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 026730ef4..1d68fbaa9 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -197,9 +197,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) continue; } - if (auto output = is_theta_output(origin)) + if (auto thetaOutput = dynamic_cast(origin)) { - if (auto input = invariantInput(*output)) + if (auto input = invariantInput(*thetaOutput)) { origin = input->origin(); continue; diff --git a/jlm/llvm/ir/operators/theta.hpp b/jlm/llvm/ir/operators/theta.hpp index e7dcc80a5..ee6a69903 100644 --- a/jlm/llvm/ir/operators/theta.hpp +++ b/jlm/llvm/ir/operators/theta.hpp @@ -38,15 +38,6 @@ is_theta_result(const jlm::rvsdg::input * input) return nullptr; } -/* - FIXME: This function exists in librvsdg, but is currently (2020-05-21) broken. 
-*/ -static inline const jlm::rvsdg::theta_output * -is_theta_output(const jlm::rvsdg::output * output) -{ - return dynamic_cast(output); -} - } #endif diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 2a9e8d493..cb8379dbb 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -220,7 +220,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto thetaOutput = is_theta_output(&output)) + if (auto thetaOutput = dynamic_cast(&output)) { MarkOutput(*thetaOutput->node()->predicate()->origin()); MarkOutput(*thetaOutput->result()->origin()); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index a3054aa86..7b52f33a6 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -238,7 +238,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":arg", index); } - if (is_theta_output(Output_)) + if (is(Output_)) { auto dbgstr = jlm::rvsdg::node_output::node(Output_)->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 50fca1631..34759b9e3 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -355,12 +355,6 @@ class theta_output final : public structural_output jlm::rvsdg::theta_input * input_; }; -static inline bool -is_theta_output(const jlm::rvsdg::theta_output * output) noexcept -{ - return dynamic_cast(output) != nullptr; -} - static inline bool is_invariant(const jlm::rvsdg::theta_output * output) noexcept { From 2c409d32038026ce5ab204f4cdd0fee6fb12f52f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 23 Jul 2024 23:24:47 +0200 Subject: [PATCH 024/170] Remove is_theta_argument() function (#559) This PR does the following: 1. Introduces the ThetaArgument class, which represents a region argument in a theta subregion. 2. 
Removes the is_gamma_argument() function. --- jlm/llvm/ir/operators/call.cpp | 6 ++--- jlm/llvm/ir/operators/theta.hpp | 12 ---------- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/llvm/opt/cne.cpp | 2 +- jlm/rvsdg/theta.cpp | 6 +++-- jlm/rvsdg/theta.hpp | 26 +++++++++++++++++++++ 7 files changed, 36 insertions(+), 20 deletions(-) diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 1d68fbaa9..6175e4939 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -99,7 +99,7 @@ invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutpu if (auto thetaOutput = dynamic_cast(&output)) return invariantInput(*thetaOutput, invariantOutputs); - if (auto thetaArgument = is_theta_argument(&output)) + if (auto thetaArgument = dynamic_cast(&output)) { auto thetaInput = static_cast(thetaArgument->input()); return invariantInput(*thetaInput->output(), invariantOutputs); @@ -208,9 +208,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) return origin; } - if (auto argument = is_theta_argument(origin)) + if (auto thetaArgument = dynamic_cast(origin)) { - if (auto input = invariantInput(*argument)) + if (auto input = invariantInput(*thetaArgument)) { origin = input->origin(); continue; diff --git a/jlm/llvm/ir/operators/theta.hpp b/jlm/llvm/ir/operators/theta.hpp index ee6a69903..cbd4c2b98 100644 --- a/jlm/llvm/ir/operators/theta.hpp +++ b/jlm/llvm/ir/operators/theta.hpp @@ -14,18 +14,6 @@ namespace jlm::llvm /* FIXME: This should be defined in librvsdg. 
*/ -static inline const jlm::rvsdg::argument * -is_theta_argument(const jlm::rvsdg::output * output) -{ - using namespace jlm::rvsdg; - - auto a = dynamic_cast(output); - if (a && is(a->region()->node())) - return a; - - return nullptr; -} - static inline const jlm::rvsdg::result * is_theta_result(const jlm::rvsdg::input * input) { diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index cb8379dbb..b218ce99e 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -228,7 +228,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto thetaArgument = is_theta_argument(&output)) + if (auto thetaArgument = dynamic_cast(&output)) { auto thetaInput = util::AssertedCast(thetaArgument->input()); MarkOutput(*thetaInput->output()); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 7b52f33a6..4e03db4f6 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -232,7 +232,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":arg", index); } - if (is_theta_argument(Output_)) + if (is(Output_)) { auto dbgstr = Output_->region()->node()->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg", index); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index a34d28fdb..7fdbdcf3a 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -179,7 +179,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (o1->type() != o2->type()) return false; - if (is_theta_argument(o1) && is_theta_argument(o2)) + if (is(o1) && is(o2)) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); auto a1 = static_cast(o1); diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 230e1be7c..247e81ffc 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -43,6 +43,8 @@ 
theta_output::~theta_output() noexcept input_->output_ = nullptr; } +ThetaArgument::~ThetaArgument() noexcept = default; + /* theta node */ theta_node::~theta_node() @@ -78,8 +80,8 @@ theta_node::add_loopvar(jlm::rvsdg::output * origin) input->output_ = output; output->input_ = input; - auto argument = argument::create(subregion(), input, origin->Type()); - result::create(subregion(), argument, output, origin->Type()); + auto & thetaArgument = ThetaArgument::Create(*subregion(), *input); + result::create(subregion(), &thetaArgument, output, origin->Type()); return output; } diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 34759b9e3..fb2bea226 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -355,6 +355,32 @@ class theta_output final : public structural_output jlm::rvsdg::theta_input * input_; }; +/** + * Represents a region argument in a theta subregion. + */ +class ThetaArgument final : public argument +{ + friend theta_node; + +public: + ~ThetaArgument() noexcept override; + +private: + ThetaArgument(rvsdg::region & region, theta_input & input) + : argument(®ion, &input, input.Type()) + { + JLM_ASSERT(is(region.node())); + } + + static ThetaArgument & + Create(rvsdg::region & region, theta_input & input) + { + auto thetaArgument = new ThetaArgument(region, input); + region.append_argument(thetaArgument); + return *thetaArgument; + } +}; + static inline bool is_invariant(const jlm::rvsdg::theta_output * output) noexcept { From 87b8cc19cffcc61902f36e5a7c4596deccc8bee5 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 24 Jul 2024 08:49:25 +0200 Subject: [PATCH 025/170] Remove is_theta_result() method (#560) This PR does the following: 1. Introduces the ThetaResult class, which represents a region result in a theta subregion. 2. Removes the is_theta_result() function. 
--- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 1 + jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 1 - jlm/llvm/Makefile.sub | 1 - jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 1 + .../InterProceduralGraphConversion.cpp | 1 + jlm/llvm/ir/operators.hpp | 1 - jlm/llvm/ir/operators/call.cpp | 1 + jlm/llvm/ir/operators/lambda.cpp | 5 +-- jlm/llvm/ir/operators/theta.hpp | 31 ------------------- jlm/llvm/opt/DeadNodeElimination.cpp | 1 + jlm/llvm/opt/alias-analyses/Andersen.cpp | 1 + jlm/llvm/opt/alias-analyses/Andersen.hpp | 1 + .../alias-analyses/MemoryNodeProvisioning.hpp | 1 + jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 1 + jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 1 + jlm/llvm/opt/cne.cpp | 1 + jlm/llvm/opt/inlining.cpp | 1 + jlm/llvm/opt/inversion.cpp | 1 + jlm/llvm/opt/push.cpp | 1 + jlm/rvsdg/theta.cpp | 4 ++- jlm/rvsdg/theta.hpp | 26 ++++++++++++++++ tests/jlm/llvm/ir/operators/TestCall.cpp | 1 + 22 files changed, 47 insertions(+), 37 deletions(-) delete mode 100644 jlm/llvm/ir/operators/theta.hpp diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 856a651b6..52c457f3c 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include namespace jlm::hls diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 942726afc..1d8dffc4e 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -30,7 +30,6 @@ #include #include #include -#include #include #include #include diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 224194445..1a283ad20 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -112,7 +112,6 @@ libllvm_HEADERS = \ jlm/llvm/ir/operators/MemCpy.hpp \ jlm/llvm/ir/operators/MemoryStateOperations.hpp \ jlm/llvm/ir/operators/GetElementPtr.hpp \ - jlm/llvm/ir/operators/theta.hpp \ 
jlm/llvm/ir/operators/delta.hpp \ jlm/llvm/ir/operators/Store.hpp \ jlm/llvm/ir/operators/alloca.hpp \ diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 4cc9214f7..e5ce80c90 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index fae7410d6..897703677 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/ir/operators.hpp b/jlm/llvm/ir/operators.hpp index 2b3ec2a8e..67e8b7858 100644 --- a/jlm/llvm/ir/operators.hpp +++ b/jlm/llvm/ir/operators.hpp @@ -18,6 +18,5 @@ #include #include #include -#include #endif diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 6175e4939..4249f1037 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace jlm::llvm { diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index b73aefbdd..2e4d19475 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include @@ -328,9 +329,9 @@ node::ComputeCallSummary() const continue; } - if (auto result = is_theta_result(input)) + if (auto thetaResult = dynamic_cast(input)) { - auto output = result->output(); + auto output = thetaResult->output(); worklist.insert(worklist.end(), output->begin(), output->end()); continue; } diff --git a/jlm/llvm/ir/operators/theta.hpp b/jlm/llvm/ir/operators/theta.hpp deleted file mode 100644 index cbd4c2b98..000000000 --- a/jlm/llvm/ir/operators/theta.hpp +++ /dev/null @@ -1,31 
+0,0 @@ -/* - * Copyright 2020 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#ifndef JLM_LLVM_IR_OPERATORS_THETA_HPP -#define JLM_LLVM_IR_OPERATORS_THETA_HPP - -#include - -namespace jlm::llvm -{ - -/* - FIXME: This should be defined in librvsdg. -*/ -static inline const jlm::rvsdg::result * -is_theta_result(const jlm::rvsdg::input * input) -{ - using namespace jlm::rvsdg; - - auto r = dynamic_cast(input); - if (r && is(r->region()->node())) - return r; - - return nullptr; -} - -} - -#endif diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index b218ce99e..698833030 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index d723ca643..e91049daa 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -6,6 +6,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index c7e11d3a3..b2bf90ce3 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -14,6 +14,7 @@ namespace jlm::rvsdg { class gamma_node; +class theta_node; } namespace jlm::llvm::aa diff --git a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp index e32218dad..b668bda57 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 4e03db4f6..fff523ccc 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ 
b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index 6a0156b9a..893757f47 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -12,6 +12,7 @@ namespace jlm::rvsdg { class gamma_node; +class theta_node; } namespace jlm::llvm::aa diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 7fdbdcf3a..10f832dd9 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index d7e217864..0d0bfe298 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index e1e1b237e..f87f583be 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -8,6 +8,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index d43df4367..523e68766 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -7,6 +7,7 @@ #include #include #include +#include #include #include #include diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 247e81ffc..b67e56403 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -45,6 +45,8 @@ theta_output::~theta_output() noexcept ThetaArgument::~ThetaArgument() noexcept = default; +ThetaResult::~ThetaResult() noexcept = default; + /* theta node */ theta_node::~theta_node() @@ -81,7 +83,7 @@ theta_node::add_loopvar(jlm::rvsdg::output * origin) output->input_ = input; auto & thetaArgument = ThetaArgument::Create(*subregion(), *input); - result::create(subregion(), &thetaArgument, output, origin->Type()); + 
ThetaResult::Create(thetaArgument, *output); return output; } diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index fb2bea226..3bc7fea2a 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -381,6 +381,32 @@ class ThetaArgument final : public argument } }; +/** + * Represents a region result in a theta subregion. + */ +class ThetaResult final : public result +{ + friend theta_node; + +public: + ~ThetaResult() noexcept override; + +private: + ThetaResult(ThetaArgument & thetaArgument, theta_output & thetaOutput) + : result(thetaArgument.region(), &thetaArgument, &thetaOutput, thetaArgument.Type()) + { + JLM_ASSERT(is(thetaArgument.region()->node())); + } + + static ThetaResult & + Create(ThetaArgument & thetaArgument, theta_output & thetaOutput) + { + auto thetaResult = new ThetaResult(thetaArgument, thetaOutput); + thetaArgument.region()->append_result(thetaResult); + return *thetaResult; + } +}; + static inline bool is_invariant(const jlm::rvsdg::theta_output * output) noexcept { diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index b07647d80..882bf624c 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -10,6 +10,7 @@ #include #include #include +#include #include static void From 25a6ebc6f442c3cc1d24e413071f1560288a5c3f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 24 Jul 2024 09:13:20 +0200 Subject: [PATCH 026/170] Remove is_theta_input() function (#564) --- jlm/rvsdg/theta.hpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 3bc7fea2a..daedd8415 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -299,12 +299,6 @@ class theta_input final : public structural_input jlm::rvsdg::theta_output * output_; }; -static inline bool -is_theta_input(const jlm::rvsdg::input * input) noexcept -{ - return dynamic_cast(input) != nullptr; -} - static inline bool is_invariant(const 
jlm::rvsdg::theta_input * input) noexcept { From 1ae995a14145546b40c92b41d4eeb6f03a81bf81 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 24 Jul 2024 09:47:49 +0200 Subject: [PATCH 027/170] Remove is_gamma_input() function (#563) --- jlm/llvm/opt/pull.cpp | 2 +- jlm/rvsdg/gamma.hpp | 6 ------ 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 481205936..72429b467 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -223,7 +223,7 @@ is_used_in_nsubregions(const jlm::rvsdg::gamma_node * gamma, const jlm::rvsdg::n { for (const auto & user : *(node->output(n))) { - JLM_ASSERT(is_gamma_input(user)); + JLM_ASSERT(is(*user)); inputs.insert(static_cast(user)); } } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 169f4b450..c248cc332 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -378,12 +378,6 @@ class gamma_input final : public structural_input } }; -static inline bool -is_gamma_input(const jlm::rvsdg::input * input) noexcept -{ - return dynamic_cast(input) != nullptr; -} - /* gamma output */ class gamma_output final : public structural_output From aa7769c9e9d57de0312238d431835a57d8458311 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 24 Jul 2024 10:09:37 +0200 Subject: [PATCH 028/170] Remove is_phi_argument() function (#561) --- jlm/llvm/opt/DeadNodeElimination.cpp | 25 ++++++++----------------- 1 file changed, 8 insertions(+), 17 deletions(-) diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 698833030..d66c207f0 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -14,13 +14,6 @@ namespace jlm::llvm { -static bool -is_phi_argument(const jlm::rvsdg::output * output) -{ - auto argument = dynamic_cast(output); - return argument && argument->region()->node() && is(argument->region()->node()); -} - /** \brief Dead Node Elimination context class * * This class keeps 
track of all the nodes and outputs that are alive. In contrast to all other @@ -264,17 +257,15 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (is_phi_argument(&output)) + if (auto phiRecursionArgument = dynamic_cast(&output)) { - auto argument = util::AssertedCast(&output); - if (argument->input()) - { - MarkOutput(*argument->input()->origin()); - } - else - { - MarkOutput(*argument->region()->result(argument->index())->origin()); - } + MarkOutput(*phiRecursionArgument->result()->origin()); + return; + } + + if (auto phiInputArgument = dynamic_cast(&output)) + { + MarkOutput(*phiInputArgument->input()->origin()); return; } From 7ec4a847b4bb5e49a34401f0505b658724c7cbc8 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 24 Jul 2024 10:35:26 +0200 Subject: [PATCH 029/170] Remove outdated functions for checking phi input/outputs (#562) This PR does the following: 1. Removes `is_phi_output()` function 2. Removes `is_phi_cv()` function 3. Removes `is_phi_recvar_argument()` function 4. Removes `phi_result()` function --- jlm/llvm/ir/operators/Phi.hpp | 44 ---------------------------- jlm/llvm/ir/operators/call.cpp | 9 +++--- jlm/llvm/ir/operators/call.hpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 5 ++-- 4 files changed, 7 insertions(+), 53 deletions(-) diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index c2153c5ba..3bd6ff401 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -985,50 +985,6 @@ phi::node::RemovePhiOutputsWhere(const F & match) } -/* - FIXME: This should be defined in librvsdg. -*/ -static inline bool -is_phi_output(const jlm::rvsdg::output * output) -{ - using namespace jlm::rvsdg; - - return is(node_output::node(output)); -} - -/* - FIXME: This should be defined in librvsdg. 
-*/ -static inline bool -is_phi_cv(const jlm::rvsdg::output * output) -{ - using namespace jlm::rvsdg; - - auto a = dynamic_cast(output); - return a && is(a->region()->node()) && a->input() != nullptr; -} - -static inline bool -is_phi_recvar_argument(const jlm::rvsdg::output * output) -{ - using namespace jlm::rvsdg; - - auto a = dynamic_cast(output); - return a && is(a->region()->node()) && a->input() == nullptr; -} - -/* - FIXME: This should be defined in librvsdg. -*/ -static inline jlm::rvsdg::result * -phi_result(const jlm::rvsdg::output * output) -{ - JLM_ASSERT(is_phi_output(output)); - auto result = jlm::rvsdg::node_output::node(output)->region()->result(output->index()); - JLM_ASSERT(result->output() == output); - return result; -} - } #endif diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 4249f1037..703cea038 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -169,7 +169,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) if (is(rvsdg::node_output::node(origin))) return origin; - if (is_phi_recvar_argument(origin)) + if (is(origin)) { return origin; } @@ -220,10 +220,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) return origin; } - if (is_phi_cv(origin)) + if (auto phiInputArgument = dynamic_cast(origin)) { - auto argument = util::AssertedCast(origin); - origin = argument->input()->origin(); + origin = phiInputArgument->input()->origin(); continue; } @@ -249,7 +248,7 @@ CallNode::ClassifyCall(const CallNode & callNode) if (auto argument = dynamic_cast(output)) { - if (is_phi_recvar_argument(argument)) + if (is(argument)) { return CallTypeClassifier::CreateRecursiveDirectCallClassifier(*argument); } diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index 5b0206060..1aa87e620 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -227,7 +227,7 @@ class CallTypeClassifier final static std::unique_ptr 
CreateRecursiveDirectCallClassifier(jlm::rvsdg::argument & output) { - JLM_ASSERT(is_phi_recvar_argument(&output)); + JLM_ASSERT(is(&output)); return std::make_unique(CallType::RecursiveDirectCall, output); } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index d66c207f0..dbd5d670d 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -250,10 +250,9 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (is_phi_output(&output)) + if (auto phiOutput = dynamic_cast(&output)) { - auto structuralOutput = util::AssertedCast(&output); - MarkOutput(*structuralOutput->results.first()->origin()); + MarkOutput(*phiOutput->result()->origin()); return; } From 1949169ed3c8af0deee3640be4b2995c2a0f5681 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 24 Jul 2024 23:31:29 +0200 Subject: [PATCH 030/170] Clean up region class tests (#565) This PR does the following: 1. Rename TestRegion.cpp to RegionTests.cpp 2. Transform single unit test to individual unit tests 3. Split out the argument and result class tests into their own respective files 4. Some minor clean-up of the individual unit tests This PR cleans up the respective unit test files before handling issues #556 and #555. 
--------- Co-authored-by: Magnus Sjalander --- jlm/rvsdg/Makefile.sub | 4 +- tests/jlm/rvsdg/ArgumentTests.cpp | 48 +++++ .../rvsdg/{TestRegion.cpp => RegionTests.cpp} | 198 +++++++----------- tests/jlm/rvsdg/ResultTests.cpp | 51 +++++ 4 files changed, 180 insertions(+), 121 deletions(-) create mode 100644 tests/jlm/rvsdg/ArgumentTests.cpp rename tests/jlm/rvsdg/{TestRegion.cpp => RegionTests.cpp} (65%) create mode 100644 tests/jlm/rvsdg/ResultTests.cpp diff --git a/jlm/rvsdg/Makefile.sub b/jlm/rvsdg/Makefile.sub index 293e47a2e..5d3fae3a1 100644 --- a/jlm/rvsdg/Makefile.sub +++ b/jlm/rvsdg/Makefile.sub @@ -69,6 +69,9 @@ librvsdg_HEADERS = \ librvsdg_TESTS = \ tests/jlm/rvsdg/bitstring/bitstring \ + tests/jlm/rvsdg/ArgumentTests \ + tests/jlm/rvsdg/RegionTests \ + tests/jlm/rvsdg/ResultTests \ tests/jlm/rvsdg/test-binary \ tests/jlm/rvsdg/test-bottomup \ tests/jlm/rvsdg/test-cse \ @@ -79,7 +82,6 @@ librvsdg_TESTS = \ tests/jlm/rvsdg/test-theta \ tests/jlm/rvsdg/test-topdown \ tests/jlm/rvsdg/test-typemismatch \ - tests/jlm/rvsdg/TestRegion \ tests/jlm/rvsdg/TestStructuralNode \ librvsdg_TEST_LIBS = \ diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp new file mode 100644 index 000000000..5eb885a78 --- /dev/null +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include +#include + +#include + +/** + * Test check for adding argument to input of wrong structural node. 
+ */ +static int +ArgumentNodeMismatch() +{ + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + jlm::rvsdg::graph graph; + auto import = graph.add_import({ valueType, "import" }); + + auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); + auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); + + auto structuralInput = structural_input::create(structuralNode1, import, valueType); + + // Act + bool inputErrorHandlerCalled = false; + try + { + argument::create(structuralNode2->subregion(0), structuralInput, valueType); + } + catch (jlm::util::error & e) + { + inputErrorHandlerCalled = true; + } + + // Assert + assert(inputErrorHandlerCalled); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/ArgumentTests-ArgumentNodeMismatch", ArgumentNodeMismatch) diff --git a/tests/jlm/rvsdg/TestRegion.cpp b/tests/jlm/rvsdg/RegionTests.cpp similarity index 65% rename from tests/jlm/rvsdg/TestRegion.cpp rename to tests/jlm/rvsdg/RegionTests.cpp index ff02055cd..aa32243f5 100644 --- a/tests/jlm/rvsdg/TestRegion.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -3,151 +3,115 @@ * See COPYING for terms of redistribution. */ -#include "test-operation.hpp" -#include "test-registry.hpp" -#include "test-types.hpp" +#include +#include +#include #include -/** - * Test check for adding argument to input of wrong structural node. 
- */ -static void -TestArgumentNodeMismatch() -{ - using namespace jlm::rvsdg; - - auto vt = jlm::tests::valuetype::Create(); - - jlm::rvsdg::graph graph; - auto import = graph.add_import({ vt, "import" }); - - auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); - auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); - - auto structuralInput = structural_input::create(structuralNode1, import, vt); - - bool inputErrorHandlerCalled = false; - try - { - argument::create(structuralNode2->subregion(0), structuralInput, vt); - } - catch (jlm::util::error & e) - { - inputErrorHandlerCalled = true; - } - - assert(inputErrorHandlerCalled); -} - -/** - * Test check for adding result to output of wrong structural node. - */ -static void -TestResultNodeMismatch() -{ - using namespace jlm::rvsdg; - - auto vt = jlm::tests::valuetype::Create(); - - jlm::rvsdg::graph graph; - auto import = graph.add_import({ vt, "import" }); - - auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); - auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); - - auto structuralInput = structural_input::create(structuralNode1, import, vt); - - auto argument = argument::create(structuralNode1->subregion(0), structuralInput, vt); - auto structuralOutput = structural_output::create(structuralNode1, vt); - - bool outputErrorHandlerCalled = false; - try - { - result::create(structuralNode2->subregion(0), argument, structuralOutput, vt); - } - catch (jlm::util::error & e) - { - outputErrorHandlerCalled = true; - } - - assert(outputErrorHandlerCalled); -} - /** * Test region::Contains(). 
*/ -static void -TestContainsMethod() +static int +Contains() { using namespace jlm::tests; - auto vt = valuetype::Create(); + // Arrange + auto valueType = valuetype::Create(); jlm::rvsdg::graph graph; - auto import = graph.add_import({ vt, "import" }); + auto import = graph.add_import({ valueType, "import" }); auto structuralNode1 = structural_node::create(graph.root(), 1); - auto structuralInput1 = jlm::rvsdg::structural_input::create(structuralNode1, import, vt); + auto structuralInput1 = jlm::rvsdg::structural_input::create(structuralNode1, import, valueType); auto regionArgument1 = - jlm::rvsdg::argument::create(structuralNode1->subregion(0), structuralInput1, vt); - unary_op::create(structuralNode1->subregion(0), vt, regionArgument1, vt); + jlm::rvsdg::argument::create(structuralNode1->subregion(0), structuralInput1, valueType); + unary_op::create(structuralNode1->subregion(0), valueType, regionArgument1, valueType); auto structuralNode2 = structural_node::create(graph.root(), 1); - auto structuralInput2 = jlm::rvsdg::structural_input::create(structuralNode2, import, vt); + auto structuralInput2 = jlm::rvsdg::structural_input::create(structuralNode2, import, valueType); auto regionArgument2 = - jlm::rvsdg::argument::create(structuralNode2->subregion(0), structuralInput2, vt); - binary_op::create(vt, vt, regionArgument2, regionArgument2); + jlm::rvsdg::argument::create(structuralNode2->subregion(0), structuralInput2, valueType); + binary_op::create(valueType, valueType, regionArgument2, regionArgument2); + // Act & Assert assert(jlm::rvsdg::region::Contains(*graph.root(), false)); assert(jlm::rvsdg::region::Contains(*graph.root(), true)); assert(jlm::rvsdg::region::Contains(*graph.root(), true)); assert(!jlm::rvsdg::region::Contains(*graph.root(), true)); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-Contains", Contains) + /** * Test region::IsRootRegion(). 
*/ -static void -TestIsRootRegion() +static int +IsRootRegion() { + // Arrange jlm::rvsdg::graph graph; auto structuralNode = jlm::tests::structural_node::create(graph.root(), 1); + // Act & Assert assert(graph.root()->IsRootRegion()); assert(!structuralNode->subregion(0)->IsRootRegion()); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-IsRootRegion", IsRootRegion) + /** - * Test region::NumRegions() + * Test region::NumRegions() with an empty Rvsdg. */ -static void -TestNumRegions() +static int +NumRegions_EmptyRvsdg() { using namespace jlm::rvsdg; - { - jlm::rvsdg::graph graph; + // Arrange + jlm::rvsdg::graph graph; + + // Act & Assert + assert(region::NumRegions(*graph.root()) == 1); + + return 0; +} - assert(region::NumRegions(*graph.root()) == 1); - } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-NumRegions_EmptyRvsdg", NumRegions_EmptyRvsdg) + +/** + * Test region::NumRegions() with non-empty Rvsdg. + */ +static int +NumRegions_NonEmptyRvsdg() +{ + using namespace jlm::rvsdg; + + // Arrange + jlm::rvsdg::graph graph; + auto structuralNode = jlm::tests::structural_node::create(graph.root(), 4); + jlm::tests::structural_node::create(structuralNode->subregion(0), 2); + jlm::tests::structural_node::create(structuralNode->subregion(3), 5); - { - jlm::rvsdg::graph graph; - auto structuralNode = jlm::tests::structural_node::create(graph.root(), 4); - jlm::tests::structural_node::create(structuralNode->subregion(0), 2); - jlm::tests::structural_node::create(structuralNode->subregion(3), 5); + // Act & Assert + assert(region::NumRegions(*graph.root()) == 1 + 4 + 2 + 5); - assert(region::NumRegions(*graph.root()) == 1 + 4 + 2 + 5); - } + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-NumRegions_NonEmptyRvsdg", NumRegions_NonEmptyRvsdg) + /** * Test region::RemoveResultsWhere() */ -static void -TestRemoveResultsWhere() +static int +RemoveResultsWhere() { // Arrange jlm::rvsdg::graph rvsdg; @@ -193,13 +157,17 @@ TestRemoveResultsWhere() 
return true; }); assert(region.nresults() == 0); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-RemoveResultsWhere", RemoveResultsWhere) + /** * Test region::RemoveArgumentsWhere() */ -static void -TestRemoveArgumentsWhere() +static int +RemoveArgumentsWhere() { // Arrange jlm::rvsdg::graph rvsdg; @@ -241,13 +209,17 @@ TestRemoveArgumentsWhere() return argument.index() == 0; }); assert(region.narguments() == 0); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-RemoveArgumentsWhere", RemoveArgumentsWhere) + /** * Test region::PruneArguments() */ -static void -TestPruneArguments() +static int +PruneArguments() { // Arrange jlm::rvsdg::graph rvsdg; @@ -275,28 +247,14 @@ TestPruneArguments() region.remove_node(node); region.PruneArguments(); assert(region.narguments() == 0); -} - -static int -Test() -{ - TestArgumentNodeMismatch(); - TestResultNodeMismatch(); - - TestContainsMethod(); - TestIsRootRegion(); - TestNumRegions(); - TestRemoveResultsWhere(); - TestRemoveArgumentsWhere(); - TestPruneArguments(); return 0; } -JLM_UNIT_TEST_REGISTER("jlm/rvsdg/TestRegion", Test) +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-PruneArguments", PruneArguments) static int -TestToTree_EmptyRvsdg() +ToTree_EmptyRvsdg() { using namespace jlm::rvsdg; @@ -313,10 +271,10 @@ TestToTree_EmptyRvsdg() return 0; } -JLM_UNIT_TEST_REGISTER("jlm/rvsdg/TestRegion-TestToTree_EmptyRvsdg", TestToTree_EmptyRvsdg) +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-ToTree_EmptyRvsdg", ToTree_EmptyRvsdg) static int -TestToTree_RvsdgWithStructuralNodes() +ToTree_RvsdgWithStructuralNodes() { using namespace jlm::rvsdg; @@ -343,5 +301,5 @@ TestToTree_RvsdgWithStructuralNodes() } JLM_UNIT_TEST_REGISTER( - "jlm/rvsdg/TestRegion-TestToTree_RvsdgWithStructuralNodes", - TestToTree_RvsdgWithStructuralNodes) + "jlm/rvsdg/RegionTests-ToTree_RvsdgWithStructuralNodes", + ToTree_RvsdgWithStructuralNodes) diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp new file 
mode 100644 index 000000000..16a0cd11c --- /dev/null +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -0,0 +1,51 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include +#include + +#include + +/** + * Test check for adding result to output of wrong structural node. + */ +static int +ResultNodeMismatch() +{ + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + jlm::rvsdg::graph graph; + auto import = graph.add_import({ valueType, "import" }); + + auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); + auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); + + auto structuralInput = structural_input::create(structuralNode1, import, valueType); + + auto argument = argument::create(structuralNode1->subregion(0), structuralInput, valueType); + auto structuralOutput = structural_output::create(structuralNode1, valueType); + + // Act + bool outputErrorHandlerCalled = false; + try + { + result::create(structuralNode2->subregion(0), argument, structuralOutput, valueType); + } + catch (jlm::util::error & e) + { + outputErrorHandlerCalled = true; + } + + // Assert + assert(outputErrorHandlerCalled); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/ResultTests-ResultNodeMismatch", ResultNodeMismatch) From 2b82ae6faaa3a2185a3241afcc5af3080c260120 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Wed, 7 Aug 2024 11:04:02 +0200 Subject: [PATCH 031/170] Make the ClangFormat action fail on format errors (#567) It turns out `for` loops in `/bin/sh` will ignore the exit codes of all commands except the very last command they run. This allowed some formatting errors to sneak into `master`. 
--- Makefile.rules | 2 +- jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp | 6 ++++-- jlm/tooling/Command.cpp | 7 +------ jlm/tooling/CommandLine.cpp | 5 ++++- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Makefile.rules b/Makefile.rules index 538d4447d..e27edb933 100644 --- a/Makefile.rules +++ b/Makefile.rules @@ -182,5 +182,5 @@ format: format-dry-run: @for FILE in $(SOURCES) $(HEADERS) ; do \ - clang-format-$(LLVM_VERSION) --dry-run --Werror --style="file:.clang-format" --verbose -i $$FILE ;\ + clang-format-$(LLVM_VERSION) --dry-run --Werror --style="file:.clang-format" --verbose -i $$FILE || exit 1 ;\ done diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp index 5e0822c17..db2a306ae 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp @@ -27,7 +27,8 @@ class VerilatorHarnessHLS : public BaseHLS /** * Construct a Verilator harness generator. * - * /param verilogFile The filename to the Verilog file that is to be used together with the generated harness as input to Verilator. + * /param verilogFile The filename to the Verilog file that is to be used together with the + * generated harness as input to Verilator. */ VerilatorHarnessHLS(const util::filepath verilogFile) : VerilogFile_(std::move(verilogFile)){}; @@ -36,7 +37,8 @@ class VerilatorHarnessHLS : public BaseHLS const util::filepath VerilogFile_; /** - * \return The Verilog filename that is to be used together with the generated harness as input to Verilator. + * \return The Verilog filename that is to be used together with the generated harness as input to + * Verilator. 
*/ [[nodiscard]] const util::filepath & GetVerilogFileName() const noexcept diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index 6c1802772..5d3e918aa 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -624,12 +624,7 @@ JlmHlsCommand::~JlmHlsCommand() noexcept = default; std::string JlmHlsCommand::ToString() const { - return util::strfmt( - "jlm-hls ", - "-o ", - OutputFolder_.to_str(), - " ", - InputFile_.to_str()); + return util::strfmt("jlm-hls ", "-o ", OutputFolder_.to_str(), " ", InputFile_.to_str()); } void diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 497f598e1..94440c7b6 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -1159,7 +1159,10 @@ JhlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) cl::opt generateFirrtl("firrtl", cl::ValueDisallowed, cl::desc("Generate firrtl")); - cl::opt useCirct("circt", cl::Prefix, cl::desc("DEPRACATED - CIRCT is always used to generate FIRRTL")); + cl::opt useCirct( + "circt", + cl::Prefix, + cl::desc("DEPRACATED - CIRCT is always used to generate FIRRTL")); cl::ParseCommandLineOptions(argc, argv); From bb67a94a97be3b210e31a275d226a4d0bb40a611 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 8 Aug 2024 07:56:01 +0200 Subject: [PATCH 032/170] Add annotation support to region tree (#566) This PR adds support for annotations to region trees. Here is an example of how it will look like: ``` RootRegion -STRUCTURAL_TEST_NODE --Region[0] --Region[1] ---STRUCTURAL_TEST_NODE ----Region[0] ----Region[1] ----Region[2] NumNodes:0 NumArguments:0 ``` The plan is to use this for introducing a pass that enables the printing of RVSDG properties for debugging as well as metric gathering for pass evaluations. 
--------- Co-authored-by: Magnus Sjalander --- jlm/rvsdg/region.cpp | 117 +++++++++++--- jlm/rvsdg/region.hpp | 70 +++++++- jlm/util/AnnotationMap.hpp | 222 ++++++++++++++++++++++++++ jlm/util/Makefile.sub | 2 + tests/jlm/rvsdg/RegionTests.cpp | 70 +++++++- tests/jlm/util/AnnotationMapTests.cpp | 113 +++++++++++++ 6 files changed, 569 insertions(+), 25 deletions(-) create mode 100644 jlm/util/AnnotationMap.hpp create mode 100644 tests/jlm/util/AnnotationMapTests.cpp diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 7ba59867e..ca727700f 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -9,6 +9,7 @@ #include #include #include +#include namespace jlm::rvsdg { @@ -360,47 +361,121 @@ region::NumRegions(const jlm::rvsdg::region & region) noexcept } std::string -region::ToTree(const rvsdg::region & region) noexcept +region::ToTree(const rvsdg::region & region, const util::AnnotationMap & annotationMap) noexcept { - return ToTree(region, 0); + std::stringstream stream; + ToTree(region, annotationMap, 0, stream); + return stream.str(); } std::string -region::ToTree(const rvsdg::region & region, size_t identationDepth) noexcept +region::ToTree(const rvsdg::region & region) noexcept +{ + std::stringstream stream; + util::AnnotationMap annotationMap; + ToTree(region, annotationMap, 0, stream); + return stream.str(); +} + +void +region::ToTree( + const rvsdg::region & region, + const util::AnnotationMap & annotationMap, + size_t indentationDepth, + std::stringstream & stream) noexcept { - std::string subTree; - auto identationChar = '-'; + static const char indentationChar = '-'; + static const char annotationSeparator = ' '; + static const char labelValueSeparator = ':'; // Convert current region to a string - if (region.IsRootRegion()) - { - subTree = "RootRegion\n"; - identationDepth += 1; - } - else if (region.node()->nsubregions() != 1) - { - auto indentationString = std::string(identationDepth, identationChar); - subTree += 
util::strfmt(indentationString, "Region[", region.index(), "]\n"); - identationDepth += 1; - } + auto indentationString = std::string(indentationDepth, indentationChar); + auto regionString = + region.IsRootRegion() ? "RootRegion" : util::strfmt("Region[", region.index(), "]"); + auto regionAnnotationString = + GetAnnotationString(®ion, annotationMap, annotationSeparator, labelValueSeparator); + + stream << indentationString << regionString << regionAnnotationString << '\n'; // Convert the region's structural nodes with their subregions to a string - for (const auto & node : region.nodes) + indentationDepth++; + indentationString = std::string(indentationDepth, indentationChar); + for (auto & node : region.nodes) { if (auto structuralNode = dynamic_cast(&node)) { - auto identationString = std::string(identationDepth, identationChar); auto nodeString = structuralNode->operation().debug_string(); - subTree += util::strfmt(identationString, nodeString, '\n'); + auto annotationString = GetAnnotationString( + structuralNode, + annotationMap, + annotationSeparator, + labelValueSeparator); + stream << indentationString << nodeString << annotationString << '\n'; for (size_t n = 0; n < structuralNode->nsubregions(); n++) { - subTree += ToTree(*structuralNode->subregion(n), identationDepth + 1); + ToTree(*structuralNode->subregion(n), annotationMap, indentationDepth + 1, stream); } } } +} + +std::string +region::GetAnnotationString( + const void * key, + const util::AnnotationMap & annotationMap, + char annotationSeparator, + char labelValueSeparator) +{ + if (!annotationMap.HasAnnotations(key)) + return ""; + + auto & annotations = annotationMap.GetAnnotations(key); + return ToString(annotations, annotationSeparator, labelValueSeparator); +} + +std::string +region::ToString( + const std::vector & annotations, + char annotationSeparator, + char labelValueSeparator) +{ + std::stringstream stream; + for (auto & annotation : annotations) + { + auto annotationString = 
ToString(annotation, labelValueSeparator); + stream << annotationSeparator << annotationString; + } + + return stream.str(); +} + +std::string +region::ToString(const util::Annotation & annotation, char labelValueSeparator) +{ + std::string value; + if (annotation.HasValueType()) + { + value = annotation.Value(); + } + else if (annotation.HasValueType()) + { + value = util::strfmt(annotation.Value()); + } + else if (annotation.HasValueType()) + { + value = util::strfmt(annotation.Value()); + } + else if (annotation.HasValueType()) + { + value = util::strfmt(annotation.Value()); + } + else + { + JLM_UNREACHABLE("Unhandled annotation type."); + } - return subTree; + return util::strfmt(annotation.Label(), labelValueSeparator, value); } size_t diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 4522d4f57..d1e801ad5 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -13,6 +13,12 @@ #include #include +namespace jlm::util +{ +class Annotation; +class AnnotationMap; +} + namespace jlm::rvsdg { @@ -392,7 +398,47 @@ class region /** * Converts \p region and all of its contained structural nodes with subregions to a tree in - * ASCII format. + * ASCII format of the following form: + * + * RootRegion \n + * -STRUCTURAL_TEST_NODE \n + * --Region[0] \n + * --Region[1] \n + * ---STRUCTURAL_TEST_NODE \n + * ----Region[0] \n + * ----Region[1] \n + * ----Region[2] NumNodes:0 NumArguments:0 \n + * + * + * The above tree has a single structural node in the RVSDG's root region. This node has two + * subregions, where the second subregion contains another structural node with three subregions. + * For the third subregion, two annotations with label NumNodes and NumArguments was provided in + * \p annotationMap. + * + * @param region The top-level region that is converted + * @param annotationMap A map with annotations for instances of \ref region%s or + * structural_node%s. + * @return A string containing the ASCII tree of \p region. 
+ */ + [[nodiscard]] static std::string + ToTree(const rvsdg::region & region, const util::AnnotationMap & annotationMap) noexcept; + + /** + * Converts \p region and all of its contained structural nodes with subregions to a tree in + * ASCII format of the following form: + * + * RootRegion \n + * -STRUCTURAL_TEST_NODE \n + * --Region[0] \n + * --Region[1] \n + * ---STRUCTURAL_TEST_NODE \n + * ----Region[0] \n + * ----Region[1] \n + * ----Region[2] \n + * + * + * The above tree has a single structural node in the RVSDG's root region. This node has two + * subregions, where the second subregion contains another structural node with three subregions. * * @param region The top-level region that is converted * @return A string containing the ASCII tree of \p region @@ -407,8 +453,28 @@ class region region_bottom_node_list bottom_nodes; private: + static void + ToTree( + const rvsdg::region & region, + const util::AnnotationMap & annotationMap, + size_t indentationDepth, + std::stringstream & stream) noexcept; + + [[nodiscard]] static std::string + GetAnnotationString( + const void * key, + const util::AnnotationMap & annotationMap, + char annotationSeparator, + char labelValueSeparator); + + [[nodiscard]] static std::string + ToString( + const std::vector & annotations, + char annotationSeparator, + char labelValueSeparator); + [[nodiscard]] static std::string - ToTree(const rvsdg::region & region, size_t identationDepth) noexcept; + ToString(const util::Annotation & annotation, char labelValueSeparator); size_t index_; jlm::rvsdg::graph * graph_; diff --git a/jlm/util/AnnotationMap.hpp b/jlm/util/AnnotationMap.hpp new file mode 100644 index 000000000..f38fd7a74 --- /dev/null +++ b/jlm/util/AnnotationMap.hpp @@ -0,0 +1,222 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. 
+ */ + +#ifndef JLM_UTIL_ANNOTATION_MAP_HPP +#define JLM_UTIL_ANNOTATION_MAP_HPP + +#include +#include + +#include +#include +#include +#include + +namespace jlm::util +{ + +/** + * Represents a simple key-value pair with a label and a value of type std::string, int64_t, + * uint64_t, or double. + */ +class Annotation final +{ + using AnnotationValue = std::variant; + +public: + Annotation(std::string_view label, std::string value) + : Label_(std::move(label)), + Value_(std::move(value)) + {} + + Annotation(std::string_view label, int64_t value) + : Label_(std::move(label)), + Value_(std::move(value)) + {} + + Annotation(std::string_view label, uint64_t value) + : Label_(std::move(label)), + Value_(std::move(value)) + {} + + Annotation(std::string_view label, double value) + : Label_(std::move(label)), + Value_(std::move(value)) + {} + + /** + * Gets the label of the annotation. + */ + [[nodiscard]] const std::string_view & + Label() const noexcept + { + return Label_; + } + + /** + * Gets the value of the annotation. Requires the annotation value to be of type \p TValue. + */ + template + [[nodiscard]] const TValue & + Value() const + { + return std::get(Value_); + } + + /** + * Checks if the type of the annotation value is equivalent to \p TValue. + * + * @return True if the value type if equivalent to \p TValue, otherwise false. + */ + template + [[nodiscard]] bool + HasValueType() const noexcept + { + return std::holds_alternative(Value_); + } + + bool + operator==(Annotation & other) const noexcept + { + return Label_ == other.Label_ && Value_ == other.Value_; + } + + bool + operator!=(Annotation & other) const noexcept + { + return !(*this == other); + } + +private: + std::string_view Label_ = {}; + AnnotationValue Value_ = {}; +}; + +/** + * Represents a simple map that associates pointers with Annotation%s. 
+ */ +class AnnotationMap final +{ + using AnnotationMapType = std::unordered_map>; + + class ConstIterator final + { + public: + using iterator_category = std::forward_iterator_tag; + using value_type = Annotation; + using difference_type = std::ptrdiff_t; + using pointer = Annotation *; + using reference = Annotation &; + + private: + friend AnnotationMap; + + explicit ConstIterator(const typename AnnotationMapType::const_iterator & it) + : It_(it) + {} + + public: + [[nodiscard]] const std::vector & + Annotations() const noexcept + { + return It_.operator->()->second; + } + + const std::vector & + operator*() const + { + return Annotations(); + } + + const std::vector * + operator->() const + { + return &Annotations(); + } + + ConstIterator & + operator++() + { + ++It_; + return *this; + } + + ConstIterator + operator++(int) + { + ConstIterator tmp = *this; + ++*this; + return tmp; + } + + bool + operator==(const ConstIterator & other) const + { + return It_ == other.It_; + } + + bool + operator!=(const ConstIterator & other) const + { + return !operator==(other); + } + + private: + typename AnnotationMapType::const_iterator It_ = {}; + }; + + using AnnotationRange = iterator_range; + +public: + /** + * Retrieves all annotations. + * + * @return An iterator_range of all the annotations. + */ + [[nodiscard]] AnnotationRange + Annotations() const + { + return { ConstIterator(Map_.begin()), ConstIterator(Map_.end()) }; + } + + /** + * Checks if an annotation with the given \p key exists. + * + * @return True if the annotation exists, otherwise false. + */ + [[nodiscard]] bool + HasAnnotations(const void * key) const noexcept + { + return Map_.find(key) != Map_.end(); + } + + /** + * Retrieves the annotation for the given \p key. The key must exist. + * + * @return A reference to an instance of Annotation. 
+ */ + [[nodiscard]] const std::vector & + GetAnnotations(const void * key) const noexcept + { + JLM_ASSERT(HasAnnotations(key)); + return Map_.at(key); + } + + /** + * Adds \p annotation with the given \p key to the map. + */ + void + AddAnnotation(const void * key, Annotation annotation) + { + Map_[key].emplace_back(std::move(annotation)); + } + +private: + AnnotationMapType Map_ = {}; +}; + +} + +#endif diff --git a/jlm/util/Makefile.sub b/jlm/util/Makefile.sub index 2e1148337..a99dba3a5 100644 --- a/jlm/util/Makefile.sub +++ b/jlm/util/Makefile.sub @@ -5,6 +5,7 @@ libutil_SOURCES = \ jlm/util/Statistics.cpp \ libutil_HEADERS = \ + jlm/util/AnnotationMap.hpp \ jlm/util/BijectiveMap.hpp \ jlm/util/callbacks.hpp \ jlm/util/common.hpp \ @@ -24,6 +25,7 @@ libutil_HEADERS = \ jlm/util/Worklist.hpp \ libutil_TESTS += \ + tests/jlm/util/AnnotationMapTests \ tests/jlm/util/test-disjointset \ tests/jlm/util/test-intrusive-hash \ tests/jlm/util/test-intrusive-list \ diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index aa32243f5..8098de1dc 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -7,6 +7,8 @@ #include #include +#include + #include /** @@ -273,6 +275,32 @@ ToTree_EmptyRvsdg() JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-ToTree_EmptyRvsdg", ToTree_EmptyRvsdg) +static int +ToTree_EmptyRvsdgWithAnnotations() +{ + using namespace jlm::rvsdg; + using namespace jlm::util; + + // Arrange + graph rvsdg; + + AnnotationMap annotationMap; + annotationMap.AddAnnotation(rvsdg.root(), Annotation("NumNodes", rvsdg.root()->nodes.size())); + + // Act + auto tree = region::ToTree(*rvsdg.root(), annotationMap); + std::cout << tree << std::flush; + + // Assert + assert(tree == "RootRegion NumNodes:0\n"); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/RegionTests-ToTree_EmptyRvsdgWithAnnotations", + ToTree_EmptyRvsdgWithAnnotations) + static int ToTree_RvsdgWithStructuralNodes() { @@ -281,6 +309,7 @@ 
ToTree_RvsdgWithStructuralNodes() // Arrange graph rvsdg; auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 2); + jlm::tests::structural_node::create(structuralNode->subregion(0), 1); jlm::tests::structural_node::create(structuralNode->subregion(1), 3); // Act @@ -290,8 +319,8 @@ ToTree_RvsdgWithStructuralNodes() // Assert auto numLines = std::count(tree.begin(), tree.end(), '\n'); - // We should find '\n' 8 times: 1 root region + 2 structural nodes + 5 subregions - assert(numLines == 8); + // We should find '\n' 8 times: 1 root region + 3 structural nodes + 6 subregions + assert(numLines == 10); // Check that the last line printed looks accordingly auto lastLine = std::string("----Region[2]\n"); @@ -303,3 +332,40 @@ ToTree_RvsdgWithStructuralNodes() JLM_UNIT_TEST_REGISTER( "jlm/rvsdg/RegionTests-ToTree_RvsdgWithStructuralNodes", ToTree_RvsdgWithStructuralNodes) + +static int +ToTree_RvsdgWithStructuralNodesAndAnnotations() +{ + using namespace jlm::rvsdg; + using namespace jlm::util; + + // Arrange + graph rvsdg; + auto structuralNode1 = jlm::tests::structural_node::create(rvsdg.root(), 2); + auto structuralNode2 = jlm::tests::structural_node::create(structuralNode1->subregion(1), 3); + auto subregion2 = structuralNode2->subregion(2); + + AnnotationMap annotationMap; + annotationMap.AddAnnotation(subregion2, Annotation("NumNodes", subregion2->nodes.size())); + annotationMap.AddAnnotation(subregion2, Annotation("NumArguments", subregion2->narguments())); + + // Act + auto tree = region::ToTree(*rvsdg.root(), annotationMap); + std::cout << tree << std::flush; + + // Assert + auto numLines = std::count(tree.begin(), tree.end(), '\n'); + + // We should find '\n' 8 times: 1 root region + 2 structural nodes + 5 subregions + assert(numLines == 8); + + // Check that the last line printed looks accordingly + auto lastLine = std::string("----Region[2] NumNodes:0 NumArguments:0\n"); + assert(tree.compare(tree.size() - lastLine.size(), lastLine.size(), 
lastLine) == 0); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/RegionTests-ToTree_RvsdgWithStructuralNodesAndAnnotations", + ToTree_RvsdgWithStructuralNodesAndAnnotations) diff --git a/tests/jlm/util/AnnotationMapTests.cpp b/tests/jlm/util/AnnotationMapTests.cpp new file mode 100644 index 000000000..0df4953b3 --- /dev/null +++ b/tests/jlm/util/AnnotationMapTests.cpp @@ -0,0 +1,113 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include + +#include + +static int +AnnotationKeyValueRetrieval() +{ + using namespace jlm::util; + + // Arrange + Annotation stringAnnotation("string", "value"); + Annotation intAnnotation("int", (int64_t)-1); + Annotation uintAnnotation("uint", (uint64_t)1); + Annotation doubleAnnotation("double", 1.0); + + // Act & Assert + assert(stringAnnotation.Label() == "string"); + assert(stringAnnotation.Value() == "value"); + assert(stringAnnotation.HasValueType()); + assert(!stringAnnotation.HasValueType()); + + assert(intAnnotation.Label() == "int"); + assert(intAnnotation.Value() == -1); + assert(intAnnotation.HasValueType()); + assert(!intAnnotation.HasValueType()); + + assert(uintAnnotation.Label() == "uint"); + assert(uintAnnotation.Value() == 1); + assert(uintAnnotation.HasValueType()); + + assert(doubleAnnotation.Label() == "double"); + assert(doubleAnnotation.Value() == 1.0); + assert(!doubleAnnotation.HasValueType()); + + try + { + (void)doubleAnnotation.Value(); + assert(false); // the line above should have thrown an exception + } + catch (...) 
+ {} + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/util/AnnotationMapTests-AnnotationKeyValueRetrieval", + AnnotationKeyValueRetrieval) + +static int +AnnotationEquality() +{ + using namespace jlm::util; + + // Arrange + Annotation stringAnnotation("string", "value"); + Annotation intAnnotation("int", (int64_t)-1); + Annotation uintAnnotation("uint", (uint64_t)1); + Annotation doubleAnnotation("double", 1.0); + + // Act & Assert + assert(stringAnnotation != doubleAnnotation); + assert(stringAnnotation != intAnnotation); + assert(stringAnnotation != uintAnnotation); + + Annotation otherStringAnnotation("string", "value"); + assert(stringAnnotation == otherStringAnnotation); + + Annotation otherIntAnnotation("uint", (int64_t)1); + assert(uintAnnotation != otherIntAnnotation); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/util/AnnotationMapTests-AnnotationEquality", AnnotationEquality) + +static int +AnnotationMap() +{ + using namespace jlm::util; + + // Arrange + Annotation annotation("foo", "bar"); + + jlm::util::AnnotationMap map; + map.AddAnnotation((const void *)&AnnotationEquality, annotation); + + // Act & Assert + assert(map.HasAnnotations((const void *)&AnnotationEquality)); + assert(!map.HasAnnotations((const void *)&AnnotationKeyValueRetrieval)); + + auto annotations = map.GetAnnotations((const void *)&AnnotationEquality); + assert(annotations.size() == 1); + assert(annotations[0] == annotation); + + for (auto & iteratedAnnotations : map.Annotations()) + { + for (auto & iteratedAnnotation : iteratedAnnotations) + { + assert(iteratedAnnotation == annotation); + } + } + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/util/AnnotationMapTests-AnnotationMap", AnnotationMap) From 4f46984805c43e53455173964a4bdefb204ef6af Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 9 Aug 2024 09:32:43 +0200 Subject: [PATCH 033/170] Generalize CIRCT and MLIR jlm dialect build scripts (#568) I was having an issue where tests of the command parser would fail due to some 
MLIR option, even when neither `--enable-mlir` nor `--enable-hls` are passed to `configure.sh`. That is a separate issue that should be fixed, but I thought it was about time I got MLIR and CIRCT running on my own machine anyway. These changes should make the build scripts work no matter where LLVM is installed. This is as a draft PR for now, to check that these changes do not break CI. PS: Some `.cpp` files used symbols imported from the standard library indirectly, so I had to add some `#include <>` to make it work. I am on arch (btw), but I am guessing these changes will become necessary on Ubuntu eventually as well. --- .gitignore | 1 + configure.sh | 14 +++-------- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 ++ jlm/hls/util/view.cpp | 2 ++ jlm/mlir/Makefile.sub | 2 +- jlm/tooling/Makefile.sub | 2 +- scripts/build-circt.sh | 27 ++++++++++++++-------- scripts/build-mlir.sh | 32 ++++++++++++++++---------- tests/jlm/rvsdg/RegionTests.cpp | 1 + tools/Makefile.sub | 4 ++-- tools/jhls/Makefile.sub | 2 +- tools/jlm-hls/Makefile.sub | 2 +- 12 files changed, 52 insertions(+), 39 deletions(-) diff --git a/.gitignore b/.gitignore index 7892961b3..34e6b526f 100644 --- a/.gitignore +++ b/.gitignore @@ -4,3 +4,4 @@ Makefile.custom docs/html build build-* +usr diff --git a/configure.sh b/configure.sh index 492dfe302..d9c9736d2 100755 --- a/configure.sh +++ b/configure.sh @@ -114,15 +114,6 @@ if [ "${ENABLE_HLS}" == "yes" ] ; then CXXFLAGS_NO_COMMENT="-Wno-error=comment" CIRCT_LDFLAGS_ARRAY=( "-L${CIRCT_PATH}/lib" - "-lMLIR" - "-lMLIRBytecodeReader" - "-lMLIRBytecodeWriter" - "-lMLIRParser" - "-lMLIRSupport" - "-lMLIRIR" - "-lMLIROptLib" - "-lMLIRFuncDialect" - "-lMLIRTransforms" "-lCIRCTAnalysisTestPasses" "-lCIRCTDependenceAnalysis" "-lCIRCTExportFIRRTL" @@ -149,6 +140,7 @@ if [ "${ENABLE_HLS}" == "yes" ] ; then "-lCIRCTExportChiselInterface" "-lCIRCTOM" "-lCIRCTSupport" + "-lMLIR" ) fi @@ -156,7 +148,7 @@ CPPFLAGS_MLIR="" if [ "${ENABLE_MLIR}" == "yes" ] ; then 
CPPFLAGS_MLIR="-I${MLIR_PATH}/include -DENABLE_MLIR" CXXFLAGS_NO_COMMENT="-Wno-error=comment" - MLIR_LDFLAGS="-L${MLIR_PATH}/lib -lMLIR -lMLIRJLM -lMLIRRVSDG" + MLIR_LDFLAGS="-L${MLIR_PATH}/lib -lMLIRJLM -lMLIRRVSDG -lMLIR" fi if [ "${ENABLE_COVERAGE}" == "yes" ] ; then @@ -181,7 +173,7 @@ MLIR_LDFLAGS=${MLIR_LDFLAGS} LLVMCONFIG=${LLVM_CONFIG_BIN} LLVM_VERSION=${LLVM_VERSION} ENABLE_COVERAGE=${ENABLE_COVERAGE} -LD_LIBRARY_PATH=$(${LLVM_CONFIG_BIN} --libdir) +export LD_LIBRARY_PATH=$(${LLVM_CONFIG_BIN} --libdir) EOF if [ ! -z "${CXX-}" ] ; then echo "CXX=${CXX}" diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index acb696842..370fda00c 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -17,6 +17,8 @@ #include #include +#include + namespace jlm::hls { diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index fffbc48a2..170390ec3 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -14,6 +14,8 @@ #include #include +#include + namespace jlm::hls { diff --git a/jlm/mlir/Makefile.sub b/jlm/mlir/Makefile.sub index 66e9dab0d..53886015b 100644 --- a/jlm/mlir/Makefile.sub +++ b/jlm/mlir/Makefile.sub @@ -21,7 +21,7 @@ libmlir_TEST_LIBS += \ libjlmtest \ libmlir_TEST_EXTRA_LDFLAGS = \ - $(shell $(LLVMCONFIG) --ldflags --libs --system-libs) \ $(MLIR_LDFLAGS) \ + $(shell $(LLVMCONFIG) --ldflags --libs --system-libs) \ $(eval $(call common_library,libmlir)) diff --git a/jlm/tooling/Makefile.sub b/jlm/tooling/Makefile.sub index e4ddaa28e..3734f723d 100644 --- a/jlm/tooling/Makefile.sub +++ b/jlm/tooling/Makefile.sub @@ -65,7 +65,7 @@ libtooling_TEST_LIBS = \ libjlmtest \ libtooling_TEST_EXTRA_LDFLAGS = \ - $(shell $(LLVMCONFIG) --ldflags --libs --system-libs) \ $(MLIR_LDFLAGS) \ + $(shell $(LLVMCONFIG) --ldflags --libs --system-libs) \ $(eval $(call common_library,libtooling)) diff --git a/scripts/build-circt.sh b/scripts/build-circt.sh index dee85a7ff..ccefed813 
100755 --- a/scripts/build-circt.sh +++ b/scripts/build-circt.sh @@ -5,15 +5,13 @@ GIT_COMMIT=debf1ed774c2bbdbfc8e7bc987a21f72e8f08f65 # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" -JLM_ROOT_DIR=${SCRIPT_DIR}/.. +JLM_ROOT_DIR="$(realpath "${SCRIPT_DIR}/..")" CIRCT_BUILD=${JLM_ROOT_DIR}/build-circt CIRCT_INSTALL=${JLM_ROOT_DIR}/usr LLVM_LIT_PATH=/usr/local/bin/lit LLVM_VERSION=17 -LLVM_CONFIG=llvm-config-${LLVM_VERSION} -LLVM_ROOT=$(${LLVM_CONFIG} --prefix) -LLVM_LIB=$(${LLVM_CONFIG} --libdir) +LLVM_CONFIG_BIN=llvm-config-${LLVM_VERSION} function commit() { @@ -24,6 +22,8 @@ function usage() { echo "Usage: ./build-circt.sh [OPTION] [VAR=VALUE]" echo "" + echo " --llvm-config PATH The llvm-config script used to determine up llvm" + echo " build dependencies. [${LLVM_CONFIG_BIN}]" echo " --build-path PATH The path where to build CIRCT." echo " [${CIRCT_BUILD}]" echo " --install-path PATH The path where to install CIRCT." @@ -36,6 +36,11 @@ function usage() while [[ "$#" -ge 1 ]] ; do case "$1" in + --llvm-config) + shift + LLVM_CONFIG_BIN="$1" + shift + ;; --build-path) shift CIRCT_BUILD=$(readlink -m "$1") @@ -62,6 +67,9 @@ while [[ "$#" -ge 1 ]] ; do esac done +LLVM_BINDIR=$(${LLVM_CONFIG_BIN} --bindir) +LLVM_CMAKEDIR=$(${LLVM_CONFIG_BIN} --cmakedir) + CIRCT_GIT_DIR=${CIRCT_BUILD}/circt.git CIRCT_BUILD_DIR=${CIRCT_BUILD}/build @@ -69,16 +77,15 @@ if [ ! 
-d "$CIRCT_GIT_DIR" ] ; then git clone https://github.com/EECS-NTNU/circt.git ${CIRCT_GIT_DIR} fi -cd ${CIRCT_GIT_DIR} -git checkout ${GIT_COMMIT} +git -C ${CIRCT_GIT_DIR} checkout ${GIT_COMMIT} cmake -G Ninja \ ${CIRCT_GIT_DIR} \ -B ${CIRCT_BUILD_DIR} \ - -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \ - -DCMAKE_CXX_COMPILER=clang++-${LLVM_VERSION} \ + -DCMAKE_C_COMPILER=${LLVM_BINDIR}/clang \ + -DCMAKE_CXX_COMPILER=${LLVM_BINDIR}/clang++ \ -DCMAKE_BUILD_TYPE=RelWithDebInfo \ - -DLLVM_DIR=${LLVM_ROOT}/cmake/ \ - -DMLIR_DIR=${LLVM_LIB}/cmake/mlir \ + -DLLVM_DIR=${LLVM_CMAKEDIR} \ + -DMLIR_DIR=${LLVM_CMAKEDIR}/../mlir \ -DLLVM_EXTERNAL_LIT="${LLVM_LIT_PATH}" \ -DLLVM_LIT_ARGS="-v --show-unsupported" \ -DVERILATOR_DISABLE=ON \ diff --git a/scripts/build-mlir.sh b/scripts/build-mlir.sh index 80b453ec8..38cbab79b 100755 --- a/scripts/build-mlir.sh +++ b/scripts/build-mlir.sh @@ -5,14 +5,12 @@ GIT_COMMIT=ab630d5a881a0e8fc5bdfa63a5984186fa9096c0 # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" -JLM_ROOT_DIR=${SCRIPT_DIR}/.. +JLM_ROOT_DIR="$(realpath "${SCRIPT_DIR}/..")" MLIR_BUILD=${JLM_ROOT_DIR}/build-mlir MLIR_INSTALL=${JLM_ROOT_DIR}/usr LLVM_VERSION=17 -LLVM_CONFIG=llvm-config-${LLVM_VERSION} -LLVM_ROOT=$(${LLVM_CONFIG} --prefix) -LLVM_LIB=$(${LLVM_CONFIG} --libdir) +LLVM_CONFIG_BIN=llvm-config-${LLVM_VERSION} function commit() { @@ -21,8 +19,10 @@ function commit() function usage() { - echo "Usage: ./build-circt.sh [OPTION] [VAR=VALUE]" + echo "Usage: ./build-mlir.sh [OPTION] [VAR=VALUE]" echo "" + echo " --llvm-config PATH The llvm-config script used to determine up llvm" + echo " build dependencies. [${LLVM_CONFIG_BIN}]" echo " --build-path PATH The path where to build MLIR." echo " [${MLIR_BUILD}]" echo " --install-path PATH The path where to install MLIR." 
@@ -33,6 +33,11 @@ function usage() while [[ "$#" -ge 1 ]] ; do case "$1" in + --llvm-config) + shift + LLVM_CONFIG_BIN="$1" + shift + ;; --build-path) shift MLIR_BUILD=$(readlink -m "$1") @@ -54,6 +59,9 @@ while [[ "$#" -ge 1 ]] ; do esac done +LLVM_BINDIR=$(${LLVM_CONFIG_BIN} --bindir) +LLVM_CMAKEDIR=$(${LLVM_CONFIG_BIN} --cmakedir) + MLIR_GIT_DIR=${MLIR_BUILD}/mlir_rvsdg.git MLIR_BUILD_DIR=${MLIR_BUILD}/build @@ -61,16 +69,16 @@ if [ ! -d "$MLIR_GIT_DIR" ] ; then git clone https://github.com/EECS-NTNU/mlir_rvsdg.git ${MLIR_GIT_DIR} fi -cd ${MLIR_GIT_DIR} -git checkout ${GIT_COMMIT} + +git -C ${MLIR_GIT_DIR} checkout ${GIT_COMMIT} cmake -G Ninja \ ${MLIR_GIT_DIR} \ -B ${MLIR_BUILD_DIR} \ - -DCMAKE_C_COMPILER=clang-${LLVM_VERSION} \ - -DCMAKE_CXX_COMPILER=clang++-${LLVM_VERSION} \ - -DLLVM_DIR=${LLVM_ROOT}/cmake/ \ - -DMLIR_DIR=${LLVM_LIB}/cmake/mlir \ + -DCMAKE_C_COMPILER=${LLVM_BINDIR}/clang \ + -DCMAKE_CXX_COMPILER=${LLVM_BINDIR}/clang++ \ + -DLLVM_DIR=${LLVM_CMAKEDIR} \ + -DMLIR_DIR=${LLVM_CMAKEDIR}/../mlir \ -DCMAKE_INSTALL_PREFIX=${MLIR_INSTALL} \ -Wno-dev -cmake --build ${MLIR_BUILD_DIR} +ninja -C ${MLIR_BUILD_DIR} ninja -C ${MLIR_BUILD_DIR} install diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 8098de1dc..d1d111960 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -9,6 +9,7 @@ #include +#include #include /** diff --git a/tools/Makefile.sub b/tools/Makefile.sub index a2206c679..0440e6a80 100644 --- a/tools/Makefile.sub +++ b/tools/Makefile.sub @@ -13,8 +13,8 @@ jlc_LIBS += \ libutil \ jlc_EXTRA_LDFLAGS += \ - $(shell $(LLVMCONFIG) --libs core irReader --ldflags --system-libs) \ ${MLIR_LDFLAGS} \ + $(shell $(LLVMCONFIG) --libs core irReader --ldflags --system-libs) \ $(eval $(call common_executable,jlc)) @@ -29,7 +29,7 @@ jlm-opt_LIBS = \ libutil \ jlm-opt_EXTRA_LDFLAGS = \ - $(shell $(LLVMCONFIG) --libs core irReader --ldflags --system-libs) \ ${MLIR_LDFLAGS} \ + $(shell $(LLVMCONFIG) 
--libs core irReader --ldflags --system-libs) \ $(eval $(call common_executable,jlm-opt)) diff --git a/tools/jhls/Makefile.sub b/tools/jhls/Makefile.sub index 0564e1071..dd3602c8f 100644 --- a/tools/jhls/Makefile.sub +++ b/tools/jhls/Makefile.sub @@ -14,7 +14,7 @@ jhls_LIBS = \ libutil \ jhls_EXTRA_LDFLAGS = \ - $(shell $(LLVMCONFIG) --libs core irReader --ldflags) \ ${MLIR_LDFLAGS} \ + $(shell $(LLVMCONFIG) --libs core irReader --ldflags) \ $(eval $(call common_executable,jhls)) diff --git a/tools/jlm-hls/Makefile.sub b/tools/jlm-hls/Makefile.sub index 12d12bceb..1672bea25 100644 --- a/tools/jlm-hls/Makefile.sub +++ b/tools/jlm-hls/Makefile.sub @@ -13,7 +13,7 @@ jlm-hls_LIBS = \ libutil \ jlm-hls_EXTRA_LDFLAGS = \ + ${CIRCT_LDFLAGS} \ $(shell $(LLVMCONFIG) --libs core irReader --ldflags --system-libs) \ - ${CIRCT_LDFLAGS} $(eval $(call common_executable,jlm-hls)) From c7b6ea47faf5ca7b8579e48c48ec8f1455807e6d Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 9 Aug 2024 14:29:25 +0200 Subject: [PATCH 034/170] Avoid OutputFormatParsing test failing when MLIR is disabled (#569) A test was failing when compiling without `--enable-mlir`, due to `--output-format mlir` being unknown when `ENABLE_MLIR` is not defined. This issue could have been fixed in a different location, such as not defining the `Mlir` value in the `OutputFormat` enum at all, but that would have created some additional situations where `#ifdef`s would have to be added. I think it is nice to use as few `#ifdef`s as possible, to avoid situations like this where tests fail in specific `#ifdef` configurations. I would not be opposed to including the `--output-format mlir` option in all builds, but possibly making it invisible in the help-text. If `OutputFormat::Mlir` is selected without being enabled in the build, it eventually triggers a `JLM_UNREACHABLE` that informs you that jlm has been compiled without the MLIR dialect. 
I think this is much more user-friendly than just denying that there ever was an output format called `mlir`. --- tests/jlm/tooling/TestJlmOptCommandLineParser.cpp | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index 83971e3c5..36844c2d5 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -164,8 +164,12 @@ OutputFormatParsing() for (size_t n = start; n != end; n++) { auto outputFormat = static_cast(n); - auto outputFormatString = JlmOptCommandLineOptions::ToCommandLineArgument(outputFormat); +#ifndef ENABLE_MLIR + if (outputFormat == JlmOptCommandLineOptions::OutputFormat::Mlir) + continue; +#endif + auto outputFormatString = JlmOptCommandLineOptions::ToCommandLineArgument(outputFormat); testOutputFormatParsing(outputFormatString, outputFormat); } From e4db641cd5f04184f25a42cfde63589d8e2e899c Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 9 Aug 2024 14:59:31 +0200 Subject: [PATCH 035/170] Make parser tests use c_str instead of copying strings (#571) This change simplifies a helper function in `TestJlcCommandLineParser.cpp` and `TestJlmOptCommandLineParser.cpp` by using `c_str()`. It is safe to do this as the strings owning the underlying data live longer than the vector of `const char *` (and the strings are never modified). To make this change legal, some `const` had to be added, but only in places where no mutation is happening. The `const char * const *` is necessary, as `char **` can not be cast to `const char **`. I had to google this to [understand why](https://isocpp.org/wiki/faq/const-correctness#constptrptr-conversion). 
--- jlm/tooling/CommandLine.cpp | 14 ++++---- jlm/tooling/CommandLine.hpp | 16 ++++----- .../jlm/tooling/TestJlcCommandLineParser.cpp | 32 +++--------------- .../tooling/TestJlmOptCommandLineParser.cpp | 33 +++---------------- 4 files changed, 25 insertions(+), 70 deletions(-) diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 94440c7b6..b85389066 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -361,7 +361,7 @@ CommandLineParser::Exception::~Exception() noexcept = default; JlcCommandLineParser::~JlcCommandLineParser() noexcept = default; const JlcCommandLineOptions & -JlcCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) +JlcCommandLineParser::ParseCommandLineArguments(int argc, const char * const * argv) { auto checkAndConvertJlmOptOptimizations = [](const ::llvm::cl::list & optimizations, @@ -712,7 +712,7 @@ JlcCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) JlmOptCommandLineParser::~JlmOptCommandLineParser() noexcept = default; const JlmOptCommandLineOptions & -JlmOptCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) +JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const * argv) { using namespace ::llvm; @@ -956,7 +956,7 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) } const JlmOptCommandLineOptions & -JlmOptCommandLineParser::Parse(int argc, char ** argv) +JlmOptCommandLineParser::Parse(int argc, const char * const * argv) { static JlmOptCommandLineParser parser; return parser.ParseCommandLineArguments(argc, argv); @@ -965,7 +965,7 @@ JlmOptCommandLineParser::Parse(int argc, char ** argv) JlmHlsCommandLineParser::~JlmHlsCommandLineParser() noexcept = default; const JlmHlsCommandLineOptions & -JlmHlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) +JlmHlsCommandLineParser::ParseCommandLineArguments(int argc, const char * const * argv) { CommandLineOptions_.Reset(); @@ -1026,7 
+1026,7 @@ JlmHlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) } const JlmHlsCommandLineOptions & -JlmHlsCommandLineParser::Parse(int argc, char ** argv) +JlmHlsCommandLineParser::Parse(int argc, const char * const * argv) { static JlmHlsCommandLineParser parser; return parser.ParseCommandLineArguments(argc, argv); @@ -1035,7 +1035,7 @@ JlmHlsCommandLineParser::Parse(int argc, char ** argv) JhlsCommandLineParser::~JhlsCommandLineParser() noexcept = default; const JhlsCommandLineOptions & -JhlsCommandLineParser::ParseCommandLineArguments(int argc, char ** argv) +JhlsCommandLineParser::ParseCommandLineArguments(int argc, const char * const * argv) { CommandLineOptions_.Reset(); @@ -1304,7 +1304,7 @@ JhlsCommandLineParser::CreateDependencyFileFromFile(const util::filepath & f) } const JhlsCommandLineOptions & -JhlsCommandLineParser::Parse(int argc, char ** argv) +JhlsCommandLineParser::Parse(int argc, const char * const * argv) { static JhlsCommandLineParser parser; return parser.ParseCommandLineArguments(argc, argv); diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index ce3ef827c..189d430e6 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -584,7 +584,7 @@ class CommandLineParser CommandLineParser() = default; virtual const CommandLineOptions & - ParseCommandLineArguments(int argc, char ** argv) = 0; + ParseCommandLineArguments(int argc, const char * const * argv) = 0; }; /** @@ -596,7 +596,7 @@ class JlcCommandLineParser final : public CommandLineParser ~JlcCommandLineParser() noexcept override; const JlcCommandLineOptions & - ParseCommandLineArguments(int argc, char ** argv) override; + ParseCommandLineArguments(int argc, const char * const * argv) override; private: static bool @@ -629,10 +629,10 @@ class JlmOptCommandLineParser final : public CommandLineParser ~JlmOptCommandLineParser() noexcept override; const JlmOptCommandLineOptions & - ParseCommandLineArguments(int argc, char ** argv) 
override; + ParseCommandLineArguments(int argc, const char * const * argv) override; static const JlmOptCommandLineOptions & - Parse(int argc, char ** argv); + Parse(int argc, const char * const * argv); private: std::unique_ptr CommandLineOptions_; @@ -647,10 +647,10 @@ class JlmHlsCommandLineParser final : public CommandLineParser ~JlmHlsCommandLineParser() noexcept override; const JlmHlsCommandLineOptions & - ParseCommandLineArguments(int argc, char ** argv) override; + ParseCommandLineArguments(int argc, const char * const * argv) override; static const JlmHlsCommandLineOptions & - Parse(int argc, char ** argv); + Parse(int argc, const char * const * argv); private: JlmHlsCommandLineOptions CommandLineOptions_; @@ -665,10 +665,10 @@ class JhlsCommandLineParser final : public CommandLineParser ~JhlsCommandLineParser() noexcept override; const JhlsCommandLineOptions & - ParseCommandLineArguments(int argc, char ** argv) override; + ParseCommandLineArguments(int argc, const char * const * argv) override; static const JhlsCommandLineOptions & - Parse(int argc, char ** arv); + Parse(int argc, const char * const * arv); private: static bool diff --git a/tests/jlm/tooling/TestJlcCommandLineParser.cpp b/tests/jlm/tooling/TestJlcCommandLineParser.cpp index 3b1d38935..b1159b6c2 100644 --- a/tests/jlm/tooling/TestJlcCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlcCommandLineParser.cpp @@ -13,38 +13,16 @@ static const jlm::tooling::JlcCommandLineOptions & ParseCommandLineArguments(const std::vector & commandLineArguments) { - auto cleanUp = [](const std::vector & array) - { - for (const auto & ptr : array) - { - delete[] ptr; - } - }; - - std::vector array; + std::vector cStrings; for (const auto & commandLineArgument : commandLineArguments) { - array.push_back(new char[commandLineArgument.size() + 1]); - strncpy(array.back(), commandLineArgument.data(), commandLineArgument.size()); - array.back()[commandLineArgument.size()] = '\0'; + 
cStrings.push_back(commandLineArgument.c_str()); } static jlm::tooling::JlcCommandLineParser commandLineParser; - const jlm::tooling::JlcCommandLineOptions * commandLineOptions; - try - { - commandLineOptions = - &commandLineParser.ParseCommandLineArguments(static_cast(array.size()), &array[0]); - } - catch (...) - { - cleanUp(array); - throw; - } - - cleanUp(array); - - return *commandLineOptions; + return commandLineParser.ParseCommandLineArguments( + static_cast(cStrings.size()), + cStrings.data()); } static void diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index 36844c2d5..e82262691 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -9,42 +9,19 @@ #include -// FIXME: We have a similar function in TestJlcCommandLineParser.cpp. We need to clean up. static const jlm::tooling::JlmOptCommandLineOptions & ParseCommandLineArguments(const std::vector & commandLineArguments) { - auto cleanUp = [](const std::vector & array) - { - for (const auto & ptr : array) - { - delete[] ptr; - } - }; - - std::vector array; + std::vector cStrings; for (const auto & commandLineArgument : commandLineArguments) { - array.push_back(new char[commandLineArgument.size() + 1]); - strncpy(array.back(), commandLineArgument.data(), commandLineArgument.size()); - array.back()[commandLineArgument.size()] = '\0'; + cStrings.push_back(commandLineArgument.c_str()); } static jlm::tooling::JlmOptCommandLineParser commandLineParser; - const jlm::tooling::JlmOptCommandLineOptions * commandLineOptions; - try - { - commandLineOptions = - &commandLineParser.ParseCommandLineArguments(static_cast(array.size()), &array[0]); - } - catch (...) 
- { - cleanUp(array); - throw; - } - - cleanUp(array); - - return *commandLineOptions; + return commandLineParser.ParseCommandLineArguments( + static_cast(cStrings.size()), + cStrings.data()); } static void From 2a68ad053fe644353a0a1bfe085af524459992f9 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 12 Aug 2024 08:11:13 +0200 Subject: [PATCH 036/170] HLS: Configurable pointer size (#572) Makes it possible to manually change the size of a pointer by changing GetPointerSizeInBits() in base-hls.cpp. --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 17 +++++++++-------- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.hpp | 9 +++++++++ jlm/hls/backend/rhls2firrtl/json-hls.cpp | 1 + 4 files changed, 20 insertions(+), 9 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 702c92acf..9d41d03e4 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -327,7 +327,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) int bits = JlmSize(pointeeType); if (dynamic_cast(pointeeType)) { - ; + pointeeType = nullptr; } else if (auto arrayType = dynamic_cast(pointeeType)) { @@ -341,13 +341,13 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto input = GetSubfield(body, inBundles[i], "data"); auto asSInt = AddAsSIntOp(body, input); int bytes = bits / 8; - auto constantOp = GetConstant(body, 64, bytes); + auto constantOp = GetConstant(body, GetPointerSizeInBits(), bytes); auto cvtOp = AddCvtOp(body, constantOp); auto offset = AddMulOp(body, asSInt, cvtOp); result = AddAddOp(body, result, offset); } auto asUInt = AddAsUIntOp(body, result); - Connect(body, outData, AddBitsOp(body, asUInt, 63, 0)); + Connect(body, outData, AddBitsOp(body, asUInt, GetPointerSizeInBits() - 1, 0)); } else if 
(dynamic_cast(&(node->operation()))) { @@ -1576,7 +1576,7 @@ RhlsToFirrtlConverter::MlirGenMem(const jlm::rvsdg::simple_node * node) } else if (dynamic_cast(&node->output(0)->type())) { - bitWidth = 64; + bitWidth = GetPointerSizeInBits(); } else { @@ -3138,7 +3138,7 @@ RhlsToFirrtlConverter::AddMemReqPort(::llvm::SmallVectorgetStringAttr("addr"), false, - circt::firrtl::IntType::get(Builder_->getContext(), false, 64))); + circt::firrtl::IntType::get(Builder_->getContext(), false, GetPointerSizeInBits()))); memReqElements.push_back(BundleElement( Builder_->getStringAttr("data"), false, @@ -3798,6 +3798,7 @@ RhlsToFirrtlConverter::InitializeMemReq(circt::firrtl::FModuleOp module) auto zeroBitValue = GetConstant(body, 1, 0); auto invalid1 = GetInvalid(body, 1); auto invalid3 = GetInvalid(body, 3); + auto invalidPtr = GetInvalid(body, GetPointerSizeInBits()); auto invalid64 = GetInvalid(body, 64); auto memValid = GetSubfield(body, mem, "valid"); @@ -3807,7 +3808,7 @@ RhlsToFirrtlConverter::InitializeMemReq(circt::firrtl::FModuleOp module) auto memWidth = GetSubfield(body, mem, "width"); Connect(body, memValid, zeroBitValue); - Connect(body, memAddr, invalid64); + Connect(body, memAddr, invalidPtr); Connect(body, memData, invalid64); Connect(body, memWrite, invalid1); Connect(body, memWidth, invalid3); @@ -3931,7 +3932,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) int bits = JlmSize(pointeeType); if (dynamic_cast(pointeeType)) { - ; + pointeeType = nullptr; } else if (auto arrayType = dynamic_cast(pointeeType)) { @@ -3959,7 +3960,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) } else if (dynamic_cast(loadType)) { - bitWidth = 64; + bitWidth = GetPointerSizeInBits(); } else { diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index a02e90559..9af0fd651 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -115,7 +115,7 @@ 
BaseHLS::JlmSize(const jlm::rvsdg::type * type) } else if (dynamic_cast(type)) { - return 64; + return GetPointerSizeInBits(); } else if (auto ct = dynamic_cast(type)) { diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index 2f7594128..1f37e74d7 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -34,6 +34,15 @@ class BaseHLS static int JlmSize(const jlm::rvsdg::type * type); + /** + * @return The size of a pointer in bits. + */ + [[nodiscard]] static size_t + GetPointerSizeInBits() + { + return 64; + } + private: virtual std::string extension() = 0; diff --git a/jlm/hls/backend/rhls2firrtl/json-hls.cpp b/jlm/hls/backend/rhls2firrtl/json-hls.cpp index a627325f7..6d9089ce7 100644 --- a/jlm/hls/backend/rhls2firrtl/json-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/json-hls.cpp @@ -21,6 +21,7 @@ JsonHLS::get_text(llvm::RvsdgModule & rm) auto reg_args = get_reg_args(ln); auto reg_results = get_reg_results(ln); + json << "\"addr_width\": " << GetPointerSizeInBits() << ",\n"; json << "\"arguments\": ["; for (size_t i = 0; i < reg_args.size(); ++i) { From e5e1259bb9410fe224122db5aba2c46060c964f4 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 12 Aug 2024 16:21:17 +0200 Subject: [PATCH 037/170] CNE: Simple support for hls (#479) Duplicates the cne optimization found in the llvm backend for the hls backend with the addition of supporting the hls::loop_op. 
--- jlm/hls/Makefile.sub | 4 + jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 6 +- jlm/hls/opt/cne.cpp | 632 ++++++++++++++++++++++ jlm/hls/opt/cne.hpp | 39 ++ 4 files changed, 678 insertions(+), 3 deletions(-) create mode 100644 jlm/hls/opt/cne.cpp create mode 100644 jlm/hls/opt/cne.hpp diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index 3e1e1de41..3c9895dcd 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -36,6 +36,8 @@ libhls_SOURCES = \ \ jlm/hls/ir/hls.cpp \ \ + jlm/hls/opt/cne.cpp \ + \ jlm/hls/util/view.cpp \ libhls_HEADERS = \ @@ -72,6 +74,8 @@ libhls_HEADERS = \ \ jlm/hls/ir/hls.hpp \ \ + jlm/hls/opt/cne.hpp \ + \ jlm/hls/util/view.hpp \ libhls_TESTS += \ diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 1d8dffc4e..f303b6ea5 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -24,6 +24,7 @@ #include #include #include +#include #include #include #include @@ -32,7 +33,6 @@ #include #include #include -#include #include #include #include @@ -56,7 +56,7 @@ split_opt(llvm::RvsdgModule & rm) { // TODO: figure out which optimizations to use here jlm::llvm::DeadNodeElimination dne; - jlm::llvm::cne cne; + jlm::hls::cne cne; jlm::llvm::InvariantValueRedirection ivr; jlm::llvm::tginversion tgi; jlm::llvm::nodereduction red; @@ -74,7 +74,7 @@ pre_opt(jlm::llvm::RvsdgModule & rm) { // TODO: figure out which optimizations to use here jlm::llvm::DeadNodeElimination dne; - jlm::llvm::cne cne; + jlm::hls::cne cne; jlm::llvm::InvariantValueRedirection ivr; jlm::llvm::tginversion tgi; jlm::util::StatisticsCollector statisticsCollector; diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp new file mode 100644 index 000000000..770379464 --- /dev/null +++ b/jlm/hls/opt/cne.cpp @@ -0,0 +1,632 @@ +/* + * Copyright 2017 Nico Reißmann + * See COPYING for terms of redistribution. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace jlm::hls +{ + +using namespace jlm::rvsdg; + +class cnestat final : public util::Statistics +{ + const char * MarkTimerLabel_ = "MarkTime"; + const char * DivertTimerLabel_ = "DivertTime"; + +public: + ~cnestat() override = default; + + explicit cnestat(const util::filepath & sourceFile) + : Statistics(Statistics::Id::CommonNodeElimination, sourceFile) + {} + + void + start_mark_stat(const jlm::rvsdg::graph & graph) noexcept + { + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddTimer(MarkTimerLabel_).start(); + } + + void + end_mark_stat() noexcept + { + GetTimer(MarkTimerLabel_).stop(); + } + + void + start_divert_stat() noexcept + { + AddTimer(DivertTimerLabel_).start(); + } + + void + end_divert_stat(const jlm::rvsdg::graph & graph) noexcept + { + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + GetTimer(DivertTimerLabel_).stop(); + } + + static std::unique_ptr + Create(const util::filepath & sourceFile) + { + return std::make_unique(sourceFile); + } +}; + +typedef std::unordered_set congruence_set; + +class cnectx +{ +public: + inline void + mark(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2) + { + auto s1 = set(o1); + auto s2 = set(o2); + + if (s1 == s2) + return; + + if (s2->size() < s1->size()) + { + s1 = outputs_[o2]; + s2 = outputs_[o1]; + } + + for (auto & o : *s1) + { + s2->insert(o); + outputs_[o] = s2; + } + } + + inline void + mark(const jlm::rvsdg::node * n1, const jlm::rvsdg::node * n2) + { + JLM_ASSERT(n1->noutputs() == n2->noutputs()); + + for (size_t n = 0; n < n1->noutputs(); n++) + mark(n1->output(n), n2->output(n)); + } + + inline bool + congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2) const noexcept + { + if (o1 == 
o2) + return true; + + auto it = outputs_.find(o1); + if (it == outputs_.end()) + return false; + + return it->second->find(o2) != it->second->end(); + } + + inline bool + congruent(const jlm::rvsdg::input * i1, const jlm::rvsdg::input * i2) const noexcept + { + return congruent(i1->origin(), i2->origin()); + } + + congruence_set * + set(jlm::rvsdg::output * output) noexcept + { + if (outputs_.find(output) == outputs_.end()) + { + std::unique_ptr set(new congruence_set({ output })); + outputs_[output] = set.get(); + sets_.insert(std::move(set)); + } + + return outputs_[output]; + } + +private: + std::unordered_set> sets_; + std::unordered_map outputs_; +}; + +class vset +{ +public: + void + insert(const jlm::rvsdg::output * o1, const jlm::rvsdg::output * o2) + { + auto it = sets_.find(o1); + if (it != sets_.end()) + sets_[o1].insert(o2); + else + sets_[o1] = { o2 }; + + it = sets_.find(o2); + if (it != sets_.end()) + sets_[o2].insert(o1); + else + sets_[o2] = { o1 }; + } + + bool + visited(const jlm::rvsdg::output * o1, const jlm::rvsdg::output * o2) const + { + auto it = sets_.find(o1); + if (it == sets_.end()) + return false; + + return it->second.find(o2) != it->second.end(); + } + +private: + std::unordered_map> + sets_; +}; + +/* mark phase */ + +static bool +congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & ctx) +{ + if (ctx.congruent(o1, o2) || vs.visited(o1, o2)) + return true; + + if (o1->type() != o2->type()) + return false; + + if (is(o1) && is(o2)) + { + JLM_ASSERT(o1->region()->node() == o2->region()->node()); + auto a1 = static_cast(o1); + auto a2 = static_cast(o2); + vs.insert(a1, a2); + auto i1 = a1->input(), i2 = a2->input(); + if (!congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx)) + return false; + + auto output1 = o1->region()->node()->output(i1->index()); + auto output2 = o2->region()->node()->output(i2->index()); + return congruent(output1, output2, vs, ctx); + } + + auto n1 = 
jlm::rvsdg::node_output::node(o1); + auto n2 = jlm::rvsdg::node_output::node(o2); + if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) + && n1 == n2) + { + auto so1 = static_cast(o1); + auto so2 = static_cast(o2); + vs.insert(o1, o2); + auto r1 = so1->results.first(); + auto r2 = so2->results.first(); + return congruent(r1->origin(), r2->origin(), vs, ctx); + } + + auto a1 = dynamic_cast(o1); + auto a2 = dynamic_cast(o2); + if (a1 && is(a1->region()->node()) && a2 && is(a2->region()->node())) + { + JLM_ASSERT(o1->region()->node() == o2->region()->node()); + if (a1->input() && a2->input()) + { + // input arguments + vs.insert(a1, a2); + return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); + } + } + + if (jlm::rvsdg::is(n1) && n1 == n2) + { + auto so1 = static_cast(o1); + auto so2 = static_cast(o2); + auto r1 = so1->results.begin(); + auto r2 = so2->results.begin(); + for (; r1 != so1->results.end(); r1++, r2++) + { + JLM_ASSERT(r1->region() == r2->region()); + if (!congruent(r1->origin(), r2->origin(), vs, ctx)) + return false; + } + return true; + } + + if (is(o1) && is(o2)) + { + JLM_ASSERT(o1->region()->node() == o2->region()->node()); + auto a1 = static_cast(o1); + auto a2 = static_cast(o2); + return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); + } + + if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) + && n1->operation() == n2->operation() && n1->ninputs() == n2->ninputs() + && o1->index() == o2->index()) + { + for (size_t n = 0; n < n1->ninputs(); n++) + { + auto origin1 = n1->input(n)->origin(); + auto origin2 = n2->input(n)->origin(); + if (!congruent(origin1, origin2, vs, ctx)) + return false; + } + return true; + } + + return false; +} + +static bool +congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, cnectx & ctx) +{ + vset vs; + return congruent(o1, o2, vs, ctx); +} + +static void +mark_arguments(jlm::rvsdg::structural_input * i1, jlm::rvsdg::structural_input * i2, cnectx & ctx) +{ + JLM_ASSERT(i1->node() && 
i1->node() == i2->node()); + JLM_ASSERT(i1->arguments.size() == i2->arguments.size()); + + auto a1 = i1->arguments.begin(); + auto a2 = i2->arguments.begin(); + for (; a1 != i1->arguments.end(); a1++, a2++) + { + JLM_ASSERT(a1->region() == a2->region()); + if (congruent(a1.ptr(), a2.ptr(), ctx)) + ctx.mark(a1.ptr(), a2.ptr()); + } +} + +static void +mark(jlm::rvsdg::region *, cnectx &); + +static void +mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node->operation())); + + /* mark entry variables */ + for (size_t i1 = 1; i1 < node->ninputs(); i1++) + { + for (size_t i2 = i1 + 1; i2 < node->ninputs(); i2++) + mark_arguments(node->input(i1), node->input(i2), ctx); + } + + for (size_t n = 0; n < node->nsubregions(); n++) + mark(node->subregion(n), ctx); + + /* mark exit variables */ + for (size_t o1 = 0; o1 < node->noutputs(); o1++) + { + for (size_t o2 = o1 + 1; o2 < node->noutputs(); o2++) + { + if (congruent(node->output(o1), node->output(o2), ctx)) + ctx.mark(node->output(o1), node->output(o2)); + } + } +} + +static void +mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); + auto theta = static_cast(node); + + /* mark loop variables */ + for (size_t i1 = 0; i1 < theta->ninputs(); i1++) + { + for (size_t i2 = i1 + 1; i2 < theta->ninputs(); i2++) + { + auto input1 = theta->input(i1); + auto input2 = theta->input(i2); + if (congruent(input1->argument(), input2->argument(), ctx)) + { + ctx.mark(input1->argument(), input2->argument()); + ctx.mark(input1->output(), input2->output()); + } + } + } + + mark(node->subregion(0), ctx); +} + +static void +mark_loop(const rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(rvsdg::is(node)); + auto loop = static_cast(node); + + /* mark loop variables */ + for (size_t i1 = 0; i1 < loop->ninputs(); i1++) + { + for (size_t i2 = i1 + 1; i2 < loop->ninputs(); i2++) + { + auto input1 = loop->input(i1); + auto input2 = 
loop->input(i2); + if (congruent(input1->arguments.first(), input2->arguments.first(), ctx)) + { + ctx.mark(input1->arguments.first(), input2->arguments.first()); + } + } + } + mark(node->subregion(0), ctx); +} + +static void +mark_lambda(const jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); + + /* mark dependencies */ + for (size_t i1 = 0; i1 < node->ninputs(); i1++) + { + for (size_t i2 = i1 + 1; i2 < node->ninputs(); i2++) + { + auto input1 = node->input(i1); + auto input2 = node->input(i2); + if (ctx.congruent(input1, input2)) + ctx.mark(input1->arguments.first(), input2->arguments.first()); + } + } + + mark(node->subregion(0), ctx); +} + +static void +mark_phi(const jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(is(node)); + + /* mark dependencies */ + for (size_t i1 = 0; i1 < node->ninputs(); i1++) + { + for (size_t i2 = i1 + 1; i2 < node->ninputs(); i2++) + { + auto input1 = node->input(i1); + auto input2 = node->input(i2); + if (ctx.congruent(input1, input2)) + ctx.mark(input1->arguments.first(), input2->arguments.first()); + } + } + + mark(node->subregion(0), ctx); +} + +static void +mark_delta(const jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); +} + +static void +mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + static std:: + unordered_map + map({ { std::type_index(typeid(jlm::rvsdg::gamma_op)), mark_gamma }, + { std::type_index(typeid(jlm::rvsdg::theta_op)), mark_theta }, + { std::type_index(typeid(jlm::hls::loop_op)), mark_loop }, + { typeid(llvm::lambda::operation), mark_lambda }, + { typeid(llvm::phi::operation), mark_phi }, + { typeid(llvm::delta::operation), mark_delta } }); + + auto & op = node->operation(); + JLM_ASSERT(map.find(typeid(op)) != map.end()); + map[typeid(op)](node, ctx); +} + +static void +mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) +{ + if (node->ninputs() == 0) + { + for (const auto & other : 
node->region()->top_nodes) + { + if (&other != node && node->operation() == other.operation()) + { + ctx.mark(node, &other); + break; + } + } + return; + } + + auto set = ctx.set(node->input(0)->origin()); + for (const auto & origin : *set) + { + for (const auto & user : *origin) + { + auto ni = dynamic_cast(user); + auto other = ni ? ni->node() : nullptr; + if (!other || other == node || other->operation() != node->operation() + || other->ninputs() != node->ninputs()) + continue; + + size_t n; + for (n = 0; n < node->ninputs(); n++) + { + if (!ctx.congruent(node->input(n), other->input(n))) + break; + } + if (n == node->ninputs()) + ctx.mark(node, other); + } + } +} + +static void +mark(jlm::rvsdg::region * region, cnectx & ctx) +{ + for (const auto & node : jlm::rvsdg::topdown_traverser(region)) + { + if (auto simple = dynamic_cast(node)) + mark(simple, ctx); + else + mark(static_cast(node), ctx); + } +} + +/* divert phase */ + +static void +divert_users(jlm::rvsdg::output * output, cnectx & ctx) +{ + auto set = ctx.set(output); + for (auto & other : *set) + other->divert_users(output); + set->clear(); +} + +static void +divert_outputs(jlm::rvsdg::node * node, cnectx & ctx) +{ + for (size_t n = 0; n < node->noutputs(); n++) + divert_users(node->output(n), ctx); +} + +static void +divert_arguments(jlm::rvsdg::region * region, cnectx & ctx) +{ + for (size_t n = 0; n < region->narguments(); n++) + divert_users(region->argument(n), ctx); +} + +static void +divert(jlm::rvsdg::region *, cnectx &); + +static void +divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); + auto gamma = static_cast(node); + + for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) + { + for (size_t n = 0; n < ev->narguments(); n++) + divert_users(ev->argument(n), ctx); + } + + for (size_t r = 0; r < node->nsubregions(); r++) + divert(node->subregion(r), ctx); + + divert_outputs(node, ctx); +} + +static void 
+divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); + auto theta = static_cast(node); + auto subregion = node->subregion(0); + + for (const auto & lv : *theta) + { + JLM_ASSERT(ctx.set(lv->argument())->size() == ctx.set(lv)->size()); + divert_users(lv->argument(), ctx); + divert_users(lv, ctx); + } + + divert(subregion, ctx); +} + +static void +divert_loop(rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(rvsdg::is(node)); + auto subregion = node->subregion(0); + divert(subregion, ctx); +} + +static void +divert_lambda(jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); + + divert_arguments(node->subregion(0), ctx); + divert(node->subregion(0), ctx); +} + +static void +divert_phi(jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(is(node)); + + divert_arguments(node->subregion(0), ctx); + divert(node->subregion(0), ctx); +} + +static void +divert_delta(jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + JLM_ASSERT(jlm::rvsdg::is(node)); +} + +static void +divert(jlm::rvsdg::structural_node * node, cnectx & ctx) +{ + static std::unordered_map map( + { { std::type_index(typeid(jlm::rvsdg::gamma_op)), divert_gamma }, + { std::type_index(typeid(jlm::rvsdg::theta_op)), divert_theta }, + { std::type_index(typeid(jlm::hls::loop_op)), divert_loop }, + { typeid(llvm::lambda::operation), divert_lambda }, + { typeid(llvm::phi::operation), divert_phi }, + { typeid(llvm::delta::operation), divert_delta } }); + + auto & op = node->operation(); + JLM_ASSERT(map.find(typeid(op)) != map.end()); + map[typeid(op)](node, ctx); +} + +static void +divert(jlm::rvsdg::region * region, cnectx & ctx) +{ + for (const auto & node : jlm::rvsdg::topdown_traverser(region)) + { + if (auto simple = dynamic_cast(node)) + divert_outputs(simple, ctx); + else + divert(static_cast(node), ctx); + } +} + +static void +cne(jlm::llvm::RvsdgModule & rm, util::StatisticsCollector & 
statisticsCollector) +{ + auto & graph = rm.Rvsdg(); + + cnectx ctx; + auto statistics = cnestat::Create(rm.SourceFileName()); + + statistics->start_mark_stat(graph); + mark(graph.root(), ctx); + statistics->end_mark_stat(); + + statistics->start_divert_stat(); + divert(graph.root(), ctx); + statistics->end_divert_stat(graph); + + statisticsCollector.CollectDemandedStatistics(std::move(statistics)); +} + +/* cne class */ + +cne::~cne() +{} + +void +cne::run(llvm::RvsdgModule & module, util::StatisticsCollector & statisticsCollector) +{ + hls::cne(module, statisticsCollector); +} + +} diff --git a/jlm/hls/opt/cne.hpp b/jlm/hls/opt/cne.hpp new file mode 100644 index 000000000..929476d82 --- /dev/null +++ b/jlm/hls/opt/cne.hpp @@ -0,0 +1,39 @@ +/* + * Copyright 2017 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#ifndef JLM_HLS_OPT_CNE_HPP +#define JLM_HLS_OPT_CNE_HPP + +#include + +namespace jlm::llvm +{ +class RvsdgModule; +} + +namespace jlm::hls +{ + +// FIXME +// The cne optimization should be generalized such that it can be used for both the LLVM and HLS +// backend. + +/** + * \brief Common Node Elimination + * This is mainly a copy of the CNE optimization in the LLVM backend with the addition of support + * for the hls::loop_op. + */ +class cne final : public llvm::optimization +{ +public: + virtual ~cne(); + + virtual void + run(llvm::RvsdgModule & module, jlm::util::StatisticsCollector & statisticsCollector) override; +}; + +} + +#endif From 372116cd0c2a908aa7e17dd167690f6032e605f0 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 13 Aug 2024 08:54:35 +0200 Subject: [PATCH 038/170] Add missing type check to argument constructor (#574) The argument constructor did not check whether the type of the provided input is the same as the handed in type. In the future, the constructor should be refactored such that we either hand in an input or a simple type, but not both at the same time. This would avoid this check altogether. 
Closes #555 --- jlm/rvsdg/region.cpp | 10 ++++++ tests/jlm/rvsdg/ArgumentTests.cpp | 53 +++++++++++++++++++++++++++++++ 2 files changed, 63 insertions(+) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index ca727700f..b927cf543 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -36,6 +36,11 @@ argument::argument( if (input->node() != region->node()) throw jlm::util::error("Argument cannot be added to input."); + if (input->type() != *Type()) + { + throw util::type_error(Type()->debug_string(), input->type().debug_string()); + } + input->arguments.push_back(this); } } @@ -52,6 +57,11 @@ argument::argument( if (input->node() != region->node()) throw jlm::util::error("Argument cannot be added to input."); + if (input->type() != *Type()) + { + throw util::type_error(Type()->debug_string(), input->type().debug_string()); + } + input->arguments.push_back(this); } } diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index 5eb885a78..8a6316d0f 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -46,3 +46,56 @@ ArgumentNodeMismatch() } JLM_UNIT_TEST_REGISTER("jlm/rvsdg/ArgumentTests-ArgumentNodeMismatch", ArgumentNodeMismatch) + +static int +ArgumentInputTypeMismatch() +{ + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + auto stateType = jlm::tests::statetype::Create(); + + jlm::rvsdg::graph rvsdg; + auto x = rvsdg.add_import({ valueType, "import" }); + + auto structuralNode = structural_node::create(rvsdg.root(), 1); + auto structuralInput = jlm::rvsdg::structural_input::create(structuralNode, x, valueType); + + // Act & Assert + bool exceptionWasCaught = false; + try + { + jlm::rvsdg::argument::create(structuralNode->subregion(0), structuralInput, stateType); + // The line below should not be executed as the line above is expected to throw an exception. 
+ assert(false); + } + catch (type_error &) + { + exceptionWasCaught = true; + } + assert(exceptionWasCaught); + + exceptionWasCaught = false; + try + { + jlm::rvsdg::argument::create( + structuralNode->subregion(0), + structuralInput, + jlm::rvsdg::port(stateType)); + // The line below should not be executed as the line above is expected to throw an exception. + assert(false); + } + catch (type_error &) + { + exceptionWasCaught = true; + } + assert(exceptionWasCaught); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/ArgumentTests-ArgumentInputTypeMismatch", + ArgumentInputTypeMismatch) From a0b507a71df62fd982ffa1cf2560529cdf42b95d Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 13 Aug 2024 09:16:00 +0200 Subject: [PATCH 039/170] Add missing type check to result constructors (#575) The result constructors did not check whether the type of the provided output is the same as the handed in type. In the future, these constructors should be refactored such that we either hand in an output or a simple type, but not both at the same time. This would avoid this check altogether. 
Closes #556 --- jlm/rvsdg/region.cpp | 12 ++++++- tests/jlm/rvsdg/ResultTests.cpp | 57 +++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+), 1 deletion(-) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index b927cf543..330a0e5c5 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -111,6 +111,11 @@ result::result( if (output->node() != region->node()) throw jlm::util::error("Result cannot be added to output."); + if (*Type() != *output->Type()) + { + throw jlm::util::type_error(Type()->debug_string(), output->Type()->debug_string()); + } + output->results.push_back(this); } } @@ -128,6 +133,11 @@ result::result( if (output->node() != region->node()) throw jlm::util::error("Result cannot be added to output."); + if (*Type() != *output->Type()) + { + throw jlm::util::type_error(Type()->debug_string(), output->Type()->debug_string()); + } + output->results.push_back(this); } } @@ -151,7 +161,7 @@ result::create( jlm::rvsdg::structural_output * output, std::shared_ptr type) { - auto result = new jlm::rvsdg::result(region, origin, output, jlm::rvsdg::port(std::move(type))); + auto result = new jlm::rvsdg::result(region, origin, output, std::move(type)); region->append_result(result); return result; } diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index 16a0cd11c..f03e5d590 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -49,3 +49,60 @@ ResultNodeMismatch() } JLM_UNIT_TEST_REGISTER("jlm/rvsdg/ResultTests-ResultNodeMismatch", ResultNodeMismatch) + +static int +ResultInputTypeMismatch() +{ + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + auto stateType = jlm::tests::statetype::Create(); + + jlm::rvsdg::graph rvsdg; + + auto structuralNode = structural_node::create(rvsdg.root(), 1); + auto structuralOutput = jlm::rvsdg::structural_output::create(structuralNode, valueType); + + // Act & Assert 
+ bool exceptionWasCaught = false; + try + { + auto simpleNode = test_op::create(structuralNode->subregion(0), {}, { stateType }); + jlm::rvsdg::result::create( + structuralNode->subregion(0), + simpleNode->output(0), + structuralOutput, + stateType); + // The line below should not be executed as the line above is expected to throw an exception. + assert(false); + } + catch (type_error &) + { + exceptionWasCaught = true; + } + assert(exceptionWasCaught); + + exceptionWasCaught = false; + try + { + auto simpleNode = test_op::create(structuralNode->subregion(0), {}, { stateType }); + jlm::rvsdg::result::create( + structuralNode->subregion(0), + simpleNode->output(0), + structuralOutput, + jlm::rvsdg::port(stateType)); + // The line below should not be executed as the line above is expected to throw an exception. + assert(false); + } + catch (type_error &) + { + exceptionWasCaught = true; + } + assert(exceptionWasCaught); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/ResultTests-ResultInputTypeMismatch", ResultInputTypeMismatch) From a7eb1c73edd232e89f48817639c3f3bd19762014 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Tue, 13 Aug 2024 10:22:53 +0200 Subject: [PATCH 040/170] [AndersenAgnostic] Use new Andersen Configuration setup and change how double checking is performed (#570) This PR is the first step towards getting all the Andersen techniques into main. It makes a couple of changes, without adding any new techniques: - Configurations have a text representation - Configurations are not specified via environment variable, but there is instead a function for getting all possible configurations. - Double-checking against a different configuration is cleaner and faster. `PointerObjectSet`s are cloned, solved separately, and compared directly after solving. - The template stuff used to invoke the worklist solver uses `std::visit` now, making dispatch much nicer. 
- Some additional statistics have been added - Re-orders some of the existing operations in the worklist solver. This is the order they ended up in in the final code when all techniques are added. - Added `Erase` to `HashSet`, which is a thin wrapper around `std::unordered_set::erase()`. - Register tests in `TestAndersen.cpp` individually. --- jlm/llvm/opt/alias-analyses/Andersen.cpp | 346 ++++++++++++------ jlm/llvm/opt/alias-analyses/Andersen.hpp | 97 +++-- .../opt/alias-analyses/PointerObjectSet.cpp | 313 +++++++++++----- .../opt/alias-analyses/PointerObjectSet.hpp | 63 +++- jlm/util/HashSet.hpp | 11 + jlm/util/Statistics.hpp | 2 +- .../llvm/opt/alias-analyses/TestAndersen.cpp | 239 ++++++++---- .../alias-analyses/TestPointerObjectSet.cpp | 23 +- tests/jlm/rvsdg/RegionTests.cpp | 2 +- 9 files changed, 762 insertions(+), 334 deletions(-) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index e91049daa..ead7f10c1 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -24,61 +24,88 @@ IsOrContainsPointerType(const rvsdg::type & type) return IsOrContains(type); } -Andersen::Configuration -Andersen::Configuration::DefaultConfiguration() +std::string +Andersen::Configuration::ToString() const { - Configuration config; - - const auto configString = std::getenv(ENV_CONFIG_OVERRIDE); - if (configString == nullptr) - return config; + std::ostringstream str; + if (EnableOfflineVariableSubstitution_) + str << "OVS_"; + if (EnableOfflineConstraintNormalization_) + str << "NORM_"; + if (Solver_ == Solver::Naive) + { + str << "Solver=Naive_"; + } + else if (Solver_ == Solver::Worklist) + { + str << "Solver=Worklist_"; + str << "Policy="; + str << PointerObjectConstraintSet::WorklistSolverPolicyToString(WorklistSolverPolicy_); + str << "_"; - std::istringstream configStream(configString); - std::string option; - while (true) + if (EnableOnlineCycleDetection_) + str << "OnlineCD_"; + 
} + else { - configStream >> option; - if (configStream.fail()) - break; + JLM_UNREACHABLE("Unknown solver type"); + } + + auto result = str.str(); + result.erase(result.size() - 1, 1); // Remove trailing '_' + return result; +} + +std::vector +Andersen::Configuration::GetAllConfigurations() +{ + std::vector configs; + auto PickOnlineCycleDetection = [&](Configuration config) + { + config.EnableOnlineCycleDetection(false); + configs.push_back(config); + config.EnableOnlineCycleDetection(true); + configs.push_back(config); + }; + auto PickWorklistPolicy = [&](Configuration config) + { using Policy = PointerObjectConstraintSet::WorklistSolverPolicy; + config.SetWorklistSolverPolicy(Policy::LeastRecentlyFired); + PickOnlineCycleDetection(config); + config.SetWorklistSolverPolicy(Policy::TwoPhaseLeastRecentlyFired); + PickOnlineCycleDetection(config); + config.SetWorklistSolverPolicy(Policy::LastInFirstOut); + PickOnlineCycleDetection(config); + config.SetWorklistSolverPolicy(Policy::FirstInFirstOut); + PickOnlineCycleDetection(config); + }; + auto PickOfflineNormalization = [&](Configuration config) + { + config.EnableOfflineConstraintNormalization(false); + configs.push_back(config); + config.EnableOfflineConstraintNormalization(true); + configs.push_back(config); + }; + auto PickSolver = [&](Configuration config) + { + config.SetSolver(Solver::Worklist); + PickWorklistPolicy(config); + config.SetSolver(Solver::Naive); + PickOfflineNormalization(config); + }; + auto PickOfflineVariableSubstitution = [&](Configuration config) + { + config.EnableOfflineVariableSubstitution(false); + PickSolver(config); + config.EnableOfflineVariableSubstitution(true); + PickSolver(config); + }; - if (option == CONFIG_OVS_ON) - config.EnableOfflineVariableSubstitution(true); - else if (option == CONFIG_OVS_OFF) - config.EnableOfflineVariableSubstitution(false); - - else if (option == CONFIG_NORMALIZE_ON) - config.EnableOfflineConstraintNormalization(true); - else if (option == 
CONFIG_NORMALIZE_OFF) - config.EnableOfflineConstraintNormalization(false); - - else if (option == CONFIG_SOLVER_NAIVE) - config.SetSolver(Solver::Naive); - else if (option == CONFIG_SOLVER_WL) - config.SetSolver(Solver::Worklist); - - else if (option == CONFIG_WL_POLICY_LRF) - config.SetWorklistSolverPolicy(Policy::LeastRecentlyFired); - else if (option == CONFIG_WL_POLICY_TWO_PHASE_LRF) - config.SetWorklistSolverPolicy(Policy::TwoPhaseLeastRecentlyFired); - else if (option == CONFIG_WL_POLICY_FIFO) - config.SetWorklistSolverPolicy(Policy::FirstInFirstOut); - else if (option == CONFIG_WL_POLICY_LIFO) - config.SetWorklistSolverPolicy(Policy::LastInFirstOut); - - else if (option == CONFIG_ONLINE_CYCLE_DETECTION_ON) - config.EnableOnlineCycleDetection(true); - else if (option == CONFIG_ONLINE_CYCLE_DETECTION_OFF) - config.EnableOnlineCycleDetection(false); - else - { - std::cerr << "Unknown config option string: '" << option << "'" << std::endl; - JLM_UNREACHABLE("Andersen default config override string broken"); - } - } + // Adds one configuration for all valid combinations of features + PickOfflineVariableSubstitution(NaiveSolverConfiguration()); - return config; + return configs; } /** @@ -86,33 +113,58 @@ Andersen::Configuration::DefaultConfiguration() */ class Andersen::Statistics final : public util::Statistics { - inline static const char * NumPointerObjects_ = "#PointerObjects"; - inline static const char * NumRegisterPointerObjects_ = "#RegisterPointerObjects"; - inline static const char * NumRegistersMappedToPointerObject_ = "#RegistersMappedToPointerObject"; - - inline static const char * NumSupersetConstraints_ = "#SupersetConstraints"; - inline static const char * NumStoreConstraints_ = "#StoreConstraints"; - inline static const char * NumLoadConstraints_ = "#LoadConstraints"; - inline static const char * NumFunctionCallConstraints_ = "#FunctionCallConstraints"; - - inline static const char * NumUnificationsOvs_ = "#Unifications(OVS)"; - inline static 
const char * NumConstraintsRemovedOfflineNorm_ = "#ConstraintsRemoved(OfflineNorm)"; - - inline static const char * NumNaiveSolverIterations_ = "#NaiveSolverIterations"; - - inline static const char * WorklistPolicy_ = "WorklistPolicy"; - inline static const char * NumWorklistSolverWorkItems_ = "#WorklistSolverWorkItems"; - inline static const char * NumOnlineCyclesDetected_ = "#OnlineCyclesDetected"; - inline static const char * NumOnlineCycleUnifications_ = "#OnlineCycleUnifications"; - - inline static const char * AnalysisTimer_ = "AnalysisTimer"; - inline static const char * SetAndConstraintBuildingTimer_ = "SetAndConstraintBuildingTimer"; - inline static const char * OfflineVariableSubstitutionTimer_ = "OVSTimer"; - inline static const char * OfflineConstraintNormalizationTimer_ = "OfflineNormTimer"; - inline static const char * ConstraintSolvingNaiveTimer_ = "ConstraintSolvingNaiveTimer"; - inline static const char * ConstraintSolvingWorklistTimer_ = "ConstraintSolvingWorklistTimer"; - inline static const char * PointsToGraphConstructionTimer_ = "PointsToGraphConstructionTimer"; - inline static const char * PointsToGraphConstructionExternalToEscapedTimer_ = + static constexpr const char * NumPointerObjects_ = "#PointerObjects"; + static constexpr const char * NumPointerObjectsWithImplicitPointees_ = + "#PointerObjectsWithImplicitPointees"; + static constexpr const char * NumRegisterPointerObjects_ = "#RegisterPointerObjects"; + static constexpr const char * NumRegistersMappedToPointerObject_ = + "#RegistersMappedToPointerObject"; + + static constexpr const char * NumBaseConstraints_ = "#BaseConstraints"; + static constexpr const char * NumSupersetConstraints_ = "#SupersetConstraints"; + static constexpr const char * NumStoreConstraints_ = "#StoreConstraints"; + static constexpr const char * NumLoadConstraints_ = "#LoadConstraints"; + static constexpr const char * NumFunctionCallConstraints_ = "#FunctionCallConstraints"; + static constexpr const char * 
NumFlagConstraints_ = "#FlagConstraints"; + + static constexpr const char * Configuration_ = "Configuration"; + + // Offline technique statistics + static constexpr const char * NumUnificationsOvs_ = "#Unifications(OVS)"; + static constexpr const char * NumConstraintsRemovedOfflineNorm_ = + "#ConstraintsRemoved(OfflineNorm)"; + + // Solver statistics + static constexpr const char * NumNaiveSolverIterations_ = "#NaiveSolverIterations"; + + static constexpr const char * WorklistPolicy_ = "WorklistPolicy"; + static constexpr const char * NumWorklistSolverWorkItemsPopped_ = + "#WorklistSolverWorkItemsPopped"; + static constexpr const char * NumWorklistSolverWorkItemsNewPointees_ = + "#WorklistSolverWorkItemsNewPointees"; + + // Online technique statistics + static constexpr const char * NumOnlineCyclesDetected_ = "#OnlineCyclesDetected"; + static constexpr const char * NumOnlineCycleUnifications_ = "#OnlineCycleUnifications"; + + // After solving statistics + static constexpr const char * NumEscapedMemoryObjects_ = "#EscapedMemoryObjects"; + static constexpr const char * NumUnificationRoots_ = "#UnificationRoots"; + // These next measurements only count flags and pointees of unification roots + static constexpr const char * NumPointsToExternalFlags_ = "#PointsToExternalFlags"; + static constexpr const char * NumPointeesEscapingFlags_ = "#PointeesEscapingFlags"; + static constexpr const char * NumExplicitPointees_ = "#ExplicitPointees"; + // If a pointee is both implicit (through PointsToExternal flag) and explicit + static constexpr const char * NumDoubledUpPointees_ = "#DoubledUpPointees"; + + static constexpr const char * AnalysisTimer_ = "AnalysisTimer"; + static constexpr const char * SetAndConstraintBuildingTimer_ = "SetAndConstraintBuildingTimer"; + static constexpr const char * OfflineVariableSubstitutionTimer_ = "OVSTimer"; + static constexpr const char * OfflineConstraintNormalizationTimer_ = "OfflineNormTimer"; + static constexpr const char * 
ConstraintSolvingNaiveTimer_ = "ConstraintSolvingNaiveTimer"; + static constexpr const char * ConstraintSolvingWorklistTimer_ = "ConstraintSolvingWorklistTimer"; + static constexpr const char * PointsToGraphConstructionTimer_ = "PointsToGraphConstructionTimer"; + static constexpr const char * PointsToGraphConstructionExternalToEscapedTimer_ = "PointsToGraphConstructionExternalToEscapedTimer"; public: @@ -143,6 +195,9 @@ class Andersen::Statistics final : public util::Statistics GetTimer(SetAndConstraintBuildingTimer_).stop(); AddMeasurement(NumPointerObjects_, set.NumPointerObjects()); + AddMeasurement( + NumPointerObjectsWithImplicitPointees_, + set.NumPointerObjectsWithImplicitPointees()); AddMeasurement( NumRegisterPointerObjects_, set.NumPointerObjectsOfKind(PointerObjectKind::Register)); @@ -159,10 +214,12 @@ class Andersen::Statistics final : public util::Statistics numLoadConstraints += std::holds_alternative(constraint); numFunctionCallConstraints += std::holds_alternative(constraint); } + AddMeasurement(NumBaseConstraints_, constraints.NumBaseConstraints()); AddMeasurement(NumSupersetConstraints_, numSupersetConstraints); AddMeasurement(NumStoreConstraints_, numStoreConstraints); AddMeasurement(NumLoadConstraints_, numLoadConstraints); AddMeasurement(NumFunctionCallConstraints_, numFunctionCallConstraints); + AddMeasurement(NumFlagConstraints_, constraints.NumFlagConstraints()); } void @@ -222,7 +279,8 @@ class Andersen::Statistics final : public util::Statistics PointerObjectConstraintSet::WorklistSolverPolicyToString(statistics.Policy)); // How many work items were popped from the worklist in total - AddMeasurement(NumWorklistSolverWorkItems_, statistics.NumWorkItemsPopped); + AddMeasurement(NumWorklistSolverWorkItemsPopped_, statistics.NumWorkItemsPopped); + AddMeasurement(NumWorklistSolverWorkItemsNewPointees_, statistics.NumWorkItemNewPointees); if (statistics.NumOnlineCyclesDetected) AddMeasurement(NumOnlineCyclesDetected_, 
*statistics.NumOnlineCyclesDetected); @@ -231,6 +289,53 @@ class Andersen::Statistics final : public util::Statistics AddMeasurement(NumOnlineCycleUnifications_, *statistics.NumOnlineCycleUnifications); } + void + AddStatisticFromConfiguration(const Configuration & config) + { + AddMeasurement(Configuration_, config.ToString()); + } + + void + AddStatisticsFromSolution(const PointerObjectSet & set) + { + size_t numEscapedMemoryObjects = 0; + size_t numUnificationRoots = 0; + size_t numPointsToExternalFlags = 0; + size_t numPointeesEscapingFlags = 0; + size_t numExplicitPointees = 0; + size_t numDoubleUpPointees = 0; + + for (PointerObjectIndex i = 0; i < set.NumPointerObjects(); i++) + { + if (set.HasEscaped(i)) + numEscapedMemoryObjects++; + + if (!set.IsUnificationRoot(i)) + continue; + + numUnificationRoots++; + if (set.IsPointingToExternal(i)) + numPointsToExternalFlags++; + if (set.HasPointeesEscaping(i)) + numPointeesEscapingFlags++; + + const auto & pointees = set.GetPointsToSet(i); + numExplicitPointees += pointees.Size(); + + // If the PointsToExternal flag is set, any explicit pointee that has escaped is doubled up + if (set.IsPointingToExternal(i)) + for (auto pointee : pointees.Items()) + if (set.HasEscaped(pointee)) + numDoubleUpPointees++; + } + AddMeasurement(NumEscapedMemoryObjects_, numEscapedMemoryObjects); + AddMeasurement(NumUnificationRoots_, numUnificationRoots); + AddMeasurement(NumPointsToExternalFlags_, numPointsToExternalFlags); + AddMeasurement(NumPointeesEscapingFlags_, numPointeesEscapingFlags); + AddMeasurement(NumExplicitPointees_, numExplicitPointees); + AddMeasurement(NumDoubledUpPointees_, numDoubleUpPointees); + } + void StartPointsToGraphConstructionStatistics() { @@ -933,32 +1038,37 @@ Andersen::AnalyzeModule(const RvsdgModule & module, Statistics & statistics) } void -Andersen::SolveConstraints(const Configuration & config, Statistics & statistics) +Andersen::SolveConstraints( + PointerObjectConstraintSet & constraints, + const 
Configuration & config, + Statistics & statistics) { + statistics.AddStatisticFromConfiguration(config); + if (config.IsOfflineVariableSubstitutionEnabled()) { statistics.StartOfflineVariableSubstitution(); - auto numUnifications = Constraints_->PerformOfflineVariableSubstitution(); + auto numUnifications = constraints.PerformOfflineVariableSubstitution(); statistics.StopOfflineVariableSubstitution(numUnifications); } if (config.IsOfflineConstraintNormalizationEnabled()) { statistics.StartOfflineConstraintNormalization(); - auto numConstraintsRemoved = Constraints_->NormalizeConstraints(); + auto numConstraintsRemoved = constraints.NormalizeConstraints(); statistics.StopOfflineConstraintNormalization(numConstraintsRemoved); } if (config.GetSolver() == Configuration::Solver::Naive) { statistics.StartConstraintSolvingNaiveStatistics(); - size_t numIterations = Constraints_->SolveNaively(); + size_t numIterations = constraints.SolveNaively(); statistics.StopConstraintSolvingNaiveStatistics(numIterations); } else if (config.GetSolver() == Configuration::Solver::Worklist) { statistics.StartConstraintSolvingWorklistStatistics(); - auto worklistStatistics = Constraints_->SolveUsingWorklist( + auto worklistStatistics = constraints.SolveUsingWorklist( config.GetWorklistSoliverPolicy(), config.IsOnlineCycleDetectionEnabled()); statistics.StopConstraintSolvingWorklistStatistics(worklistStatistics); @@ -974,29 +1084,30 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis statistics->StartAndersenStatistics(module.Rvsdg()); // Check environment variables for debugging flags - const bool checkAgainstNaive = - std::getenv(ENV_COMPARE_SOLVE_NAIVE) && Config_ != Configuration::NaiveSolverConfiguration(); + const bool testAllConfigs = std::getenv(ENV_TEST_ALL_CONFIGS); + const bool doubleCheck = std::getenv(ENV_DOUBLE_CHECK); + const bool dumpGraphs = std::getenv(ENV_DUMP_SUBSET_GRAPH); util::GraphWriter writer; AnalyzeModule(module, *statistics); - // 
If double-checking against the naive solver, make a copy of the original constraint set + // If solving multiple times, make a copy of the original constraint set std::pair, std::unique_ptr> copy; - if (checkAgainstNaive) + if (testAllConfigs || doubleCheck) copy = Constraints_->Clone(); // Draw subset graph both before and after solving if (dumpGraphs) Constraints_->DrawSubsetGraph(writer); - SolveConstraints(Config_, *statistics); + SolveConstraints(*Constraints_, Config_, *statistics); + statistics->AddStatisticsFromSolution(*Set_); if (dumpGraphs) { auto & graph = Constraints_->DrawSubsetGraph(writer); graph.AppendToLabel("After Solving"); - writer.OutputAllGraphs(std::cout, util::GraphOutputFormat::Dot); } @@ -1006,38 +1117,45 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis statisticsCollector.CollectDemandedStatistics(std::move(statistics)); // Solve again if double-checking against naive is enabled - if (checkAgainstNaive) + if (testAllConfigs || doubleCheck) { - std::cerr << "Double checking Andersen analysis using naive solving" << std::endl; - // Restore the problem to before solving started - Set_ = std::move(copy.first); - Constraints_ = std::move(copy.second); - - // Create a separate Statistics instance for naive statistics - auto naiveStatistics = Statistics::Create(module.SourceFileName()); - SolveConstraints(Configuration::NaiveSolverConfiguration(), *naiveStatistics); + if (doubleCheck) + std::cerr << "Double checking Andersen analysis using naive solving" << std::endl; - auto naiveResult = ConstructPointsToGraphFromPointerObjectSet(*Set_, *naiveStatistics); + // If only double-checking, use the naive configuration + std::vector configs; + if (testAllConfigs) + configs = Configuration::GetAllConfigurations(); + else + configs.push_back(Configuration::NaiveSolverConfiguration()); - statisticsCollector.CollectDemandedStatistics(std::move(naiveStatistics)); + // If testing all, benchmarking is being done, so do 50 
iterations of all configurations. + // Double-checking against Set_ only needs to be done once per configuration + const auto iterations = testAllConfigs ? 50 : 1; - // Check if the PointsToGraphs are identical - bool error = false; - if (!naiveResult->IsSupergraphOf(*result)) - { - std::cerr << "The naive PointsToGraph is NOT a supergraph of the PointsToGraph" << std::endl; - error = true; - } - if (!result->IsSupergraphOf(*naiveResult)) - { - std::cerr << "The PointsToGraph is NOT a supergraph of the naive PointsToGraph" << std::endl; - error = true; - } - if (error) + for (auto i = 0; i < iterations; i++) { - std::cout << PointsToGraph::ToDot(*result) << std::endl; - std::cout << PointsToGraph::ToDot(*naiveResult) << std::endl; - JLM_UNREACHABLE("PointsToGraph double checking uncovered differences!"); + for (const auto & config : configs) + { + // Create a clone of the unsolved pointer object set and constraint set + auto workingCopy = copy.second->Clone(); + // These statistics will only contain solving data + auto solvingStats = Statistics::Create(module.SourceFileName()); + SolveConstraints(*workingCopy.second, config, *solvingStats); + solvingStats->AddStatisticsFromSolution(*workingCopy.first); + statisticsCollector.CollectDemandedStatistics(std::move(solvingStats)); + + // Only double check on the first iteration + if (doubleCheck && i == 0) + { + if (workingCopy.first->HasIdenticalSolAs(*Set_)) + continue; + std::cerr << "Solving with original config: " << Config_.ToString() + << " did not produce the same solution as the config " << config.ToString() + << std::endl; + JLM_UNREACHABLE("Andersen solver double checking uncovered differences!"); + } + } } } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index b2bf90ce3..2618db14d 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -32,35 +32,22 @@ class Andersen final : public AliasAnalysis public: /** - * 
Environment variable that will trigger double checking of the analysis, - * by running analysis again with the naive solver and no extra processing. - * Any differences in the produced PointsToGraph result in an error. + * Environment variable that when set, triggers analyzing the program with every single + * valid combination of Configuration flags. */ - static inline const char * const ENV_COMPARE_SOLVE_NAIVE = "JLM_ANDERSEN_COMPARE_SOLVE_NAIVE"; + static inline const char * const ENV_TEST_ALL_CONFIGS = "JLM_ANDERSEN_TEST_ALL_CONFIGS"; /** - * Environment variable that will trigger dumping the subset graph before and after solving. + * Environment variable that will trigger double checking of the analysis. + * If ENV_TEST_ALL_CONFIGS is set, the output is double checked against them all. + * Otherwise, the output is double checked only against the default naive solver. */ - static inline const char * const ENV_DUMP_SUBSET_GRAPH = "JLM_ANDERSEN_DUMP_SUBSET_GRAPH"; + static inline const char * const ENV_DOUBLE_CHECK = "JLM_ANDERSEN_DOUBLE_CHECK"; /** - * Environment variable for overriding the default configuration. - * The variable should something look like - * "+OVS +Normalize -OnlineCD Solver=Worklist WLPolicy=LRF" + * Environment variable that will trigger dumping the subset graph before and after solving. 
*/ - static inline const char * const ENV_CONFIG_OVERRIDE = "JLM_ANDERSEN_CONFIG_OVERRIDE"; - static inline const char * const CONFIG_OVS_ON = "+OVS"; - static inline const char * const CONFIG_OVS_OFF = "-OVS"; - static inline const char * const CONFIG_NORMALIZE_ON = "+Normalize"; - static inline const char * const CONFIG_NORMALIZE_OFF = "-Normalize"; - static inline const char * const CONFIG_SOLVER_WL = "Solver=Worklist"; - static inline const char * const CONFIG_SOLVER_NAIVE = "Solver=Naive"; - static inline const char * const CONFIG_WL_POLICY_LRF = "WLPolicy=LRF"; - static inline const char * const CONFIG_WL_POLICY_TWO_PHASE_LRF = "WLPolicy=2LRF"; - static inline const char * const CONFIG_WL_POLICY_FIFO = "WLPolicy=FIFO"; - static inline const char * const CONFIG_WL_POLICY_LIFO = "WLPolicy=LIFO"; - static inline const char * const CONFIG_ONLINE_CYCLE_DETECTION_ON = "+OnlineCD"; - static inline const char * const CONFIG_ONLINE_CYCLE_DETECTION_OFF = "-OnlineCD"; + static inline const char * const ENV_DUMP_SUBSET_GRAPH = "JLM_ANDERSEN_DUMP_SUBSET_GRAPH"; /** * class for configuring the Andersen pass, such as what solver to use. @@ -77,20 +64,6 @@ class Andersen final : public AliasAnalysis Worklist }; - [[nodiscard]] bool - operator==(const Configuration & other) const noexcept - { - return EnableOfflineVariableSubstitution_ == other.EnableOfflineVariableSubstitution_ - && EnableOfflineConstraintNormalization_ == other.EnableOfflineConstraintNormalization_ - && Solver_ == other.Solver_ && WorklistSolverPolicy_ == other.WorklistSolverPolicy_; - } - - [[nodiscard]] bool - operator!=(const Configuration & other) const noexcept - { - return !operator==(other); - } - /** * Sets which solver algorithm to use. * Not all solvers are compatible with all online techniques. 
@@ -161,7 +134,6 @@ class Andersen final : public AliasAnalysis /** * Enables or disables online cycle detection in the Worklist solver, as described by * Pearce, 2003: "Online cycle detection and difference propagation for pointer analysis" - * Only used by the worklist solver. * It detects all cycles, so it can not be combined with other cycle detection techniques. */ void @@ -176,12 +148,25 @@ class Andersen final : public AliasAnalysis return EnableOnlineCycleDetection_; } + [[nodiscard]] std::string + ToString() const; + /** - * Creates the default Andersen constraint set solver configuration - * @return the solver configuration + * @return the default configuration */ - [[nodiscard]] static Configuration - DefaultConfiguration(); + static Configuration + DefaultConfiguration() + { + Configuration config; + config.EnableOfflineVariableSubstitution(true); + // Constraints are normalized inside the Worklist's representation either way + config.EnableOfflineConstraintNormalization(false); + config.SetSolver(Solver::Worklist); + config.SetWorklistSolverPolicy( + PointerObjectConstraintSet::WorklistSolverPolicy::LeastRecentlyFired); + config.EnableOnlineCycleDetection(false); + return config; + } /** * Creates a solver configuration using the naive solver, @@ -191,21 +176,28 @@ class Andersen final : public AliasAnalysis [[nodiscard]] static Configuration NaiveSolverConfiguration() noexcept { - auto config = Configuration(); + Configuration config; config.EnableOfflineVariableSubstitution(false); config.EnableOfflineConstraintNormalization(false); config.SetSolver(Solver::Naive); - config.EnableOnlineCycleDetection(false); return config; } + /** + * @return a list containing all possible Configurations, + * avoiding useless combinations of techniques. 
+ */ + [[nodiscard]] static std::vector + GetAllConfigurations(); + private: - bool EnableOfflineVariableSubstitution_ = true; - bool EnableOfflineConstraintNormalization_ = true; - Solver Solver_ = Solver::Worklist; + // All techniques are turned off by default + bool EnableOfflineVariableSubstitution_ = false; + bool EnableOfflineConstraintNormalization_ = false; + Solver Solver_ = Solver::Naive; PointerObjectConstraintSet::WorklistSolverPolicy WorklistSolverPolicy_ = PointerObjectConstraintSet::WorklistSolverPolicy::LeastRecentlyFired; - bool EnableOnlineCycleDetection_ = true; + bool EnableOnlineCycleDetection_ = false; }; ~Andersen() noexcept override = default; @@ -362,13 +354,16 @@ class Andersen final : public AliasAnalysis AnalyzeModule(const RvsdgModule & module, Statistics & statistics); /** - * Works with the members Set_ and Constraints_, and solves the constraint problem - * using the techniques and solver specified in the given configuration + * Solves the constraint problem using the techniques and solver specified in the given config. 
+ * @param constraints the instance of PointerObjectConstraintSet being operated on * @param config settings for the solving * @param statistics the Statistics instance used to track info about the analysis */ - void - SolveConstraints(const Configuration & config, Statistics & statistics); + static void + SolveConstraints( + PointerObjectConstraintSet & constraints, + const Configuration & config, + Statistics & statistics); Configuration Config_ = Configuration::DefaultConfiguration(); diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index ba8d94d71..c103da20b 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -7,7 +7,6 @@ #include #include -#include #include #include @@ -45,6 +44,17 @@ PointerObjectSet::NumPointerObjects() const noexcept return PointerObjects_.size(); } +size_t +PointerObjectSet::NumPointerObjectsWithImplicitPointees() const noexcept +{ + size_t count = 0; + for (auto & pointerObject : PointerObjects_) + { + count += pointerObject.CanTrackPointeesImplicitly(); + } + return count; +} + size_t PointerObjectSet::NumPointerObjectsOfKind(PointerObjectKind kind) const noexcept { @@ -264,6 +274,13 @@ PointerObjectSet::MarkAsPointingToExternal(PointerObjectIndex index) return true; } +bool +PointerObjectSet::CanTrackPointeesImplicitly(PointerObjectIndex index) const noexcept +{ + auto root = GetUnificationRoot(index); + return PointerObjects_[root].CanTrackPointeesImplicitly(); +} + PointerObjectIndex PointerObjectSet::GetUnificationRoot(PointerObjectIndex index) const noexcept { @@ -367,12 +384,62 @@ PointerObjectSet::MakePointsToSetSuperset(PointerObjectIndex superset, PointerOb return modified; } +bool +PointerObjectSet::IsPointingTo(PointerObjectIndex pointer, PointerObjectIndex pointee) const +{ + // Check if it is an implicit pointee + if (IsPointingToExternal(pointer) && HasEscaped(pointee)) + { + return true; + } + 
+ // Otherwise, check if it is an explicit pointee + if (GetPointsToSet(pointer).Contains(pointee)) + { + return true; + } + + return false; +} + std::unique_ptr PointerObjectSet::Clone() const { return std::make_unique(*this); } +bool +PointerObjectSet::HasIdenticalSolAs(const PointerObjectSet & other) const +{ + if (NumPointerObjects() != other.NumPointerObjects()) + return false; + + // Check that each pointer object has the same Sol set in both sets + for (PointerObjectIndex i = 0; i < NumPointerObjects(); i++) + { + // Either i escapes in both sets, or in neither set + if (HasEscaped(i) != other.HasEscaped(i)) + return false; + + // Either i points to external in both sets, or in neither set + if (IsPointingToExternal(i) != other.IsPointingToExternal(i)) + return false; + + // Each explicit pointee of i in one set, should also be a pointee of i in the opposite set + for (auto thisPointee : GetPointsToSet(i).Items()) + { + if (!other.IsPointingTo(i, thisPointee)) + return false; + } + for (auto otherPointee : other.GetPointsToSet(i).Items()) + { + if (!IsPointingTo(i, otherPointee)) + return false; + } + } + return true; +} + // Makes P(superset) a superset of P(subset) bool SupersetConstraint::ApplyDirectly(PointerObjectSet & set) @@ -765,6 +832,38 @@ PointerObjectConstraintSet::GetConstraints() const noexcept return Constraints_; } +size_t +PointerObjectConstraintSet::NumBaseConstraints() const noexcept +{ + size_t numBaseConstraints = 0; + for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) + { + if (Set_.IsUnificationRoot(i)) + numBaseConstraints += Set_.GetPointsToSet(i).Size(); + } + return numBaseConstraints; +} + +size_t +PointerObjectConstraintSet::NumFlagConstraints() const noexcept +{ + size_t numFlagConstraints = 0; + for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) + { + if (Set_.HasEscaped(i)) + numFlagConstraints++; + + if (!Set_.IsUnificationRoot(i)) + continue; + + if (Set_.IsPointingToExternal(i)) + 
numFlagConstraints++; + if (Set_.HasPointeesEscaping(i)) + numFlagConstraints++; + } + return numFlagConstraints; +} + /** * Creates a label describing the PointerObject with the given \p index in the given \p set. * The label includes the index and the PointerObjectKind. @@ -1006,9 +1105,9 @@ PointerObjectConstraintSet::CreateOvsSubsetGraph() std::vector> successors(totalNodeCount); std::vector isDirectNode(totalNodeCount, false); - // Initialize all registers as direct nodes + // Nodes representing registers can be direct nodes, but only if they have empty points-to sets for (auto [_, index] : Set_.GetRegisterMap()) - isDirectNode[index] = true; + isDirectNode[index] = Set_.GetPointsToSet(index).IsEmpty(); // Mark all function argument register nodes as not direct for (auto [lambda, _] : Set_.GetFunctionMap()) @@ -1029,7 +1128,8 @@ PointerObjectConstraintSet::CreateOvsSubsetGraph() auto superset = Set_.GetUnificationRoot(supersetConstraint->GetSuperset()); successors[subset].Insert(superset); - // Also add an edge for *subset -> *superset + // Also add an edge for *subset -> *superset, from the original OVS paper. + // It is not mentioned in Hardekopf and Lin, 2007: The Ant and the Grasshopper. successors[subset + derefNodeOffset].Insert(superset + derefNodeOffset); } else if (auto * storeConstraint = std::get_if(&constraint)) @@ -1264,6 +1364,9 @@ template void PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { + // Check that the provided worklist implementation inherits from Worklist + static_assert(std::is_base_of_v, Worklist>); + // Create auxiliary subset graph. // All edges must have their tail be a unification root (non-root nodes have no successors). // If supersetEdges[x] contains y, (x -> y), that means P(y) supseteq P(x) @@ -1312,6 +1415,15 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } } + // Makes superset point to everything subset points to, and propagates the PointsToEscaped flag. 
+ // Returns true if any pointees were new, or the flag was new. + // Does not add superset to the worklist. + const auto & MakePointsToSetSuperset = [&](PointerObjectIndex superset, + PointerObjectIndex subset) -> bool + { + return Set_.MakePointsToSetSuperset(superset, subset); + }; + // Performs unification safely while the worklist algorithm is running. // Ensures all constraints end up being owned by the new root. // It does NOT redirect constraints owned by other nodes, referencing a or b. @@ -1370,6 +1482,22 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) worklist.PushWorkItem(i); } + // Helper function for marking a PointerObject such that all its pointees will escape + const auto MarkAsPointeesEscaping = [&](PointerObjectIndex index) + { + index = Set_.GetUnificationRoot(index); + if (Set_.MarkAsPointeesEscaping(index)) + worklist.PushWorkItem(index); + }; + + // Helper function for flagging a pointer as pointing to external. Adds to the worklist if changed + const auto MarkAsPointsToExternal = [&](PointerObjectIndex index) + { + index = Set_.GetUnificationRoot(index); + if (Set_.MarkAsPointingToExternal(index)) + worklist.PushWorkItem(index); + }; + // Helper function for adding superset edges, propagating everything currently in the subset. // The superset's root is added to the worklist if its points-to set or flags are changed. const auto AddSupersetEdge = [&](PointerObjectIndex superset, PointerObjectIndex subset) @@ -1398,7 +1526,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } // A new edge was added, propagate points to-sets. 
If the superset changes, add to the worklist - if (Set_.MakePointsToSetSuperset(superset, subset)) + if (MakePointsToSetSuperset(superset, subset)) worklist.PushWorkItem(superset); }; @@ -1420,22 +1548,6 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) newSupersetEdges.Clear(); }; - // Helper function for marking a PointerObject such that all its pointees will escape - const auto MarkAsPointeesEscaping = [&](PointerObjectIndex index) - { - index = Set_.GetUnificationRoot(index); - if (Set_.MarkAsPointeesEscaping(index)) - worklist.PushWorkItem(index); - }; - - // Helper function for flagging a pointer as pointing to external. Adds to the worklist if changed - const auto MarkAsPointsToExternal = [&](PointerObjectIndex index) - { - index = Set_.GetUnificationRoot(index); - if (Set_.MarkAsPointingToExternal(index)) - worklist.PushWorkItem(index); - }; - // Ensure that all functions that have already escaped have informed their arguments and results // The worklist will only inform functions if their HasEscaped flag changes EscapedFunctionConstraint::PropagateEscapedFunctionsDirectly(Set_); @@ -1456,11 +1568,66 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if (!Set_.IsUnificationRoot(node)) return; + auto & nodePointees = Set_.GetPointsToSet(node); + statistics.NumWorkItemNewPointees += nodePointees.Size(); + + // If n is marked as PointeesEscaping, add the escaped flag to all pointees + if (Set_.HasPointeesEscaping(node)) + { + for (const auto pointee : nodePointees.Items()) + { + const auto pointeeRoot = Set_.GetUnificationRoot(pointee); + + // Marking a node as escaped will imply two flags on the unification root: + // - PointeesEscaping + // - PointsToExternal + const bool rootAlreadyHasFlags = + Set_.HasPointeesEscaping(pointeeRoot) && Set_.IsPointingToExternal(pointeeRoot); + + // Mark the pointee itself as escaped, not the pointee's unifiction root! 
+ if (!Set_.MarkAsEscaped(pointee)) + continue; + + // If the PointerObject we just marked as escaped is a function, inform it about escaping + if (Set_.GetPointerObjectKind(pointee) == PointerObjectKind::FunctionMemoryObject) + HandleEscapedFunction(Set_, pointee, MarkAsPointeesEscaping, MarkAsPointsToExternal); + + // If the pointee's unification root previously didn't have both the flags implied by + // having one of the unification members escaping, add the root to the worklist + if (!rootAlreadyHasFlags) + { + JLM_ASSERT(Set_.HasPointeesEscaping(pointeeRoot)); + JLM_ASSERT(Set_.IsPointingToExternal(pointeeRoot)); + worklist.PushWorkItem(pointeeRoot); + } + } + } + + // Propagate P(n) along all edges n -> superset + auto supersets = supersetEdges[node].Items(); + for (auto it = supersets.begin(); it != supersets.end();) + { + const auto supersetParent = Set_.GetUnificationRoot(*it); + + // Remove self-edges + if (supersetParent == node) + { + it = supersetEdges[node].Erase(it); + continue; + } + + // The current it-edge should be kept as is, prepare "it" for the next iteration. + ++it; + + if (MakePointsToSetSuperset(supersetParent, node)) + worklist.PushWorkItem(supersetParent); + } + // Stores on the form *n = value. 
for (const auto value : storeConstraints[node].Items()) { // This loop ensures *P(n) supseteq P(value) - for (const auto pointee : Set_.GetPointsToSet(node).Items()) + for (const auto pointee : nodePointees.Items()) QueueNewSupersetEdge(pointee, value); // If P(n) contains "external", the contents of the written value escapes @@ -1472,7 +1639,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) for (const auto value : loadConstraints[node].Items()) { // This loop ensures P(value) supseteq *P(n) - for (const auto pointee : Set_.GetPointsToSet(node).Items()) + for (const auto pointee : nodePointees.Items()) QueueNewSupersetEdge(value, pointee); // If P(n) contains "external", the loaded value may also point to external @@ -1484,7 +1651,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) for (const auto callNode : callConstraints[node].Items()) { // Connect the inputs and outputs of the callNode to every possible function pointee - for (const auto pointee : Set_.GetPointsToSet(node).Items()) + for (const auto pointee : nodePointees.Items()) { const auto kind = Set_.GetPointerObjectKind(pointee); if (kind == PointerObjectKind::ImportMemoryObject) @@ -1507,47 +1674,6 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) MarkAsPointsToExternal); } - // Propagate P(n) along all edges n -> superset - for (const auto superset : supersetEdges[node].Items()) - { - // FIXME: replace supersets by their unification root, to remove duplicate edges - const auto supersetParent = Set_.GetUnificationRoot(superset); - if (Set_.MakePointsToSetSuperset(supersetParent, node)) - worklist.PushWorkItem(supersetParent); - } - - // If n is marked as PointeesEscaping, add the escaped flag to all pointees - if (Set_.HasPointeesEscaping(node)) - { - for (const auto pointee : Set_.GetPointsToSet(node).Items()) - { - const auto pointeeRoot = Set_.GetUnificationRoot(pointee); - - // Marking a node as escaped 
will imply two flags on the unification root: - // - PointeesEscaping - // - PointsToExternal - const bool rootAlreadyHasFlags = - Set_.HasPointeesEscaping(pointeeRoot) && Set_.IsPointingToExternal(pointeeRoot); - - // Mark the pointee itself as escaped, not the pointee's unifiction root! - if (!Set_.MarkAsEscaped(pointee)) - continue; - - // If the PointerObject we just marked as escaped is a function, inform it about escaping - if (Set_.GetPointerObjectKind(pointee) == PointerObjectKind::FunctionMemoryObject) - HandleEscapedFunction(Set_, pointee, MarkAsPointeesEscaping, MarkAsPointsToExternal); - - // If the pointee's unification root previously didn't have both the flags implied by - // having one of the unification members escaping, add the root to the worklist - if (!rootAlreadyHasFlags) - { - JLM_ASSERT(Set_.HasPointeesEscaping(pointeeRoot)); - JLM_ASSERT(Set_.IsPointingToExternal(pointeeRoot)); - worklist.PushWorkItem(pointeeRoot); - } - } - } - // Add all new superset edges, which also propagates points-to sets immediately // and possibly performs unifications to eliminate cycles. // Any unified nodes, or nodes with updated points-to sets, are added to the worklist. @@ -1570,41 +1696,44 @@ PointerObjectConstraintSet::SolveUsingWorklist( bool enableOnlineCycleDetection) { - auto DispatchOnlineCycleDetection = [&](auto enabled) + // Takes all parameters as compile time types. + // tWorklist is a pointer to one of the Worklist implementations. 
+ // the rest are instances of std::bool_constant, either std::true_type or std::false_type + const auto Dispatch = [&](auto tWorklist, auto tOnlineCycleDetection) -> WorklistStatistics { - constexpr bool enableOnlineCycleDetection = decltype(enabled)::value; + using Worklist = std::remove_pointer_t; + constexpr bool vOnlineCycleDetection = decltype(tOnlineCycleDetection)::value; WorklistStatistics statistics(policy); - if (policy == WorklistSolverPolicy::LeastRecentlyFired) - { - RunWorklistSolver, enableOnlineCycleDetection>( - statistics); - } - else if (policy == WorklistSolverPolicy::TwoPhaseLeastRecentlyFired) - { - RunWorklistSolver, enableOnlineCycleDetection>( - statistics); - } - else if (policy == WorklistSolverPolicy::FirstInFirstOut) - { - RunWorklistSolver, enableOnlineCycleDetection>( - statistics); - } - else if (policy == WorklistSolverPolicy::LastInFirstOut) - { - RunWorklistSolver, enableOnlineCycleDetection>( - statistics); - } - else - JLM_UNREACHABLE("Unknown WorklistSolverPolicy"); - + RunWorklistSolver(statistics); return statistics; }; + std::variant< + typename util::LrfWorklist *, + typename util::TwoPhaseLrfWorklist *, + typename util::LifoWorklist *, + typename util::FifoWorklist *> + policyVariant; + + if (policy == WorklistSolverPolicy::LeastRecentlyFired) + policyVariant = (util::LrfWorklist *)nullptr; + else if (policy == WorklistSolverPolicy::TwoPhaseLeastRecentlyFired) + policyVariant = (util::TwoPhaseLrfWorklist *)nullptr; + else if (policy == WorklistSolverPolicy::LastInFirstOut) + policyVariant = (util::LifoWorklist *)nullptr; + else if (policy == WorklistSolverPolicy::FirstInFirstOut) + policyVariant = (util::FifoWorklist *)nullptr; + else + JLM_UNREACHABLE("Unknown worklist policy"); + + std::variant onlineCycleDetectionVariant; if (enableOnlineCycleDetection) - return DispatchOnlineCycleDetection(std::true_type{}); + onlineCycleDetectionVariant = std::true_type{}; else - return 
DispatchOnlineCycleDetection(std::false_type{}); + onlineCycleDetectionVariant = std::false_type{}; + + return std::visit(Dispatch, policyVariant, onlineCycleDetectionVariant); } const char * diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index cf45da546..d2994938a 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -166,6 +166,12 @@ class PointerObjectSet final [[nodiscard]] size_t NumPointerObjects() const noexcept; + /** + * @return the number of PointerObjects where CanTrackPointeesImplicitly() is true + */ + [[nodiscard]] size_t + NumPointerObjectsWithImplicitPointees() const noexcept; + /** * @return the number of PointerObjects in the set matching the specified \p kind. */ @@ -335,6 +341,14 @@ class PointerObjectSet final bool MarkAsPointingToExternal(PointerObjectIndex index); + /** + * @return true if the PointerObject with the given \p index is flagged as both + * PointsToExternal and PointeesEscaping. + * In that case, any explicit pointee will also be implicit, so it is better to avoid explicit. + */ + [[nodiscard]] bool + CanTrackPointeesImplicitly(PointerObjectIndex index) const noexcept; + /** * @return the root in the unification the PointerObject with the given \p index belongs to. * PointerObjects that have not been unified will always be their own root. @@ -381,7 +395,8 @@ class PointerObjectSet final AddToPointsToSet(PointerObjectIndex pointer, PointerObjectIndex pointee); /** - * Makes P(\p superset) a superset of P(\p subset), by adding any elements in the set difference + * Makes P(\p superset) a superset of P(\p subset), by adding any elements in the set difference. + * Also propagates the PointsToExternal flag. 
* @param superset the index of the PointerObject that shall point to everything subset points to * @param subset the index of the PointerObject whose pointees shall all be pointed to by superset * as well @@ -391,6 +406,14 @@ class PointerObjectSet final bool MakePointsToSetSuperset(PointerObjectIndex superset, PointerObjectIndex subset); + /** + * @param pointer the PointerObject possibly pointing to \p pointee + * @param pointee the PointerObject possibly being pointed at + * @return true if \p pointer points to \p pointee, either explicitly, implicitly, or both. + */ + bool + IsPointingTo(PointerObjectIndex pointer, PointerObjectIndex pointee) const; + /** * Creates a clone of this PointerObjectSet, with all the same PointerObjects, * flags, unifications and points-to sets. @@ -398,6 +421,17 @@ class PointerObjectSet final */ [[nodiscard]] std::unique_ptr Clone() const; + + /** + * Compares the Sol sets of all PointerObjects between two PointerObjectSets. + * Assumes that this and \p other represent the same set of PointerObjects, and in the same order. + * Only the final Sol set of each PointerObject matters, so unifications do not need to match. + * The set of escaped PointerObjects must match. + * @param other the set being compared to + * @return true if this and \p other are identical, false otherwise + */ + [[nodiscard]] bool + HasIdenticalSolAs(const PointerObjectSet & other) const; }; /** @@ -756,6 +790,12 @@ class PointerObjectConstraintSet final */ size_t NumWorkItemsPopped{}; + /** + * The sum of the number of new pointees, for each visited work item. + * If Difference Propagation is not enabled, all pointees are always regarded as new. + */ + size_t NumWorkItemNewPointees{}; + /** * The number of cycles detected by online cycle detection, if enabled. 
*/ @@ -823,11 +863,23 @@ class PointerObjectConstraintSet final AddConstraint(ConstraintVariant c); /** - * Retrieves all added constraints that were not simple one-off flag changes + * @return all added constraints that were not simple one-off pointee inclusions or flag changes */ [[nodiscard]] const std::vector & GetConstraints() const noexcept; + /** + * @return the number of base constraints + */ + [[nodiscard]] size_t + NumBaseConstraints() const noexcept; + + /** + * @return the number of flag constraints, including memory objects that are not pointees. + */ + [[nodiscard]] size_t + NumFlagConstraints() const noexcept; + /** * Creates a subset graph containing all PointerObjects, their current points-to sets, * and edges representing the current set of constraints. @@ -876,10 +928,11 @@ class PointerObjectConstraintSet final * Finds a least solution satisfying all constraints, using the Worklist algorithm. * Descriptions of the algorithm can be found in * - Pearce et al. 2003: "Online cycle detection and difference propagation for pointer analysis" - * - Hardekopf et al. 2007: "The Ant and the Grasshopper". + * - Hardekopf and Lin, 2007: "The Ant and the Grasshopper". + * These papers also describe a set of techniques that potentially improve solving performance: + * - Online Cycle Detection (Pearce, 2003) * @param policy the worklist iteration order policy to use - * @param enableOnlineCycleDetection if true, online cycle detection will be performed, from - * Pearce et al. 2003: "Online cycle detection and difference propagation for pointer analysis" + * @param enableOnlineCycleDetection if true, online cycle detection will be performed. 
* @return an instance of WorklistStatistics describing solver statistics */ WorklistStatistics diff --git a/jlm/util/HashSet.hpp b/jlm/util/HashSet.hpp index f2b57957d..ccdc2f7c5 100644 --- a/jlm/util/HashSet.hpp +++ b/jlm/util/HashSet.hpp @@ -299,6 +299,17 @@ class HashSet return numRemoved; } + /** + * Removes the element pointed to by the given iterator + * @param iterator the element to remove + * @return an iterator to the element after the removed element + */ + ItemConstIterator + Erase(ItemConstIterator iterator) + { + return ItemConstIterator(Set_.erase(iterator.It_)); + } + /** * Compares the items of this HashSet object with the items of \p other for equality. * diff --git a/jlm/util/Statistics.hpp b/jlm/util/Statistics.hpp index ef8dae244..02328228d 100644 --- a/jlm/util/Statistics.hpp +++ b/jlm/util/Statistics.hpp @@ -161,7 +161,7 @@ class Statistics /** * Adds a measurement, identified by \p name, with the given value. * Requires that the measurement doesn't already exist. 
- * @tparam T the type of the measurement, must be one of: std::string, int64_t, uint16_4, double + * @tparam T the type of the measurement, must be one of: std::string, int64_t, uint64_t, double */ template void diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index b2c0593c0..f5e218fff 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -56,7 +56,7 @@ EscapedIsExactly( return ptg.GetEscapedMemoryNodes() == jlm::util::HashSet(nodes); } -static void +static int TestStore1() { jlm::tests::StoreTest1 test; @@ -97,9 +97,12 @@ TestStore1() assert(TargetsExactly(plambda, { &lambda })); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestStore1", TestStore1) -static void +static int TestStore2() { jlm::tests::StoreTest2 test; @@ -140,9 +143,12 @@ TestStore2() assert(TargetsExactly(plambda, { &lambda })); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestStore2", TestStore2) -static void +static int TestLoad1() { jlm::tests::LoadTest1 test; @@ -163,9 +169,12 @@ TestLoad1() assert(TargetsExactly(lambdaArgument0, { &lambda, &ptg->GetExternalMemoryNode() })); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestLoad1", TestLoad1) -static void +static int TestLoad2() { jlm::tests::LoadTest2 test; @@ -194,9 +203,12 @@ TestLoad2() assert(TargetsExactly(pload_a, { &alloca_a })); assert(EscapedIsExactly(*ptg, { &lambdaMemoryNode })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestLoad2", TestLoad2) -static void +static int TestLoadFromUndef() { jlm::tests::LoadFromUndefTest test; @@ -210,9 +222,14 @@ TestLoadFromUndef() assert(TargetsExactly(undefValueNode, {})); 
assert(EscapedIsExactly(*ptg, { &lambdaMemoryNode })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestLoadFromUndef", + TestLoadFromUndef) -static void +static int TestGetElementPtr() { jlm::tests::GetElementPtrTest test; @@ -232,9 +249,14 @@ TestGetElementPtr() assert(TargetsExactly(gepX, { &lambda, &ptg->GetExternalMemoryNode() })); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestGetElementPtr", + TestGetElementPtr) -static void +static int TestBitCast() { jlm::tests::BitCastTest test; @@ -253,9 +275,12 @@ TestBitCast() assert(TargetsExactly(bitCast, { &lambda, &ptg->GetExternalMemoryNode() })); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestBitCast", TestBitCast) -static void +static int TestConstantPointerNull() { jlm::tests::ConstantPointerNullTest test; @@ -275,9 +300,14 @@ TestConstantPointerNull() assert(TargetsExactly(constantPointerNull, {})); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestConstantPointerNull", + TestConstantPointerNull) -static void +static int TestBits2Ptr() { jlm::tests::Bits2PtrTest test; @@ -296,9 +326,12 @@ TestBits2Ptr() assert(TargetsExactly(bits2ptr, { &lambdaTestMemoryNode, &externalMemoryNode })); assert(EscapedIsExactly(*ptg, { &lambdaTestMemoryNode })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestBits2Ptr", TestBits2Ptr) -static void +static int TestCall1() { jlm::tests::CallTest1 test; @@ -351,9 +384,12 @@ TestCall1() assert(TargetsExactly(lambda_h_cv1, { &lambda_g })); assert(EscapedIsExactly(*ptg, { &lambda_h })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestCall1", TestCall1) -static void +static int TestCall2() { jlm::tests::CallTest2 test; @@ 
-397,9 +433,12 @@ TestCall2() assert(TargetsExactly(malloc_out, { &malloc })); assert(EscapedIsExactly(*ptg, { &lambda_test })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestCall2", TestCall2) -static void +static int TestIndirectCall1() { jlm::tests::IndirectCallTest1 test; @@ -438,9 +477,14 @@ TestIndirectCall1() assert(TargetsExactly(lambda_test_cv2, { &lambda_three })); assert(EscapedIsExactly(*ptg, { &lambda_test })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestIndirectCall1", + TestIndirectCall1) -static void +static int TestIndirectCall2() { jlm::tests::IndirectCallTest2 test; @@ -459,9 +503,14 @@ TestIndirectCall2() assert(TargetsExactly(lambdaThreeOutput, { &lambdaThree })); assert(TargetsExactly(lambdaFourOutput, { &lambdaFour })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestIndirectCall2", + TestIndirectCall2) -static void +static int TestExternalCall1() { jlm::tests::ExternalCallTest1 test; @@ -484,9 +533,14 @@ TestExternalCall1() assert(TargetsExactly(lambdaFArgument0, { &lambdaF, &importG, &externalMemory })); assert(TargetsExactly(lambdaFArgument1, { &lambdaF, &importG, &externalMemory })); assert(TargetsExactly(callResult, { &lambdaF, &importG, &externalMemory })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestExternalCall1", + TestExternalCall1) -static void +static int TestGamma() { jlm::tests::GammaTest test; @@ -519,9 +573,12 @@ TestGamma() } assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestGamma", TestGamma) -static void +static int TestTheta() { jlm::tests::ThetaTest test; @@ -548,9 +605,12 @@ TestTheta() assert(TargetsExactly(thetaOutput2, { &lambda, &ptg->GetExternalMemoryNode() })); assert(EscapedIsExactly(*ptg, { &lambda })); + + return 0; } 
+JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestTheta", TestTheta) -static void +static int TestDelta1() { jlm::tests::DeltaTest1 test; @@ -583,9 +643,12 @@ TestDelta1() assert(TargetsExactly(lambda_h_cv1, { &lambda_g })); assert(EscapedIsExactly(*ptg, { &lambda_h })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestDelta1", TestDelta1) -static void +static int TestDelta2() { jlm::tests::DeltaTest2 test; @@ -623,9 +686,12 @@ TestDelta2() assert(&lambda_f2_cvf1 == &lambda_f1_out); assert(EscapedIsExactly(*ptg, { &lambda_f2 })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestDelta2", TestDelta2) -static void +static int TestImports() { jlm::tests::ImportTest test; @@ -663,9 +729,12 @@ TestImports() assert(&lambda_f2_cvf1 == &lambda_f1_out); assert(EscapedIsExactly(*ptg, { &lambda_f2, &d1, &d2 })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestImports", TestImports) -static void +static int TestPhi1() { jlm::tests::PhiTest1 test; @@ -705,9 +774,12 @@ TestPhi1() assert(TargetsExactly(alloca_out, { &alloca })); assert(EscapedIsExactly(*ptg, { &lambda_test })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestPhi1", TestPhi1) -static void +static int TestExternalMemory() { jlm::tests::ExternalMemoryTest test; @@ -724,9 +796,14 @@ TestExternalMemory() assert(TargetsExactly(lambdaFArgument1, { &lambdaF, &ptg->GetExternalMemoryNode() })); assert(EscapedIsExactly(*ptg, { &lambdaF })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestExternalMemory", + TestExternalMemory) -static void +static int TestEscapedMemory1() { jlm::tests::EscapedMemoryTest1 test; @@ -753,9 +830,14 @@ TestEscapedMemory1() assert(TargetsExactly(loadNode1Output, { deltaA, deltaX, deltaY, lambdaTest, externalMemory })); assert(EscapedIsExactly(*ptg, { lambdaTest, deltaA, deltaX, deltaY })); + 
+ return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestEscapedMemory1", + TestEscapedMemory1) -static void +static int TestEscapedMemory2() { jlm::tests::EscapedMemoryTest2 test; @@ -797,9 +879,14 @@ TestEscapedMemory2() callExternalFunction1Malloc, externalFunction1Import, externalFunction2Import })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestEscapedMemory2", + TestEscapedMemory2) -static void +static int TestEscapedMemory3() { jlm::tests::EscapedMemoryTest3 test; @@ -822,9 +909,14 @@ TestEscapedMemory3() { lambdaTest, deltaGlobal, importExternalFunction, externalMemory })); assert(EscapedIsExactly(*ptg, { lambdaTest, deltaGlobal, importExternalFunction })); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestEscapedMemory3", + TestEscapedMemory3) -static void +static int TestMemcpy() { jlm::tests::MemcpyTest test; @@ -847,9 +939,12 @@ TestMemcpy() assert(TargetsExactly(memCpySrc, { localArray })); assert(EscapedIsExactly(*ptg, { globalArray, localArray, lambdaF, lambdaG })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestMemcpy", TestMemcpy) -static void +static int TestLinkedList() { jlm::tests::LinkedListTest test; @@ -867,9 +962,12 @@ TestLinkedList() assert(TargetsExactly(allocaNode, { &deltaMyListNode, &lambdaNextNode, &externalMemoryNode })); assert( TargetsExactly(deltaMyListNode, { &deltaMyListNode, &lambdaNextNode, &externalMemoryNode })); + + return 0; } +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestLinkedList", TestLinkedList) -static void +static int TestStatistics() { // Arrange @@ -891,9 +989,56 @@ TestStatistics() assert(statistics.GetMeasurementValue("#LoadConstraints") == 1); assert(statistics.GetMeasurementValue("#PointsToGraphNodes") == ptg->NumNodes()); assert(statistics.GetTimerElapsedNanoseconds("AnalysisTimer") > 0); + + return 0; } 
+JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen-TestStatistics", TestStatistics) + +static int +TestConfiguration() +{ + using namespace jlm::llvm::aa; + auto config = Andersen::Configuration::DefaultConfiguration(); + + // Arrange + config.SetSolver(Andersen::Configuration::Solver::Naive); + config.EnableOfflineVariableSubstitution(true); + config.EnableOfflineConstraintNormalization(false); + + // Act + auto configString = config.ToString(); + + // Assert + assert(config.GetSolver() == Andersen::Configuration::Solver::Naive); + assert(config.IsOfflineVariableSubstitutionEnabled()); + assert(!config.IsOfflineConstraintNormalizationEnabled()); + assert(configString.find("Solver=Naive") != std::string::npos); + assert(configString.find("OVS") != std::string::npos); + assert(configString.find("Norm") == std::string::npos); + + // Arrange some more + auto policy = PointerObjectConstraintSet::WorklistSolverPolicy::TwoPhaseLeastRecentlyFired; + config.SetSolver(Andersen::Configuration::Solver::Worklist); + config.SetWorklistSolverPolicy(policy); + config.EnableOfflineVariableSubstitution(false); + config.EnableOnlineCycleDetection(true); -static void + // Act + configString = config.ToString(); + + // Assert + assert(configString.find("Solver=Worklist") != std::string::npos); + assert(configString.find("Policy=TwoPhaseLeastRecentlyFired") != std::string::npos); + assert(configString.find("OVS") == std::string::npos); + assert(configString.find("OnlineCD") != std::string::npos); + + return 0; +} +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestConfiguration", + TestConfiguration) + +static int TestConstructPointsToGraph() { using namespace jlm::llvm::aa; @@ -976,41 +1121,9 @@ TestConstructPointsToGraph() assert(TargetsExactly(importNode, { &deltaNode, &importNode, &externalMemory })); // mallocR points to mallocNode, as well as everything that has escaped assert(TargetsExactly(mallocRNode, { &mallocNode, &deltaNode, &importNode, 
&externalMemory })); -} - -static int -TestAndersen() -{ - TestStore1(); - TestStore2(); - TestLoad1(); - TestLoad2(); - TestLoadFromUndef(); - TestGetElementPtr(); - TestBitCast(); - TestConstantPointerNull(); - TestBits2Ptr(); - TestCall1(); - TestCall2(); - TestIndirectCall1(); - TestIndirectCall2(); - TestExternalCall1(); - TestGamma(); - TestTheta(); - TestDelta1(); - TestDelta2(); - TestImports(); - TestPhi1(); - TestExternalMemory(); - TestEscapedMemory1(); - TestEscapedMemory2(); - TestEscapedMemory3(); - TestMemcpy(); - TestLinkedList(); - TestStatistics(); - TestConstructPointsToGraph(); return 0; } - -JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestAndersen", TestAndersen) +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestAndersen-TestConstructPointsToGraph", + TestConstructPointsToGraph) diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 5556cd5b0..2f65223be 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -7,8 +7,8 @@ #include +#include #include -#include #include @@ -875,14 +875,23 @@ TestPointerObjectSet() TestAddRegisterContentEscapedConstraint(); TestDrawSubsetGraph(); TestPointerObjectConstraintSetSolve(); - using Policy = jlm::llvm::aa::PointerObjectConstraintSet::WorklistSolverPolicy; - for (int onlineCD = 0; onlineCD <= 1; onlineCD++) + + auto allConfigs = jlm::llvm::aa::Andersen::Configuration::GetAllConfigurations(); + for (const auto & config : allConfigs) { - TestPointerObjectConstraintSetSolve(Policy::LeastRecentlyFired, onlineCD); - TestPointerObjectConstraintSetSolve(Policy::TwoPhaseLeastRecentlyFired, onlineCD); - TestPointerObjectConstraintSetSolve(Policy::FirstInFirstOut, onlineCD); - TestPointerObjectConstraintSetSolve(Policy::LastInFirstOut, onlineCD); + // Ignore all configs that enable features that do not affect SolveUsingWorklist() + 
if (config.GetSolver() != jlm::llvm::aa::Andersen::Configuration::Solver::Worklist) + continue; + if (config.IsOfflineVariableSubstitutionEnabled()) + continue; + if (config.IsOfflineConstraintNormalizationEnabled()) + continue; + + TestPointerObjectConstraintSetSolve( + config.GetWorklistSoliverPolicy(), + config.IsOnlineCycleDetectionEnabled()); } + TestClonePointerObjectConstraintSet(); return 0; } diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index d1d111960..248d651b4 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -320,7 +320,7 @@ ToTree_RvsdgWithStructuralNodes() // Assert auto numLines = std::count(tree.begin(), tree.end(), '\n'); - // We should find '\n' 8 times: 1 root region + 3 structural nodes + 6 subregions + // We should find '\n' 10 times: 1 root region + 3 structural nodes + 6 subregions assert(numLines == 10); // Check that the last line printed looks accordingly From 839352c45bd5031d59121a02106c7cb55657e629 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 16 Aug 2024 07:13:10 +0200 Subject: [PATCH 041/170] Add Copy() method to argument class (#578) This PR does the following: 1. Introduces a Copy() method to the argument class. 2. The introduced method enables us to fix a bug in the copy() method of the region. Previously, arguments were not copied according to their argument subtypes, but simply a new instance of argument was created. 3. Add unit test. This PR is a necessary step to get rid off ports. 
--- jlm/hls/ir/hls.cpp | 7 +++++++ jlm/hls/ir/hls.hpp | 3 +++ jlm/llvm/ir/operators/Phi.cpp | 14 ++++++++++++++ jlm/llvm/ir/operators/Phi.hpp | 6 ++++++ jlm/llvm/ir/operators/delta.cpp | 7 +++++++ jlm/llvm/ir/operators/delta.hpp | 3 +++ jlm/llvm/ir/operators/lambda.cpp | 14 ++++++++++++++ jlm/llvm/ir/operators/lambda.hpp | 6 ++++++ jlm/rvsdg/gamma.cpp | 7 +++++++ jlm/rvsdg/gamma.hpp | 3 +++ jlm/rvsdg/region.cpp | 14 ++++++++++---- jlm/rvsdg/region.hpp | 14 ++++++++++++++ jlm/rvsdg/theta.cpp | 7 +++++++ jlm/rvsdg/theta.hpp | 3 +++ tests/jlm/rvsdg/test-graph.cpp | 24 ++++++++++++++++++++++++ tests/test-operation.hpp | 24 ++++++++++++++++++++++++ 16 files changed, 152 insertions(+), 4 deletions(-) diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 528aab089..4eae808f2 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -35,6 +35,13 @@ bundletype::ComputeHash() const noexcept return seed; } +backedge_argument & +backedge_argument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) +{ + JLM_ASSERT(input == nullptr); + return *backedge_argument::create(®ion, Type()); +} + jlm::rvsdg::structural_output * loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 00d07eefa..4affed7c5 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -623,6 +623,9 @@ class backedge_argument : public jlm::rvsdg::argument return result_; } + backedge_argument & + Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) override; + private: backedge_argument( jlm::rvsdg::region * region, diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 40e176cf7..e8447c3b9 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -168,11 +168,25 @@ rvoutput::~rvoutput() rvargument::~rvargument() {} +rvargument & +rvargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +{ + JLM_ASSERT(input == nullptr); + 
return *rvargument::create(®ion, Type()); +} + /* phi context variable argument class */ cvargument::~cvargument() {} +cvargument & +cvargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +{ + auto phiInput = util::AssertedCast(input); + return *cvargument::create(®ion, phiInput, Type()); +} + /* phi recursion variable result class */ rvresult::~rvresult() diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 3bd6ff401..c37742cd4 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -710,6 +710,9 @@ class rvargument final : public jlm::rvsdg::argument return output()->result(); } + rvargument & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + private: rvoutput * output_; }; @@ -748,6 +751,9 @@ class cvargument final : public jlm::rvsdg::argument cvargument & operator=(cvargument &&) = delete; + cvargument & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + static cvargument * create(jlm::rvsdg::region * region, phi::cvinput * input, const jlm::rvsdg::port & port) { diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index b1f06ee53..749cab5e2 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -172,6 +172,13 @@ output::~output() cvargument::~cvargument() {} +cvargument & +cvargument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) +{ + auto deltaInput = util::AssertedCast(input); + return *cvargument::create(®ion, deltaInput); +} + /* delta result class */ result::~result() diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 6862a6657..3598418f3 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -424,6 +424,9 @@ class cvargument final : public rvsdg::argument public: ~cvargument() override; + cvargument & + Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) override; + private: 
cvargument(rvsdg::region * region, cvinput * input) : rvsdg::argument(region, input, input->port()) diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 2e4d19475..3c7349272 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -418,10 +418,24 @@ output::~output() = default; fctargument::~fctargument() = default; +fctargument & +fctargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +{ + JLM_ASSERT(input == nullptr); + return *fctargument::create(®ion, Type()); +} + /* lambda context variable argument class */ cvargument::~cvargument() = default; +cvargument & +cvargument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) +{ + auto lambdaInput = util::AssertedCast(input); + return *cvargument::create(®ion, lambdaInput); +} + /* lambda result class */ result::~result() = default; diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index e702ac8c7..84cf4e606 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -525,6 +525,9 @@ class fctargument final : public jlm::rvsdg::argument attributes_ = attributes; } + fctargument & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + private: fctargument(jlm::rvsdg::region * region, std::shared_ptr type) : jlm::rvsdg::argument(region, nullptr, std::move(type)) @@ -599,6 +602,9 @@ class cvargument final : public jlm::rvsdg::argument public: ~cvargument() override; + cvargument & + Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) override; + private: cvargument(jlm::rvsdg::region * region, cvinput * input) : jlm::rvsdg::argument(region, input, input->port()) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 9d6d92d1f..28c648a85 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -377,6 +377,13 @@ gamma_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & sma 
GammaArgument::~GammaArgument() noexcept = default; +GammaArgument & +GammaArgument::Copy(rvsdg::region & region, structural_input * input) +{ + auto gammaInput = util::AssertedCast(input); + return Create(region, *gammaInput); +} + GammaResult::~GammaResult() noexcept = default; } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index c248cc332..c6a9241ff 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -469,6 +469,9 @@ class GammaArgument final : public argument public: ~GammaArgument() noexcept override; + GammaArgument & + Copy(rvsdg::region & region, structural_input * input) override; + private: GammaArgument(rvsdg::region & region, gamma_input & input) : argument(®ion, &input, input.Type()) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 330a0e5c5..3cd27506b 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -66,6 +66,12 @@ argument::argument( } } +argument & +argument::Copy(rvsdg::region & region, structural_input * input) +{ + return *argument::create(®ion, input, port()); +} + jlm::rvsdg::argument * argument::create( jlm::rvsdg::region * region, @@ -285,14 +291,14 @@ region::copy(region * target, substitution_map & smap, bool copy_arguments, bool context[node.depth()].push_back(&node); } - /* copy arguments */ if (copy_arguments) { for (size_t n = 0; n < narguments(); n++) { - auto input = smap.lookup(argument(n)->input()); - auto narg = argument::create(target, input, argument(n)->port()); - smap.insert(argument(n), narg); + auto oldArgument = argument(n); + auto input = smap.lookup(oldArgument->input()); + auto & newArgument = oldArgument->Copy(*target, input); + smap.insert(oldArgument, &newArgument); } } diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index d1e801ad5..e2beb15d0 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -70,6 +70,20 @@ class argument : public output return input_; } + /** + * Creates a copy of the argument in \p region with the structural_input \p input. 
+ * + * @param region The region where the copy of the argument is created in. + * @param input The structural_input to the argument, if any. + * + * @return A reference to the copied argument. + * + * FIXME: This method should be made abstract once we enforced that no instances of argument + * itself can be created any longer. + */ + virtual argument & + Copy(rvsdg::region & region, structural_input * input); + static jlm::rvsdg::argument * create(jlm::rvsdg::region * region, structural_input * input, const jlm::rvsdg::port & port); diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index b67e56403..0781bf608 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -45,6 +45,13 @@ theta_output::~theta_output() noexcept ThetaArgument::~ThetaArgument() noexcept = default; +ThetaArgument & +ThetaArgument::Copy(rvsdg::region & region, structural_input * input) +{ + auto thetaInput = util::AssertedCast(input); + return ThetaArgument::Create(region, *thetaInput); +} + ThetaResult::~ThetaResult() noexcept = default; /* theta node */ diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index daedd8415..7c05f93cf 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -359,6 +359,9 @@ class ThetaArgument final : public argument public: ~ThetaArgument() noexcept override; + ThetaArgument & + Copy(rvsdg::region & region, structural_input * input) override; + private: ThetaArgument(rvsdg::region & region, theta_input & input) : argument(®ion, &input, input.Type()) diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 5e8a5d07f..9d7c6de2d 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -135,3 +135,27 @@ test_graph(void) } JLM_UNIT_TEST_REGISTER("jlm/rvsdg/test-graph", test_graph) + +static int +Copy() +{ + using namespace jlm::rvsdg; + using namespace jlm::tests; + + // Arrange + auto type = jlm::tests::valuetype::Create(); + + jlm::rvsdg::graph graph; + TestGraphArgument::Create(*graph.root(), 
type); + + // Act + auto newGraph = graph.copy(); + + // Assert + assert(newGraph->root()->narguments() == 1); + assert(is(newGraph->root()->argument(0))); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/test-graph-Copy", Copy) diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 66684f056..a72663a2a 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -344,6 +344,30 @@ create_testop( return rvsdg::simple_node::create_normalized(region, op, { operands }); } +class TestGraphArgument final : public jlm::rvsdg::argument +{ +private: + TestGraphArgument(jlm::rvsdg::region & region, std::shared_ptr type) + : jlm::rvsdg::argument(®ion, nullptr, type) + {} + +public: + TestGraphArgument & + Copy(jlm::rvsdg::region & region, jlm::rvsdg::structural_input * input) override + { + JLM_ASSERT(input == nullptr); + return Create(region, Type()); + } + + static TestGraphArgument & + Create(jlm::rvsdg::region & region, std::shared_ptr type) + { + auto graphArgument = new TestGraphArgument(region, std::move(type)); + region.append_argument(graphArgument); + return *graphArgument; + } +}; + } #endif From 78ae7a5efb311d74faa3d19d1cd121a658d50131 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 16 Aug 2024 08:03:50 +0200 Subject: [PATCH 042/170] [AndersenAgnostic] Add hybrid cycle detection to Andersen (#576) Adds Hybrid Cycle Detection from Hardekopf 2007. The implementation is quite simple, but requires some extra info to be kept from the Offline Variable Substitution. 
--- jlm/llvm/opt/alias-analyses/Andersen.cpp | 32 ++++- jlm/llvm/opt/alias-analyses/Andersen.hpp | 29 +++-- .../opt/alias-analyses/PointerObjectSet.cpp | 119 ++++++++++++++++-- .../opt/alias-analyses/PointerObjectSet.hpp | 28 ++++- .../llvm/opt/alias-analyses/TestAndersen.cpp | 25 ++-- .../alias-analyses/TestPointerObjectSet.cpp | 3 +- 6 files changed, 203 insertions(+), 33 deletions(-) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index ead7f10c1..479d00840 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -5,8 +5,6 @@ #include #include -#include -#include #include #include @@ -45,6 +43,8 @@ Andersen::Configuration::ToString() const if (EnableOnlineCycleDetection_) str << "OnlineCD_"; + if (EnableHybridCycleDetection_) + str << "HybridCD_"; } else { @@ -61,11 +61,23 @@ Andersen::Configuration::GetAllConfigurations() { std::vector configs; + auto PickHybridCycleDetection = [&](Configuration config) + { + config.EnableHybridCycleDetection(false); + configs.push_back(config); + // Hybrid Cycle Detection can only be enabled when OVS is enabled + if (config.IsOfflineVariableSubstitutionEnabled()) + { + config.EnableHybridCycleDetection(true); + configs.push_back(config); + } + }; auto PickOnlineCycleDetection = [&](Configuration config) { config.EnableOnlineCycleDetection(false); - configs.push_back(config); + PickHybridCycleDetection(config); config.EnableOnlineCycleDetection(true); + // OnlineCD can not be combined with HybridCD configs.push_back(config); }; auto PickWorklistPolicy = [&](Configuration config) @@ -147,6 +159,8 @@ class Andersen::Statistics final : public util::Statistics static constexpr const char * NumOnlineCyclesDetected_ = "#OnlineCyclesDetected"; static constexpr const char * NumOnlineCycleUnifications_ = "#OnlineCycleUnifications"; + static constexpr const char * NumHybridCycleUnifications_ = "#HybridCycleUnifications"; + // After solving 
statistics static constexpr const char * NumEscapedMemoryObjects_ = "#EscapedMemoryObjects"; static constexpr const char * NumUnificationRoots_ = "#UnificationRoots"; @@ -287,6 +301,9 @@ class Andersen::Statistics final : public util::Statistics if (statistics.NumOnlineCycleUnifications) AddMeasurement(NumOnlineCycleUnifications_, *statistics.NumOnlineCycleUnifications); + + if (statistics.NumHybridCycleUnifications) + AddMeasurement(NumHybridCycleUnifications_, *statistics.NumHybridCycleUnifications); } void @@ -1048,7 +1065,9 @@ Andersen::SolveConstraints( if (config.IsOfflineVariableSubstitutionEnabled()) { statistics.StartOfflineVariableSubstitution(); - auto numUnifications = constraints.PerformOfflineVariableSubstitution(); + // If the solver uses hybrid cycle detection, tell OVS to store info about ref node cycles + bool hasHCD = config.IsHybridCycleDetectionEnabled(); + auto numUnifications = constraints.PerformOfflineVariableSubstitution(hasHCD); statistics.StopOfflineVariableSubstitution(numUnifications); } @@ -1070,7 +1089,8 @@ Andersen::SolveConstraints( statistics.StartConstraintSolvingWorklistStatistics(); auto worklistStatistics = constraints.SolveUsingWorklist( config.GetWorklistSoliverPolicy(), - config.IsOnlineCycleDetectionEnabled()); + config.IsOnlineCycleDetectionEnabled(), + config.IsHybridCycleDetectionEnabled()); statistics.StopConstraintSolvingWorklistStatistics(worklistStatistics); } else @@ -1122,7 +1142,7 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis if (doubleCheck) std::cerr << "Double checking Andersen analysis using naive solving" << std::endl; - // If only double-checking, use the naive configuration + // If double-checking, only use the naive configuration. 
Otherwise try all configurations std::vector configs; if (testAllConfigs) configs = Configuration::GetAllConfigurations(); diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index 2618db14d..5fe496639 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -10,12 +10,8 @@ #include #include #include - -namespace jlm::rvsdg -{ -class gamma_node; -class theta_node; -} +#include +#include namespace jlm::llvm::aa { @@ -148,6 +144,23 @@ class Andersen final : public AliasAnalysis return EnableOnlineCycleDetection_; } + /** + * Enables or disables hybrid cycle detection in the Worklist solver, as described by + * Hardekopf and Lin, 2007: "The Ant & the Grasshopper" + * It detects some cycles, so it can not be combined with techniques that find all cycles. + */ + void + EnableHybridCycleDetection(bool enable) noexcept + { + EnableHybridCycleDetection_ = enable; + } + + [[nodiscard]] bool + IsHybridCycleDetectionEnabled() const noexcept + { + return EnableHybridCycleDetection_; + } + [[nodiscard]] std::string ToString() const; @@ -165,6 +178,7 @@ class Andersen final : public AliasAnalysis config.SetWorklistSolverPolicy( PointerObjectConstraintSet::WorklistSolverPolicy::LeastRecentlyFired); config.EnableOnlineCycleDetection(false); + config.EnableHybridCycleDetection(true); return config; } @@ -198,6 +212,7 @@ class Andersen final : public AliasAnalysis PointerObjectConstraintSet::WorklistSolverPolicy WorklistSolverPolicy_ = PointerObjectConstraintSet::WorklistSolverPolicy::LeastRecentlyFired; bool EnableOnlineCycleDetection_ = false; + bool EnableHybridCycleDetection_ = false; }; ~Andersen() noexcept override = default; @@ -371,6 +386,6 @@ class Andersen final : public AliasAnalysis std::unique_ptr Constraints_; }; -} // namespace +} #endif diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index c103da20b..b9c3214f6 
100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -11,6 +11,7 @@ #include #include +#include namespace jlm::llvm::aa { @@ -1234,7 +1235,7 @@ AssignOvsEquivalenceSetLabels( } size_t -PointerObjectConstraintSet::PerformOfflineVariableSubstitution() +PointerObjectConstraintSet::PerformOfflineVariableSubstitution(bool storeRefCycleUnificationRoot) { // Performing unification on direct nodes relies on all subset edges being known offline. // This is only safe if no more constraints are added to the node in the future. @@ -1297,6 +1298,32 @@ PointerObjectConstraintSet::PerformOfflineVariableSubstitution() unificationRoot[equivalenceSetLabel] = i; } + // If hybrid cycle detection is enabled, it requires some information to be kept from OVS + if (storeRefCycleUnificationRoot) + { + // For each ref node that is in a cycle with a regular node, store it for hybrid cycle detection + // The idea: Any pointee of p should be unified with a, if *p and a are in the same SCC + // NOTE: We do not use equivalence set labels here, as they represent more than just cycles + + // First find one unification root representing each SCC + std::vector> unificationRootPerSCC(numSccs, std::nullopt); + for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) + { + if (!unificationRootPerSCC[sccIndex[i]]) + unificationRootPerSCC[sccIndex[i]] = Set_.GetUnificationRoot(i); + } + + // Assign unification roots to ref nodes that belong to SCCs with at least one regular node + const size_t derefNodeOffset = Set_.NumPointerObjects(); + for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) + { + if (auto optRoot = unificationRootPerSCC[sccIndex[i + derefNodeOffset]]) + { + RefNodeUnificationRoot_[i] = *optRoot; + } + } + } + return numUnifications; } @@ -1360,13 +1387,19 @@ PointerObjectConstraintSet::NormalizeConstraints() return reduction; } -template +template void 
PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { // Check that the provided worklist implementation inherits from Worklist static_assert(std::is_base_of_v, Worklist>); + // Online cycle detections detects all cycles immediately, so there is no point in enabling others + if constexpr (EnableOnlineCycleDetection) + { + static_assert(!EnableHybridCycleDetection, "OnlineCD can not be combined with HybridCD"); + } + // Create auxiliary subset graph. // All edges must have their tail be a unification root (non-root nodes have no successors). // If supersetEdges[x] contains y, (x -> y), that means P(y) supseteq P(x) @@ -1458,6 +1491,17 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) callConstraints[root].UnionWith(callConstraints[nonRoot]); callConstraints[nonRoot].Clear(); + if constexpr (EnableHybridCycleDetection) + { + // If the new root did not have a ref node unification target, check if the other node has one + if (RefNodeUnificationRoot_.count(root) == 0) + { + const auto nonRootRefUnification = RefNodeUnificationRoot_.find(nonRoot); + if (nonRootRefUnification != RefNodeUnificationRoot_.end()) + RefNodeUnificationRoot_[root] = nonRootRefUnification->second; + } + } + return root; }; @@ -1474,6 +1518,9 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if constexpr (EnableOnlineCycleDetection) onlineCycleDetector.InitializeTopologicalOrdering(); + if constexpr (EnableHybridCycleDetection) + statistics.NumHybridCycleUnifications = 0; + // The worklist, initialized with every unification root Worklist worklist; for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) @@ -1571,6 +1618,41 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) auto & nodePointees = Set_.GetPointsToSet(node); statistics.NumWorkItemNewPointees += nodePointees.Size(); + // Perform hybrid cycle detection if all pointees of node should be unified + if 
constexpr (EnableHybridCycleDetection) + { + // If all pointees of node should be unified, do it now + const auto & refUnificationRootIt = RefNodeUnificationRoot_.find(node); + if (refUnificationRootIt != RefNodeUnificationRoot_.end()) + { + auto & refUnificationRoot = refUnificationRootIt->second; + // The ref unification root may no longer be a root, so update it first + refUnificationRoot = Set_.GetUnificationRoot(refUnificationRoot); + + // if any unification happens, the result must be added to the worklist + bool anyUnification = false; + for (const auto pointee : nodePointees.Items()) + { + const auto pointeeRoot = Set_.GetUnificationRoot(pointee); + if (pointeeRoot == refUnificationRoot) + continue; + + (*statistics.NumHybridCycleUnifications)++; + anyUnification = true; + refUnificationRoot = UnifyPointerObjects(refUnificationRoot, pointeeRoot); + } + + if (anyUnification) + { + JLM_ASSERT(Set_.IsUnificationRoot(refUnificationRoot)); + worklist.PushWorkItem(refUnificationRoot); + // If the current node became unified due to HCD, stop the current work item visit. + if (Set_.GetUnificationRoot(node) == refUnificationRoot) + return; + } + } + } + // If n is marked as PointeesEscaping, add the escaped flag to all pointees if (Set_.HasPointeesEscaping(node)) { @@ -1693,20 +1775,31 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) PointerObjectConstraintSet::WorklistStatistics PointerObjectConstraintSet::SolveUsingWorklist( WorklistSolverPolicy policy, - bool enableOnlineCycleDetection) + bool enableOnlineCycleDetection, + bool enableHybridCycleDetection) { // Takes all parameters as compile time types. // tWorklist is a pointer to one of the Worklist implementations. 
// the rest are instances of std::bool_constant, either std::true_type or std::false_type - const auto Dispatch = [&](auto tWorklist, auto tOnlineCycleDetection) -> WorklistStatistics + const auto Dispatch = [&](auto tWorklist, + auto tOnlineCycleDetection, + auto tHybridCycleDetection) -> WorklistStatistics { using Worklist = std::remove_pointer_t; constexpr bool vOnlineCycleDetection = decltype(tOnlineCycleDetection)::value; + constexpr bool vHybridCycleDetection = decltype(tHybridCycleDetection)::value; - WorklistStatistics statistics(policy); - RunWorklistSolver(statistics); - return statistics; + if constexpr (vOnlineCycleDetection && vHybridCycleDetection) + { + JLM_UNREACHABLE("Can not enable hybrid cycle detection with online cycle detection"); + } + else + { + WorklistStatistics statistics(policy); + RunWorklistSolver(statistics); + return statistics; + } }; std::variant< @@ -1733,7 +1826,17 @@ PointerObjectConstraintSet::SolveUsingWorklist( else onlineCycleDetectionVariant = std::false_type{}; - return std::visit(Dispatch, policyVariant, onlineCycleDetectionVariant); + std::variant hybridCycleDetectionVariant; + if (enableHybridCycleDetection) + hybridCycleDetectionVariant = std::true_type{}; + else + hybridCycleDetectionVariant = std::false_type{}; + + return std::visit( + Dispatch, + policyVariant, + onlineCycleDetectionVariant, + hybridCycleDetectionVariant); } const char * diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index d2994938a..691ce65c5 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -797,7 +797,9 @@ class PointerObjectConstraintSet final size_t NumWorkItemNewPointees{}; /** - * The number of cycles detected by online cycle detection, if enabled. + * The number of cycles detected by online cycle detection, + * and number of unifications made to eliminate the cycles, + * if Online Cycle Detection is enabled. 
*/ std::optional NumOnlineCyclesDetected; @@ -805,6 +807,11 @@ class PointerObjectConstraintSet final * The number of unifications made by online cycle detection, if enabled. */ std::optional NumOnlineCycleUnifications; + + /** + * The number of unifications performed due to hybrid cycle detection. + */ + std::optional NumHybridCycleUnifications; }; explicit PointerObjectConstraintSet(PointerObjectSet & set) @@ -907,11 +914,13 @@ class PointerObjectConstraintSet final * All PointerObjects v1, ... vN where n(v1), ... n(vN) share equivalence set label, get unified. * The run time is linear in the amount of PointerObjects and constraints. * + * @param storeRefCycleUnificationRoot if true, ref nodes in cycles with regular nodes are stored, + * to be used by hybrid cycle detection during solving. * @return the number PointerObject unifications made * @see NormalizeConstraints() call it afterwards to remove constraints made unnecessary. */ size_t - PerformOfflineVariableSubstitution(); + PerformOfflineVariableSubstitution(bool storeRefCycleUnificationRoot); /** * Traverses the list of constraints, and does the following: @@ -931,12 +940,17 @@ class PointerObjectConstraintSet final * - Hardekopf and Lin, 2007: "The Ant and the Grasshopper". * These papers also describe a set of techniques that potentially improve solving performance: * - Online Cycle Detection (Pearce, 2003) + * - Hybrid Cycle Detection (Hardekopf 2007) * @param policy the worklist iteration order policy to use * @param enableOnlineCycleDetection if true, online cycle detection will be performed. + * @param enableHybridCycleDetection if true, hybrid cycle detection will be performed. 
* @return an instance of WorklistStatistics describing solver statistics */ WorklistStatistics - SolveUsingWorklist(WorklistSolverPolicy policy, bool enableOnlineCycleDetection); + SolveUsingWorklist( + WorklistSolverPolicy policy, + bool enableOnlineCycleDetection, + bool enableHybridCycleDetection); /** * Iterates over and applies constraints until all points-to-sets satisfy them. @@ -979,9 +993,10 @@ class PointerObjectConstraintSet final * @param statistics the WorklistStatistics instance that will get information about this run. * @tparam Worklist a type supporting the worklist interface with PointerObjectIndex as work items * @tparam EnableOnlineCycleDetection if true, online cycle detection is enabled. + * @tparam EnableHybridCycleDetection if true, hybrid cycle detection is enabled. * @see SolveUsingWorklist() for the public interface. */ - template + template void RunWorklistSolver(WorklistStatistics & statistics); @@ -994,6 +1009,11 @@ class PointerObjectConstraintSet final // When true, no new constraints can be added. // Only offline processing is allowed to modify the constraint set. bool ConstraintSetFrozen_; + + // Offline Variable Substitution can determine that all pointees of a node p, + // should be unified together, possibly with some other PointerObjects. 
+ // This happens when *p is in a cycle with regular nodes + std::unordered_map RefNodeUnificationRoot_; }; } // namespace jlm::llvm::aa diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index f5e218fff..41f07c2d7 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -998,23 +998,23 @@ static int TestConfiguration() { using namespace jlm::llvm::aa; - auto config = Andersen::Configuration::DefaultConfiguration(); + auto config = Andersen::Configuration::NaiveSolverConfiguration(); // Arrange + config.EnableOfflineVariableSubstitution(false); + config.EnableOfflineConstraintNormalization(true); config.SetSolver(Andersen::Configuration::Solver::Naive); - config.EnableOfflineVariableSubstitution(true); - config.EnableOfflineConstraintNormalization(false); // Act auto configString = config.ToString(); // Assert + assert(!config.IsOfflineVariableSubstitutionEnabled()); + assert(config.IsOfflineConstraintNormalizationEnabled()); assert(config.GetSolver() == Andersen::Configuration::Solver::Naive); - assert(config.IsOfflineVariableSubstitutionEnabled()); - assert(!config.IsOfflineConstraintNormalizationEnabled()); + assert(configString.find("OVS") == std::string::npos); + assert(configString.find("NORM") != std::string::npos); assert(configString.find("Solver=Naive") != std::string::npos); - assert(configString.find("OVS") != std::string::npos); - assert(configString.find("Norm") == std::string::npos); // Arrange some more auto policy = PointerObjectConstraintSet::WorklistSolverPolicy::TwoPhaseLeastRecentlyFired; @@ -1032,6 +1032,17 @@ TestConfiguration() assert(configString.find("OVS") == std::string::npos); assert(configString.find("OnlineCD") != std::string::npos); + // Arrange some more + config.EnableOnlineCycleDetection(false); + config.EnableHybridCycleDetection(true); + + // Act + configString = config.ToString(); + + // Assert + 
assert(configString.find("OnlineCD") == std::string::npos); + assert(configString.find("HybridCD") != std::string::npos); + return 0; } JLM_UNIT_TEST_REGISTER( diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 2f65223be..74be2b70f 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -889,7 +889,8 @@ TestPointerObjectSet() TestPointerObjectConstraintSetSolve( config.GetWorklistSoliverPolicy(), - config.IsOnlineCycleDetectionEnabled()); + config.IsOnlineCycleDetectionEnabled(), + config.IsHybridCycleDetectionEnabled()); } TestClonePointerObjectConstraintSet(); From 49b3d823c91c60dc4243ad601975481600fc5d52 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 16 Aug 2024 16:03:31 +0200 Subject: [PATCH 043/170] Make use of the latest hls-test-suite commit (#582) --- scripts/run-hls-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index a685dbcd4..c102a12ef 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=effbe0bff96b396fb41e7c95bb74c7c772567136 +GIT_COMMIT=3e92b3d37b654b0f14b8b13d9ff8c07cad5f3796 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" From 6c82eeae51be93f37df02acabc31f12eec18f9f2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 18 Aug 2024 07:44:55 +0200 Subject: [PATCH 044/170] Remove impport class (#577) This PR does the following: 1. Introduces the GraphImport class and its subclasses to model external entities to the module. These classes serve as a replacement for the old impport classes. 2. Replaces all usages of impport with GraphImport 3. Removes old impport classes. 
This is one of the steps necessary in order to completely remove ports from the code base. --- .../rhls2firrtl/verilator-harness-hls.cpp | 15 +-- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 24 ++-- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 15 +-- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 44 +++---- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 31 +++-- .../InterProceduralGraphConversion.cpp | 14 ++- jlm/llvm/ir/RvsdgModule.cpp | 18 +-- jlm/llvm/ir/RvsdgModule.hpp | 79 +++++------- jlm/llvm/ir/operators/call.cpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 2 +- .../opt/alias-analyses/PointerObjectSet.cpp | 4 +- .../opt/alias-analyses/PointerObjectSet.hpp | 7 +- jlm/llvm/opt/alias-analyses/PointsToGraph.cpp | 8 +- jlm/llvm/opt/alias-analyses/PointsToGraph.hpp | 16 ++- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 35 +++--- jlm/rvsdg/graph.cpp | 22 +--- jlm/rvsdg/graph.hpp | 53 ++------ tests/TestRvsdgs.cpp | 61 +++++---- tests/TestRvsdgs.hpp | 4 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 18 +-- .../backend/llvm/r2j/test-recursive-data.cpp | 4 +- tests/jlm/llvm/ir/operators/LoadTests.cpp | 48 +++---- tests/jlm/llvm/ir/operators/StoreTests.cpp | 58 ++++----- tests/jlm/llvm/ir/operators/TestCall.cpp | 24 ++-- tests/jlm/llvm/ir/operators/TestFree.cpp | 7 +- tests/jlm/llvm/ir/operators/TestLambda.cpp | 6 +- tests/jlm/llvm/ir/operators/TestPhi.cpp | 4 +- tests/jlm/llvm/ir/operators/test-delta.cpp | 6 +- tests/jlm/llvm/ir/operators/test-sext.cpp | 9 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 53 ++++---- tests/jlm/llvm/opt/TestLoadMuxReduction.cpp | 17 +-- tests/jlm/llvm/opt/TestLoadStoreReduction.cpp | 7 +- .../opt/alias-analyses/TestPointsToGraph.cpp | 7 +- tests/jlm/llvm/opt/test-cne.cpp | 42 +++---- tests/jlm/llvm/opt/test-inlining.cpp | 4 +- tests/jlm/llvm/opt/test-inversion.cpp | 8 +- tests/jlm/llvm/opt/test-pull.cpp | 10 +- tests/jlm/llvm/opt/test-push.cpp 
| 20 +-- tests/jlm/llvm/opt/test-unroll.cpp | 6 +- tests/jlm/rvsdg/ArgumentTests.cpp | 4 +- tests/jlm/rvsdg/RegionTests.cpp | 2 +- tests/jlm/rvsdg/ResultTests.cpp | 2 +- tests/jlm/rvsdg/bitstring/bitstring.cpp | 117 +++++++++--------- tests/jlm/rvsdg/test-binary.cpp | 16 +-- tests/jlm/rvsdg/test-cse.cpp | 2 +- tests/jlm/rvsdg/test-gamma.cpp | 50 ++++---- tests/jlm/rvsdg/test-graph.cpp | 2 +- tests/jlm/rvsdg/test-nodes.cpp | 8 +- tests/jlm/rvsdg/test-statemux.cpp | 8 +- tests/jlm/rvsdg/test-theta.cpp | 30 ++--- tests/jlm/rvsdg/test-topdown.cpp | 2 +- tests/test-operation.cpp | 6 + tests/test-operation.hpp | 23 ++++ 55 files changed, 535 insertions(+), 557 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index b9032b421..45bf5a6f8 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -408,12 +408,12 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) auto root = rm.Rvsdg().root(); for (size_t i = 0; i < root->narguments(); ++i) { - if (auto ip = dynamic_cast(&root->argument(i)->port())) + if (auto graphImport = dynamic_cast(root->argument(i))) { - if (dynamic_cast(&ip->type())) + if (dynamic_cast(&graphImport->type())) { - cpp << "extern " << convert_to_c_type(&root->argument(i)->port().type()) << " " - << ip->name() << ";\n"; + cpp << "extern " << convert_to_c_type(&graphImport->type()) << " " << graphImport->Name() + << ";\n"; } else { @@ -493,12 +493,9 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) for (size_t i = 0; i < ln->ncvarguments(); ++i) { std::string name; - if (auto a = dynamic_cast(ln->input(i)->origin())) + if (auto graphImport = dynamic_cast(ln->input(i)->origin())) { - if (auto ip = dynamic_cast(&a->port())) - { - name = ip->name(); - } + name = graphImport->Name(); } else { diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp 
index 52c457f3c..599947172 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -62,9 +62,9 @@ convert_prints(llvm::RvsdgModule & rm) // TODO: make this less hacky by using the correct state types auto fct = llvm::FunctionType::Create({ rvsdg::bittype::Create(64), rvsdg::bittype::Create(64) }, {}); - llvm::impport imp(fct, "printnode", llvm::linkage::external_linkage); - auto printf = graph.add_import(imp); - convert_prints(root, printf, fct); + auto & printf = + llvm::GraphImport::Create(graph, fct, "printnode", llvm::linkage::external_linkage); + convert_prints(root, &printf, fct); } jlm::rvsdg::output * diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 020e880f4..5fc17f2f1 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -94,11 +94,11 @@ instrument_ref(llvm::RvsdgModule & rm) llvm::iostatetype::Create(), llvm::MemoryStateType::Create() }, { llvm::iostatetype::Create(), llvm::MemoryStateType::Create() }); - jlm::llvm::impport load_imp( + auto & reference_load = llvm::GraphImport::Create( + graph, loadFunctionType, "reference_load", - jlm::llvm::linkage::external_linkage); - auto reference_load = graph.add_import(load_imp); + llvm::linkage::external_linkage); // addr, data, width, memstate auto storeFunctionType = jlm::llvm::FunctionType::Create( { jlm::llvm::PointerType::Create(), @@ -107,11 +107,11 @@ instrument_ref(llvm::RvsdgModule & rm) llvm::iostatetype::Create(), jlm::llvm::MemoryStateType::Create() }, { llvm::iostatetype::Create(), jlm::llvm::MemoryStateType::Create() }); - jlm::llvm::impport store_imp( + auto & reference_store = llvm::GraphImport::Create( + graph, storeFunctionType, "reference_store", - jlm::llvm::linkage::external_linkage); - auto reference_store = graph.add_import(store_imp); + llvm::linkage::external_linkage); // addr, size, memstate auto allocaFunctionType 
= jlm::llvm::FunctionType::Create( { jlm::llvm::PointerType::Create(), @@ -119,20 +119,20 @@ instrument_ref(llvm::RvsdgModule & rm) llvm::iostatetype::Create(), jlm::llvm::MemoryStateType::Create() }, { llvm::iostatetype::Create(), jlm::llvm::MemoryStateType::Create() }); - jlm::llvm::impport alloca_imp( + auto & reference_alloca = llvm::GraphImport::Create( + graph, allocaFunctionType, "reference_alloca", - jlm::llvm::linkage::external_linkage); - auto reference_alloca = graph.add_import(alloca_imp); + llvm::linkage::external_linkage); instrument_ref( root, newLambda->subregion()->argument(ioStateArgumentIndex), - reference_load, + &reference_load, loadFunctionType, - reference_store, + &reference_store, storeFunctionType, - reference_alloca, + &reference_alloca, allocaFunctionType); } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 67585a3c3..a9a3519ba 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -170,11 +170,8 @@ get_impport_function_name(jlm::rvsdg::input * input) { auto traced = trace_call(input); JLM_ASSERT(traced); - auto arg = dynamic_cast(traced); - JLM_ASSERT(arg); - auto ip = dynamic_cast(&arg->port()); - JLM_ASSERT(ip); - return ip->name(); + auto arg = jlm::util::AssertedCast(traced); + return arg->Name(); } // trace function ptr to its call @@ -725,15 +722,11 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) { JLM_ASSERT(cvarg->nusers() == 0); auto cvip = cvarg->input(); - auto imp = cvip->origin(); newLambda->subregion()->RemoveArgument(cvarg->index()); // TODO: work around const newLambda->RemoveInput(cvip->index()); - auto imparg = dynamic_cast(imp); - JLM_ASSERT(imparg); - auto impprt = dynamic_cast(&imparg->port()); - JLM_ASSERT(impprt); - root->RemoveArgument(imparg->index()); + auto graphImport = util::AssertedCast(cvip->origin()); + root->RemoveArgument(graphImport->index()); } } } diff --git 
a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index f303b6ea5..66c533430 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -151,16 +151,14 @@ inline_calls(jlm::rvsdg::region * region) auto so = dynamic_cast(traced); if (!so) { - auto arg = dynamic_cast(traced); - auto ip = dynamic_cast(&arg->port()); - if (ip) + if (auto graphImport = dynamic_cast(traced)) { - if (ip->name().rfind("decouple_", 0) == 0) + if (graphImport->Name().rfind("decouple_", 0) == 0) { // can't inline pseudo functions used for decoupling continue; } - throw jlm::util::error("can not inline external function " + ip->name()); + throw jlm::util::error("can not inline external function " + graphImport->Name()); } } JLM_ASSERT(rvsdg::is(so->node())); @@ -346,10 +344,13 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) if (!orig_node_output) { // handle decouple stuff - auto arg = dynamic_cast(ln->input(i)->origin()); - auto ip = dynamic_cast(&arg->port()); - auto new_arg = rhls->Rvsdg().add_import(*ip); - smap.insert(ln->input(i)->origin(), new_arg); + auto oldGraphImport = dynamic_cast(ln->input(i)->origin()); + auto & newGraphImport = llvm::GraphImport::Create( + rhls->Rvsdg(), + oldGraphImport->ValueType(), + oldGraphImport->Name(), + oldGraphImport->Linkage()); + smap.insert(ln->input(i)->origin(), &newGraphImport); continue; } auto orig_node = orig_node_output->node(); @@ -366,12 +367,12 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) } std::cout << "delta node " << odn->name() << ": " << odn->type().debug_string() << "\n"; // add import for delta to rhls - llvm::impport im(odn->Type(), odn->name(), llvm::linkage::external_linkage); - // JLM_ASSERT(im.name()==odn->name()); - auto arg = rhls->Rvsdg().add_import(im); - auto tmp = dynamic_cast(&arg->port()); - JLM_ASSERT(tmp && tmp->name() == odn->name()); - 
smap.insert(ln->input(i)->origin(), arg); + auto & graphImport = llvm::GraphImport::Create( + rhls->Rvsdg(), + odn->Type(), + odn->name(), + llvm::linkage::external_linkage); + smap.insert(ln->input(i)->origin(), &graphImport); // add export for delta to rm // TODO: check if not already exported and maybe adjust linkage? rm.Rvsdg().add_export(odn->output(), { odn->output()->Type(), odn->name() }); @@ -390,12 +391,12 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) nullptr, new_ln->output()->Type()); // add function as input to rm and remove it - llvm::impport im( + auto & graphImport = llvm::GraphImport::Create( + rm.Rvsdg(), ln->Type(), ln->name(), llvm::linkage::external_linkage); // TODO: change linkage? - auto arg = rm.Rvsdg().add_import(im); - ln->output()->divert_users(arg); + ln->output()->divert_users(&graphImport); remove(ln); std::cout << "function " << new_ln->name() << " extracted for HLS\n"; return rhls; @@ -451,9 +452,10 @@ dump_ref(llvm::RvsdgModule & rhls, std::string & path) instrument_ref(*reference); for (size_t i = 0; i < reference->Rvsdg().root()->narguments(); ++i) { - auto arg = reference->Rvsdg().root()->argument(i); - auto imp = dynamic_cast(&arg->port()); - std::cout << "impport " << imp->name() << ": " << imp->type().debug_string() << "\n"; + auto graphImport = + util::AssertedCast(reference->Rvsdg().root()->argument(i)); + std::cout << "impport " << graphImport->Name() << ": " << graphImport->type().debug_string() + << "\n"; } ::llvm::LLVMContext ctx; jlm::util::StatisticsCollector statisticsCollector; diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index e5ce80c90..ca7f8ac4e 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -55,16 +55,9 @@ namespace rvsdg2jlm { static std::shared_ptr -is_function_import(const rvsdg::argument * argument) +is_function_import(const llvm::GraphImport * graphImport) { - 
JLM_ASSERT(argument->region()->graph()->root() == argument->region()); - - if (auto rvsdgImport = dynamic_cast(&argument->port())) - { - return std::dynamic_pointer_cast(rvsdgImport->Type()); - } - - return {}; + return std::dynamic_pointer_cast(graphImport->ValueType()); } static std::unique_ptr @@ -556,20 +549,24 @@ convert_imports(const rvsdg::graph & graph, ipgraph_module & im, context & ctx) for (size_t n = 0; n < graph.root()->narguments(); n++) { - auto argument = graph.root()->argument(n); - auto import = static_cast(&argument->port()); - if (auto ftype = is_function_import(argument)) + auto graphImport = util::AssertedCast(graph.root()->argument(n)); + if (auto ftype = is_function_import(graphImport)) { - auto f = function_node::create(ipg, import->name(), ftype, import->linkage()); + auto f = function_node::create(ipg, graphImport->Name(), ftype, graphImport->Linkage()); auto v = im.create_variable(f); - ctx.insert(argument, v); + ctx.insert(graphImport, v); } else { - auto dnode = - data_node::Create(ipg, import->name(), import->Type(), import->linkage(), "", false); + auto dnode = data_node::Create( + ipg, + graphImport->Name(), + graphImport->ValueType(), + graphImport->Linkage(), + "", + false); auto v = im.create_global_value(dnode); - ctx.insert(argument, v); + ctx.insert(graphImport, v); } } } diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 897703677..6abd1cb7e 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -988,8 +988,11 @@ ConvertFunctionNode( */ if (functionNode.cfg() == nullptr) { - impport port(functionNode.GetFunctionType(), functionNode.name(), functionNode.linkage()); - return region.graph()->add_import(port); + return &GraphImport::Create( + *region.graph(), + functionNode.GetFunctionType(), + functionNode.name(), + functionNode.linkage()); } return 
ConvertControlFlowGraph(functionNode, regionalizedVariableMap, statisticsCollector); @@ -1026,8 +1029,11 @@ ConvertDataNode( */ if (!dataNodeInitialization) { - impport port(dataNode.GetValueType(), dataNode.name(), dataNode.linkage()); - return region.graph()->add_import(port); + return &GraphImport::Create( + *region.graph(), + dataNode.GetValueType(), + dataNode.name(), + dataNode.linkage()); } /* diff --git a/jlm/llvm/ir/RvsdgModule.cpp b/jlm/llvm/ir/RvsdgModule.cpp index ea3178d1d..7d69f6b94 100644 --- a/jlm/llvm/ir/RvsdgModule.cpp +++ b/jlm/llvm/ir/RvsdgModule.cpp @@ -8,22 +8,10 @@ namespace jlm::llvm { -/* impport class */ - -impport::~impport() -{} - -bool -impport::operator==(const port & other) const noexcept -{ - auto p = dynamic_cast(&other); - return p && p->type() == type() && p->name() == name() && p->linkage() == linkage(); -} - -std::unique_ptr -impport::copy() const +GraphImport & +GraphImport::Copy(rvsdg::region & region, rvsdg::structural_input * input) { - return std::unique_ptr(new impport(*this)); + return GraphImport::Create(*region.graph(), ValueType(), Name(), Linkage()); } } diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 1b941d9d5..8891b383a 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -14,70 +14,57 @@ namespace jlm::llvm { -/* impport class */ - -class impport final : public jlm::rvsdg::impport +/** + * Represents an import into the RVSDG of an external entity. + * It is used to model LLVM module declarations. 
+ */ +class GraphImport final : public rvsdg::GraphImport { -public: - virtual ~impport(); - - impport( - std::shared_ptr valueType, - const std::string & name, - const linkage & lnk) - : jlm::rvsdg::impport(PointerType::Create(), name), - linkage_(lnk), +private: + GraphImport( + rvsdg::graph & graph, + std::shared_ptr valueType, + std::string name, + llvm::linkage linkage) + : rvsdg::GraphImport(graph, PointerType::Create(), std::move(name)), + Linkage_(std::move(linkage)), ValueType_(std::move(valueType)) {} - impport(const impport & other) = default; - - impport(impport && other) = default; - - impport & - operator=(const impport &) = delete; - - impport & - operator=(impport &&) = delete; - - const jlm::llvm::linkage & - linkage() const noexcept +public: + [[nodiscard]] const linkage & + Linkage() const noexcept { - return linkage_; + return Linkage_; } [[nodiscard]] const std::shared_ptr & - Type() const noexcept + ValueType() const noexcept { return ValueType_; } - [[nodiscard]] const jlm::rvsdg::valuetype & - GetValueType() const noexcept + GraphImport & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + + static GraphImport & + Create( + rvsdg::graph & graph, + std::shared_ptr valueType, + std::string name, + llvm::linkage linkage) { - return *ValueType_; + auto graphImport = + new GraphImport(graph, std::move(valueType), std::move(name), std::move(linkage)); + graph.root()->append_argument(graphImport); + return *graphImport; } - virtual bool - operator==(const port &) const noexcept override; - - virtual std::unique_ptr - copy() const override; - private: - jlm::llvm::linkage linkage_; - std::shared_ptr ValueType_; + llvm::linkage Linkage_; + std::shared_ptr ValueType_; }; -static inline bool -is_import(const jlm::rvsdg::output * output) -{ - auto graph = output->region()->graph(); - - auto argument = dynamic_cast(output); - return argument && argument->region() == graph->root(); -} - static inline bool is_export(const 
jlm::rvsdg::input * input) { diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 703cea038..6bfbec681 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -163,7 +163,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) if (is(origin)) return origin; - if (is_import(origin)) + if (is(origin)) return origin; if (is(rvsdg::node_output::node(origin))) diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index dbd5d670d..cb4ba2a0f 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -193,7 +193,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) Context_->MarkAlive(output); - if (is_import(&output)) + if (is(&output)) { return; } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 479d00840..954059748 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -998,7 +998,7 @@ Andersen::AnalyzeRvsdg(const rvsdg::graph & graph) // These symbols can either be global variables or functions for (size_t n = 0; n < rootRegion.narguments(); n++) { - auto & argument = *rootRegion.argument(n); + auto & argument = *util::AssertedCast(rootRegion.argument(n)); // Only care about imported pointer values if (!IsOrContainsPointerType(argument.type())) diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index b9c3214f6..1bea24408 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -151,7 +151,7 @@ PointerObjectSet::GetLambdaNodeFromFunctionMemoryObject(PointerObjectIndex index } PointerObjectIndex -PointerObjectSet::CreateImportMemoryObject(const rvsdg::argument & importNode) +PointerObjectSet::CreateImportMemoryObject(const GraphImport & importNode) { JLM_ASSERT(ImportMap_.count(&importNode) == 0); 
auto importMemoryObject = AddPointerObject(PointerObjectKind::ImportMemoryObject); @@ -192,7 +192,7 @@ PointerObjectSet::GetFunctionMap() const noexcept return FunctionMap_; } -const std::unordered_map & +const std::unordered_map & PointerObjectSet::GetImportMap() const noexcept { return ImportMap_; diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index 691ce65c5..d85f33760 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -154,7 +155,7 @@ class PointerObjectSet final util::BijectiveMap FunctionMap_; - std::unordered_map ImportMap_; + std::unordered_map ImportMap_; /** * Internal helper function for adding PointerObjects, use the Create* methods instead @@ -260,7 +261,7 @@ class PointerObjectSet final GetLambdaNodeFromFunctionMemoryObject(PointerObjectIndex index) const; [[nodiscard]] PointerObjectIndex - CreateImportMemoryObject(const rvsdg::argument & importNode); + CreateImportMemoryObject(const GraphImport & importNode); const std::unordered_map & GetRegisterMap() const noexcept; @@ -277,7 +278,7 @@ class PointerObjectSet final const util::BijectiveMap & GetFunctionMap() const noexcept; - const std::unordered_map & + const std::unordered_map & GetImportMap() const noexcept; /** diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp index 9f8e87d33..15a5588ba 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp @@ -490,10 +490,9 @@ PointsToGraph::RegisterNode::ToString(const rvsdg::output & output) if (node != nullptr) return util::strfmt(node->operation().debug_string(), ":a", output.index()); - if (is_import(&output)) + if (auto graphImport = dynamic_cast(&output)) { - auto port = util::AssertedCast(&output.port()); - return util::strfmt("import:", 
port->name()); + return util::strfmt("import:", graphImport->Name()); } return "RegisterNode"; @@ -555,8 +554,7 @@ PointsToGraph::ImportNode::~ImportNode() noexcept = default; std::string PointsToGraph::ImportNode::DebugString() const { - auto port = util::AssertedCast(&GetArgument().port()); - return port->name(); + return GetArgument().Name(); } PointsToGraph::UnknownMemoryNode::~UnknownMemoryNode() noexcept = default; diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp index 768b281ca..32d94c82e 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp @@ -753,32 +753,30 @@ class PointsToGraph::ImportNode final : public PointsToGraph::MemoryNode ~ImportNode() noexcept override; private: - ImportNode(PointsToGraph & pointsToGraph, const jlm::rvsdg::argument & argument) + ImportNode(PointsToGraph & pointsToGraph, const GraphImport & graphImport) : MemoryNode(pointsToGraph), - Argument_(&argument) - { - JLM_ASSERT(dynamic_cast(&argument.port())); - } + GraphImport_(&graphImport) + {} public: - const jlm::rvsdg::argument & + const GraphImport & GetArgument() const noexcept { - return *Argument_; + return *GraphImport_; } std::string DebugString() const override; static PointsToGraph::ImportNode & - Create(PointsToGraph & pointsToGraph, const jlm::rvsdg::argument & argument) + Create(PointsToGraph & pointsToGraph, const GraphImport & argument) { auto n = std::unique_ptr(new ImportNode(pointsToGraph, argument)); return pointsToGraph.AddImportNode(std::move(n)); } private: - const jlm::rvsdg::argument * Argument_; + const GraphImport * GraphImport_; }; /** \brief PointsTo graph unknown node diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index fff523ccc..314a23184 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -251,10 +251,9 @@ class 
RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":out", index); } - if (is_import(Output_)) + if (auto graphImport = dynamic_cast(Output_)) { - auto import = jlm::util::AssertedCast(&Output_->port()); - return jlm::util::strfmt("imp:", import->name()); + return jlm::util::strfmt("imp:", graphImport->Name()); } if (is(Output_)) @@ -467,16 +466,15 @@ class ImportLocation final : public MemoryLocation { ~ImportLocation() override = default; - ImportLocation(const rvsdg::argument & argument, PointsToFlags pointsToFlags) + ImportLocation(const GraphImport & graphImport, PointsToFlags pointsToFlags) : MemoryLocation(), - Argument_(argument) + Argument_(graphImport) { - JLM_ASSERT(dynamic_cast(&argument.port())); SetPointsToFlags(pointsToFlags); } public: - [[nodiscard]] const rvsdg::argument & + [[nodiscard]] const GraphImport & GetArgument() const noexcept { return Argument_; @@ -489,24 +487,23 @@ class ImportLocation final : public MemoryLocation } static std::unique_ptr - Create(const rvsdg::argument & argument) + Create(const GraphImport & graphImport) { - JLM_ASSERT(is(argument.type())); + JLM_ASSERT(is(graphImport.type())); // If the imported memory location is a pointer type or contains a pointer type, then these // pointers can point to values that escaped this module. - auto & rvsdgImport = *util::AssertedCast(&argument.port()); - bool isOrContainsPointerType = IsOrContains(rvsdgImport.GetValueType()); + bool isOrContainsPointerType = IsOrContains(*graphImport.ValueType()); return std::unique_ptr(new ImportLocation( - argument, + graphImport, isOrContainsPointerType ? 
PointsToFlags::PointsToExternalMemory | PointsToFlags::PointsToEscapedMemory : PointsToFlags::PointsToNone)); } private: - const rvsdg::argument & Argument_; + const GraphImport & Argument_; }; /** @@ -606,9 +603,9 @@ class Steensgaard::Context final } Location & - InsertImportLocation(const jlm::rvsdg::argument & argument) + InsertImportLocation(const GraphImport & graphImport) { - Locations_.push_back(ImportLocation::Create(argument)); + Locations_.push_back(ImportLocation::Create(graphImport)); auto location = Locations_.back().get(); DisjointLocationSet_.insert(location); @@ -1760,12 +1757,12 @@ Steensgaard::AnalyzeImports(const rvsdg::graph & graph) auto rootRegion = graph.root(); for (size_t n = 0; n < rootRegion->narguments(); n++) { - auto & argument = *rootRegion->argument(n); + auto & graphImport = *util::AssertedCast(rootRegion->argument(n)); - if (HasOrContainsPointerType(argument)) + if (HasOrContainsPointerType(graphImport)) { - auto & importLocation = Context_->InsertImportLocation(argument); - auto & registerLocation = Context_->GetOrInsertRegisterLocation(argument); + auto & importLocation = Context_->InsertImportLocation(graphImport); + auto & registerLocation = Context_->GetOrInsertRegisterLocation(graphImport); registerLocation.SetPointsTo(importLocation); } } diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 1e7993cd7..bb868f466 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -14,24 +14,14 @@ namespace jlm::rvsdg { -/* impport */ - -impport::~impport() +GraphImport::GraphImport( + rvsdg::graph & graph, + std::shared_ptr type, + std::string name) + : argument(graph.root(), nullptr, std::move(type)), + Name_(std::move(name)) {} -bool -impport::operator==(const port & other) const noexcept -{ - auto p = dynamic_cast(&other); - return p && p->type() == type() && p->name() == name(); -} - -std::unique_ptr -impport::copy() const -{ - return std::unique_ptr(new impport(*this)); -} - /* expport */ expport::~expport() diff --git 
a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index 2573c525d..5524236c0 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -22,48 +22,23 @@ namespace jlm::rvsdg { -/* impport class */ - -class impport : public port +/** + * Represents an import into the RVSDG of an external entity. + */ +class GraphImport : public argument { -public: - virtual ~impport(); - - impport(std::shared_ptr type, const std::string & name) - : port(std::move(type)), - name_(name) - {} - - impport(const impport & other) - : port(other), - name_(other.name_) - {} - - impport(impport && other) - : port(other), - name_(std::move(other.name_)) - {} - - impport & - operator=(const impport &) = delete; +protected: + GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name); - impport & - operator=(impport &&) = delete; - - const std::string & - name() const noexcept +public: + [[nodiscard]] const std::string & + Name() const noexcept { - return name_; + return Name_; } - virtual bool - operator==(const port &) const noexcept override; - - virtual std::unique_ptr - copy() const override; - private: - std::string name_; + std::string Name_; }; /* expport class */ @@ -144,12 +119,6 @@ class graph jlm::rvsdg::node_normal_form * node_normal_form(const std::type_info & type) noexcept; - inline jlm::rvsdg::argument * - add_import(const impport & port) - { - return argument::create(root(), nullptr, port); - } - inline jlm::rvsdg::input * add_export(jlm::rvsdg::output * operand, const expport & port) { diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index b4570eb33..937cc1838 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -1129,7 +1129,7 @@ ExternalCallTest1::SetupRvsdg() auto SetupFunctionGDeclaration = [&]() { - return rvsdg->add_import(impport(functionGType, "g", linkage::external_linkage)); + return &GraphImport::Create(*rvsdg, functionGType, "g", linkage::external_linkage); }; auto SetupFunctionF = [&](jlm::rvsdg::argument * functionG) @@ -1228,10 
+1228,10 @@ ExternalCallTest2::SetupRvsdg() }); auto llvmLifetimeStart = - rvsdg.add_import(impport(pointerType, "llvm.lifetime.start.p0", linkage::external_linkage)); + &GraphImport::Create(rvsdg, pointerType, "llvm.lifetime.start.p0", linkage::external_linkage); auto llvmLifetimeEnd = - rvsdg.add_import(impport(pointerType, "llvm.lifetime.end.p0", linkage::external_linkage)); - ExternalFArgument_ = rvsdg.add_import(impport(pointerType, "f", linkage::external_linkage)); + &GraphImport::Create(rvsdg, pointerType, "llvm.lifetime.end.p0", linkage::external_linkage); + ExternalFArgument_ = &GraphImport::Create(rvsdg, pointerType, "f", linkage::external_linkage); // Setup function g() LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); @@ -1964,10 +1964,16 @@ ImportTest::SetupRvsdg() return std::make_tuple(lambdaOutput, &call); }; - auto d1 = - graph->add_import(impport(jlm::rvsdg::bittype::Create(32), "d1", linkage::external_linkage)); - auto d2 = - graph->add_import(impport(jlm::rvsdg::bittype::Create(32), "d2", linkage::external_linkage)); + auto d1 = &GraphImport::Create( + *graph, + jlm::rvsdg::bittype::Create(32), + "d1", + linkage::external_linkage); + auto d2 = &GraphImport::Create( + *graph, + jlm::rvsdg::bittype::Create(32), + "d2", + linkage::external_linkage); auto f1 = SetupF1(d1); auto [f2, callF1] = SetupF2(f1, d1, d2); @@ -2738,14 +2744,20 @@ EscapedMemoryTest2::SetupRvsdg() auto SetupExternalFunction1Declaration = [&]() { - return rvsdg->add_import( - impport(externalFunction1Type, "ExternalFunction1", linkage::external_linkage)); + return &GraphImport::Create( + *rvsdg, + externalFunction1Type, + "ExternalFunction1", + linkage::external_linkage); }; auto SetupExternalFunction2Declaration = [&]() { - return rvsdg->add_import( - impport(externalFunction2Type, "ExternalFunction2", linkage::external_linkage)); + return &GraphImport::Create( + *rvsdg, + externalFunction2Type, + "ExternalFunction2", + 
linkage::external_linkage); }; auto SetupReturnAddressFunction = [&]() @@ -2904,8 +2916,11 @@ EscapedMemoryTest3::SetupRvsdg() auto SetupExternalFunctionDeclaration = [&]() { - return rvsdg->add_import( - impport(externalFunctionType, "externalFunction", linkage::external_linkage)); + return &GraphImport::Create( + *rvsdg, + externalFunctionType, + "externalFunction", + linkage::external_linkage); }; auto SetupGlobal = [&]() @@ -3391,8 +3406,11 @@ AllMemoryNodesTest::SetupRvsdg() nf->set_mutable(false); // Create imported symbol "imported" - Import_ = graph->add_import( - impport(jlm::rvsdg::bittype::Create(32), "imported", linkage::external_linkage)); + Import_ = &GraphImport::Create( + *graph, + rvsdg::bittype::Create(32), + "imported", + linkage::external_linkage); // Create global variable "global" Delta_ = delta::node::Create( @@ -3725,7 +3743,7 @@ VariadicFunctionTest1::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }); // Setup h() - ImportH_ = rvsdg.add_import(impport(lambdaHType, "h", linkage::external_linkage)); + ImportH_ = &GraphImport::Create(rvsdg, lambdaHType, "h", linkage::external_linkage); // Setup f() { @@ -3829,12 +3847,13 @@ VariadicFunctionTest2::SetupRvsdg() { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto llvmLifetimeStart = - rvsdg.add_import(impport(pointerType, "llvm.lifetime.start.p0", linkage::external_linkage)); + &GraphImport::Create(rvsdg, pointerType, "llvm.lifetime.start.p0", linkage::external_linkage); auto llvmLifetimeEnd = - rvsdg.add_import(impport(pointerType, "llvm.lifetime.end.p0", linkage::external_linkage)); + &GraphImport::Create(rvsdg, pointerType, "llvm.lifetime.end.p0", linkage::external_linkage); auto llvmVaStart = - rvsdg.add_import(impport(pointerType, "llvm.va_start", linkage::external_linkage)); - auto llvmVaEnd = rvsdg.add_import(impport(pointerType, "llvm.va_end", linkage::external_linkage)); + &GraphImport::Create(rvsdg, pointerType, "llvm.va_start", 
linkage::external_linkage); + auto llvmVaEnd = + &GraphImport::Create(rvsdg, pointerType, "llvm.va_end", linkage::external_linkage); // Setup function fst() { diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index 96a3e3956..8486d4c0c 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -2108,7 +2108,7 @@ class AllMemoryNodesTest final : public RvsdgTest return *Delta_->output(); } - [[nodiscard]] const jlm::rvsdg::argument & + [[nodiscard]] const llvm::GraphImport & GetImportOutput() const noexcept { JLM_ASSERT(Import_); @@ -2163,7 +2163,7 @@ class AllMemoryNodesTest final : public RvsdgTest jlm::llvm::delta::node * Delta_ = {}; - jlm::rvsdg::argument * Import_ = {}; + jlm::llvm::GraphImport * Import_ = {}; jlm::llvm::lambda::node * Lambda_ = {}; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 70e0b7789..d9f98e465 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -25,10 +25,10 @@ TestGamma() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto p = rvsdg.add_import({ jlm::rvsdg::ctltype::Create(2), "p" }); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); - auto z = rvsdg.add_import({ valueType, "z" }); + auto p = &jlm::tests::GraphImport::Create(rvsdg, jlm::rvsdg::ctltype::Create(2), "p"); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); + auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); auto gammaNode = jlm::rvsdg::gamma_node::create(p, 2); @@ -84,10 +84,10 @@ TestTheta() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto p = rvsdg.add_import({ jlm::rvsdg::ctltype::Create(2), "p" 
}); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); - auto z = rvsdg.add_import({ valueType, "z" }); + auto p = &jlm::tests::GraphImport::Create(rvsdg, jlm::rvsdg::ctltype::Create(2), "p"); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); + auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); auto thetaNode = jlm::rvsdg::theta_node::create(rvsdg.root()); @@ -134,7 +134,7 @@ TestLambda() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto x = rvsdg.add_import({ valueType, "x" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp index 5a65641b2..633657d8c 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp @@ -21,13 +21,13 @@ test() { using namespace jlm::llvm; + // Arrange auto vt = jlm::tests::valuetype::Create(); auto pt = PointerType::Create(); RvsdgModule rm(jlm::util::filepath(""), "", ""); - /* setup graph */ - auto imp = rm.Rvsdg().add_import(impport(vt, "", linkage::external_linkage)); + auto imp = &GraphImport::Create(rm.Rvsdg(), vt, "", linkage::external_linkage); phi::builder pb; pb.begin(rm.Rvsdg().root()); diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index 365fcd8ea..09eb7e382 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -56,11 +56,11 @@ TestCopy() auto pointerType = PointerType::Create(); jlm::rvsdg::graph graph; - auto address1 = graph.add_import({ pointerType, "address1" }); - auto memoryState1 = 
graph.add_import({ memoryType, "memoryState1" }); + auto address1 = &jlm::tests::GraphImport::Create(graph, pointerType, "address1"); + auto memoryState1 = &jlm::tests::GraphImport::Create(graph, memoryType, "memoryState1"); - auto address2 = graph.add_import({ pointerType, "address2" }); - auto memoryState2 = graph.add_import({ memoryType, "memoryState2" }); + auto address2 = &jlm::tests::GraphImport::Create(graph, pointerType, "address2"); + auto memoryState2 = &jlm::tests::GraphImport::Create(graph, memoryType, "memoryState2"); auto loadResults = LoadNonVolatileNode::Create(address1, { memoryState1 }, valueType, 4); @@ -89,7 +89,7 @@ TestLoadAllocaReduction() nf->set_mutable(false); nf->set_load_alloca_reducible(false); - auto size = graph.add_import({ bt, "v" }); + auto size = &jlm::tests::GraphImport::Create(graph, bt, "v"); auto alloca1 = alloca_op::create(bt, size, 4); auto alloca2 = alloca_op::create(bt, size, 4); @@ -132,8 +132,8 @@ TestMultipleOriginReduction() nf->set_mutable(false); nf->set_multiple_origin_reducible(false); - auto a = graph.add_import({ pt, "a" }); - auto s = graph.add_import({ mt, "s" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); auto load = LoadNonVolatileNode::Create(a, { s, s, s, s }, vt, 4)[0]; @@ -167,7 +167,7 @@ TestLoadStoreStateReduction() nf->set_mutable(false); nf->set_load_store_state_reducible(false); - auto size = graph.add_import({ bt, "v" }); + auto size = &jlm::tests::GraphImport::Create(graph, bt, "v"); auto alloca1 = alloca_op::create(bt, size, 4); auto alloca2 = alloca_op::create(bt, size, 4); @@ -215,9 +215,9 @@ TestLoadStoreReduction() nf->set_mutable(false); nf->set_load_store_reducible(false); - auto a = graph.add_import({ pt, "address" }); - auto v = graph.add_import({ vt, "value" }); - auto s = graph.add_import({ mt, "state" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "address"); + auto v = 
&jlm::tests::GraphImport::Create(graph, vt, "value"); + auto s = &jlm::tests::GraphImport::Create(graph, mt, "state"); auto s1 = StoreNonVolatileNode::Create(a, v, { s }, 4)[0]; auto load = LoadNonVolatileNode::Create(a, { s1 }, vt, 4); @@ -254,13 +254,13 @@ TestLoadLoadReduction() auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); - auto a1 = graph.add_import({ pt, "a1" }); - auto a2 = graph.add_import({ pt, "a2" }); - auto a3 = graph.add_import({ pt, "a3" }); - auto a4 = graph.add_import({ pt, "a4" }); - auto v1 = graph.add_import({ vt, "v1" }); - auto s1 = graph.add_import({ mt, "s1" }); - auto s2 = graph.add_import({ mt, "s2" }); + auto a1 = &jlm::tests::GraphImport::Create(graph, pt, "a1"); + auto a2 = &jlm::tests::GraphImport::Create(graph, pt, "a2"); + auto a3 = &jlm::tests::GraphImport::Create(graph, pt, "a3"); + auto a4 = &jlm::tests::GraphImport::Create(graph, pt, "a4"); + auto v1 = &jlm::tests::GraphImport::Create(graph, vt, "v1"); + auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); auto st1 = StoreNonVolatileNode::Create(a1, v1, { s1 }, 4); auto ld1 = LoadNonVolatileNode::Create(a2, { s1 }, vt, 4); @@ -408,13 +408,13 @@ NodeCopy() auto valueType = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto & address1 = *graph.add_import({ pointerType, "address1" }); - auto & iOState1 = *graph.add_import({ iOStateType, "iOState1" }); - auto & memoryState1 = *graph.add_import({ memoryType, "memoryState1" }); + auto & address1 = jlm::tests::GraphImport::Create(graph, pointerType, "address1"); + auto & iOState1 = jlm::tests::GraphImport::Create(graph, iOStateType, "iOState1"); + auto & memoryState1 = jlm::tests::GraphImport::Create(graph, memoryType, "memoryState1"); - auto & address2 = *graph.add_import({ pointerType, "address2" }); - auto & iOState2 = *graph.add_import({ iOStateType, "iOState2" }); - auto & memoryState2 = *graph.add_import({ 
memoryType, "memoryState2" }); + auto & address2 = jlm::tests::GraphImport::Create(graph, pointerType, "address2"); + auto & iOState2 = jlm::tests::GraphImport::Create(graph, iOStateType, "iOState2"); + auto & memoryState2 = jlm::tests::GraphImport::Create(graph, memoryType, "memoryState2"); auto & loadNode = LoadVolatileNode::CreateNode(address1, iOState1, { &memoryState1 }, valueType, 4); diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index 287b40b3e..61913e02e 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -141,15 +141,15 @@ StoreVolatileNodeCopy() auto valueType = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto & address1 = *graph.add_import({ pointerType, "address1" }); - auto & value1 = *graph.add_import({ valueType, "value1" }); - auto & ioState1 = *graph.add_import({ ioStateType, "ioState1" }); - auto & memoryState1 = *graph.add_import({ memoryType, "memoryState1" }); + auto & address1 = jlm::tests::GraphImport::Create(graph, pointerType, "address1"); + auto & value1 = jlm::tests::GraphImport::Create(graph, valueType, "value1"); + auto & ioState1 = jlm::tests::GraphImport::Create(graph, ioStateType, "ioState1"); + auto & memoryState1 = jlm::tests::GraphImport::Create(graph, memoryType, "memoryState1"); - auto & address2 = *graph.add_import({ pointerType, "address2" }); - auto & value2 = *graph.add_import({ valueType, "value2" }); - auto & ioState2 = *graph.add_import({ ioStateType, "ioState2" }); - auto & memoryState2 = *graph.add_import({ memoryType, "memoryState2" }); + auto & address2 = jlm::tests::GraphImport::Create(graph, pointerType, "address2"); + auto & value2 = jlm::tests::GraphImport::Create(graph, valueType, "value2"); + auto & ioState2 = jlm::tests::GraphImport::Create(graph, ioStateType, "ioState2"); + auto & memoryState2 = jlm::tests::GraphImport::Create(graph, memoryType, "memoryState2"); auto & storeNode = 
StoreVolatileNode::CreateNode(address1, value1, ioState1, { &memoryState1 }, 4); @@ -182,13 +182,13 @@ TestCopy() auto memoryStateType = MemoryStateType::Create(); jlm::rvsdg::graph graph; - auto address1 = graph.add_import({ pointerType, "address1" }); - auto value1 = graph.add_import({ valueType, "value1" }); - auto memoryState1 = graph.add_import({ memoryStateType, "state1" }); + auto address1 = &jlm::tests::GraphImport::Create(graph, pointerType, "address1"); + auto value1 = &jlm::tests::GraphImport::Create(graph, valueType, "value1"); + auto memoryState1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state1"); - auto address2 = graph.add_import({ pointerType, "address2" }); - auto value2 = graph.add_import({ valueType, "value2" }); - auto memoryState2 = graph.add_import({ memoryStateType, "state2" }); + auto address2 = &jlm::tests::GraphImport::Create(graph, pointerType, "address2"); + auto value2 = &jlm::tests::GraphImport::Create(graph, valueType, "value2"); + auto memoryState2 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state2"); auto storeResults = StoreNonVolatileNode::Create(address1, value1, { memoryState1 }, 4); @@ -219,11 +219,11 @@ TestStoreMuxReduction() snf->set_mutable(false); snf->set_store_mux_reducible(false); - auto a = graph.add_import({ pt, "a" }); - auto v = graph.add_import({ vt, "v" }); - auto s1 = graph.add_import({ mt, "s1" }); - auto s2 = graph.add_import({ mt, "s2" }); - auto s3 = graph.add_import({ mt, "s3" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); + auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); + auto s3 = &jlm::tests::GraphImport::Create(graph, mt, "s3"); auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); auto state = StoreNonVolatileNode::Create(a, v, { mux }, 4); @@ -268,9 +268,9 @@ TestMultipleOriginReduction() 
snf->set_mutable(false); snf->set_multiple_origin_reducible(false); - auto a = graph.add_import({ pt, "a" }); - auto v = graph.add_import({ vt, "v" }); - auto s = graph.add_import({ mt, "s" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); + auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); auto states = StoreNonVolatileNode::Create(a, v, { s, s, s, s }, 4); @@ -307,9 +307,9 @@ TestStoreAllocaReduction() snf->set_mutable(false); snf->set_store_alloca_reducible(false); - auto size = graph.add_import({ bt, "size" }); - auto value = graph.add_import({ vt, "value" }); - auto s = graph.add_import({ mt, "s" }); + auto size = &jlm::tests::GraphImport::Create(graph, bt, "size"); + auto value = &jlm::tests::GraphImport::Create(graph, vt, "value"); + auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); auto alloca1 = alloca_op::create(vt, size, 4); auto alloca2 = alloca_op::create(vt, size, 4); @@ -351,10 +351,10 @@ TestStoreStoreReduction() auto mt = MemoryStateType::Create(); jlm::rvsdg::graph graph; - auto a = graph.add_import({ pt, "address" }); - auto v1 = graph.add_import({ vt, "value" }); - auto v2 = graph.add_import({ vt, "value" }); - auto s = graph.add_import({ mt, "state" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "address"); + auto v1 = &jlm::tests::GraphImport::Create(graph, vt, "value"); + auto v2 = &jlm::tests::GraphImport::Create(graph, vt, "value"); + auto s = &jlm::tests::GraphImport::Create(graph, mt, "state"); auto s1 = StoreNonVolatileNode::Create(a, v1, { s }, 4)[0]; auto s2 = StoreNonVolatileNode::Create(a, v2, { s1 }, 4)[0]; diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 882bf624c..3c4b812a8 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -27,15 +27,15 @@ TestCopy() { valueType, iostatetype::Create(), MemoryStateType::Create() }); 
jlm::rvsdg::graph rvsdg; - auto function1 = rvsdg.add_import({ PointerType::Create(), "function1" }); - auto value1 = rvsdg.add_import({ valueType, "value1" }); - auto iOState1 = rvsdg.add_import({ iOStateType, "iOState1" }); - auto memoryState1 = rvsdg.add_import({ memoryStateType, "memoryState1" }); + auto function1 = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "function1"); + auto value1 = &jlm::tests::GraphImport::Create(rvsdg, valueType, "value1"); + auto iOState1 = &jlm::tests::GraphImport::Create(rvsdg, iOStateType, "iOState1"); + auto memoryState1 = &jlm::tests::GraphImport::Create(rvsdg, memoryStateType, "memoryState1"); - auto function2 = rvsdg.add_import({ PointerType::Create(), "function2" }); - auto value2 = rvsdg.add_import({ valueType, "value2" }); - auto iOState2 = rvsdg.add_import({ iOStateType, "iOState2" }); - auto memoryState2 = rvsdg.add_import({ memoryStateType, "memoryState2" }); + auto function2 = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "function2"); + auto value2 = &jlm::tests::GraphImport::Create(rvsdg, valueType, "value2"); + auto iOState2 = &jlm::tests::GraphImport::Create(rvsdg, iOStateType, "iOState2"); + auto memoryState2 = &jlm::tests::GraphImport::Create(rvsdg, memoryStateType, "memoryState2"); auto callResults = CallNode::Create(function1, functionType, { value1, iOState1, memoryState1 }); @@ -64,10 +64,10 @@ TestCallNodeAccessors() { valueType, iostatetype::Create(), MemoryStateType::Create() }); jlm::rvsdg::graph rvsdg; - auto f = rvsdg.add_import({ PointerType::Create(), "function" }); - auto v = rvsdg.add_import({ valueType, "value" }); - auto i = rvsdg.add_import({ iOStateType, "IOState" }); - auto m = rvsdg.add_import({ memoryStateType, "memoryState" }); + auto f = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "function"); + auto v = &jlm::tests::GraphImport::Create(rvsdg, valueType, "value"); + auto i = &jlm::tests::GraphImport::Create(rvsdg, iOStateType, "IOState"); 
+ auto m = &jlm::tests::GraphImport::Create(rvsdg, memoryStateType, "memoryState"); // Act auto results = CallNode::Create(f, functionType, { v, i, m }); diff --git a/tests/jlm/llvm/ir/operators/TestFree.cpp b/tests/jlm/llvm/ir/operators/TestFree.cpp index 86019553c..6136bdac5 100644 --- a/tests/jlm/llvm/ir/operators/TestFree.cpp +++ b/tests/jlm/llvm/ir/operators/TestFree.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. */ +#include #include #include @@ -69,9 +70,9 @@ TestRvsdgCreator() // Arrange jlm::rvsdg::graph rvsdg; - auto address = rvsdg.add_import({ PointerType::Create(), "p" }); - auto memoryState = rvsdg.add_import({ MemoryStateType::Create(), "m" }); - auto iOState = rvsdg.add_import({ iostatetype::Create(), "io" }); + auto address = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "p"); + auto memoryState = &jlm::tests::GraphImport::Create(rvsdg, MemoryStateType::Create(), "m"); + auto iOState = &jlm::tests::GraphImport::Create(rvsdg, iostatetype::Create(), "io"); // Act auto freeResults0 = FreeOperation::Create(address, {}, iOState); diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index a2ff63644..7046b526c 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -53,7 +53,7 @@ TestArgumentIterators() } { - auto rvsdgImport = rvsdgModule.Rvsdg().add_import({ vt, "" }); + auto rvsdgImport = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), vt, ""); auto functionType = FunctionType::Create({ vt, vt, vt }, { vt, vt }); @@ -121,7 +121,7 @@ TestRemoveLambdaInputsWhere() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto x = rvsdg.add_import({ valueType, "x" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); @@ -190,7 +190,7 @@ 
TestPruneLambdaInputs() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto x = rvsdg.add_import({ valueType, "x" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index ab0028083..79593e6c3 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -85,7 +85,7 @@ TestRemovePhiArgumentsWhere() auto valueType = jlm::tests::valuetype::Create(); RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); - auto x = rvsdgModule.Rvsdg().add_import({ valueType, "" }); + auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); phi::builder phiBuilder; phiBuilder.begin(rvsdgModule.Rvsdg().root()); @@ -168,7 +168,7 @@ TestPrunePhiArguments() auto valueType = jlm::tests::valuetype::Create(); RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); - auto x = rvsdgModule.Rvsdg().add_import({ valueType, "" }); + auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); phi::builder phiBuilder; phiBuilder.begin(rvsdgModule.Rvsdg().root()); diff --git a/tests/jlm/llvm/ir/operators/test-delta.cpp b/tests/jlm/llvm/ir/operators/test-delta.cpp index 1ddc4b978..a0f18a5a4 100644 --- a/tests/jlm/llvm/ir/operators/test-delta.cpp +++ b/tests/jlm/llvm/ir/operators/test-delta.cpp @@ -22,7 +22,7 @@ TestDeltaCreation() auto pointerType = PointerType::Create(); RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); - auto imp = rvsdgModule.Rvsdg().add_import({ valueType, "" }); + auto imp = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); auto delta1 = delta::node::Create( rvsdgModule.Rvsdg().root(), @@ -70,7 +70,7 @@ TestRemoveDeltaInputsWhere() auto valueType = jlm::tests::valuetype::Create(); RvsdgModule 
rvsdgModule(jlm::util::filepath(""), "", ""); - auto x = rvsdgModule.Rvsdg().add_import({ valueType, "" }); + auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); auto deltaNode = delta::node::Create( rvsdgModule.Rvsdg().root(), @@ -137,7 +137,7 @@ TestPruneDeltaInputs() auto valueType = jlm::tests::valuetype::Create(); RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); - auto x = rvsdgModule.Rvsdg().add_import({ valueType, "" }); + auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); auto deltaNode = delta::node::Create( rvsdgModule.Rvsdg().root(), diff --git a/tests/jlm/llvm/ir/operators/test-sext.cpp b/tests/jlm/llvm/ir/operators/test-sext.cpp index a51396a01..5c99c110b 100644 --- a/tests/jlm/llvm/ir/operators/test-sext.cpp +++ b/tests/jlm/llvm/ir/operators/test-sext.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. */ +#include #include #include @@ -20,7 +21,7 @@ test_bitunary_reduction() auto nf = jlm::llvm::sext_op::normal_form(&graph); nf->set_mutable(false); - auto x = graph.add_import({ bt32, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, bt32, "x"); auto y = jlm::rvsdg::bitnot_op::create(32, x); auto z = jlm::llvm::sext_op::create(64, y); @@ -47,8 +48,8 @@ test_bitbinary_reduction() auto nf = jlm::llvm::sext_op::normal_form(&graph); nf->set_mutable(false); - auto x = graph.add_import({ bt32, "x" }); - auto y = graph.add_import({ bt32, "y" }); + auto x = &jlm::tests::GraphImport::Create(graph, bt32, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, bt32, "y"); auto z = jlm::rvsdg::bitadd_op::create(32, x, y); auto w = jlm::llvm::sext_op::create(64, z); @@ -77,7 +78,7 @@ test_inverse_reduction() auto nf = jlm::llvm::sext_op::normal_form(&graph); nf->set_mutable(false); - auto x = graph.add_import({ bt64, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, bt64, "x"); auto y = jlm::llvm::trunc_op::create(32, x); auto z = jlm::llvm::sext_op::create(64, y); diff 
--git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 4ced963d5..516bc1259 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -33,8 +33,9 @@ TestRoot() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - graph.add_import({ jlm::tests::valuetype::Create(), "x" }); - auto y = graph.add_import({ jlm::tests::valuetype::Create(), "y" }); + + jlm::tests::GraphImport::Create(graph, jlm::tests::valuetype::Create(), "x"); + auto y = &jlm::tests::GraphImport::Create(graph, jlm::tests::valuetype::Create(), "y"); graph.add_export(y, { y->Type(), "z" }); // jlm::rvsdg::view(graph.root(), stdout); @@ -54,9 +55,9 @@ TestGamma() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto gamma = jlm::rvsdg::gamma_node::create(c, 2); auto ev1 = gamma->add_entryvar(x); @@ -93,8 +94,8 @@ TestGamma2() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto gamma = jlm::rvsdg::gamma_node::create(c, 2); gamma->add_entryvar(x); @@ -123,9 +124,9 @@ TestTheta() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); - auto z = graph.add_import({ vt, "z" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); + auto z = 
&jlm::tests::GraphImport::Create(graph, vt, "z"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); @@ -166,9 +167,9 @@ TestNestedTheta() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto otheta = jlm::rvsdg::theta_node::create(graph.root()); @@ -210,11 +211,11 @@ TestEvolvingTheta() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x1 = graph.add_import({ vt, "x1" }); - auto x2 = graph.add_import({ vt, "x2" }); - auto x3 = graph.add_import({ vt, "x3" }); - auto x4 = graph.add_import({ vt, "x4" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x1 = &jlm::tests::GraphImport::Create(graph, vt, "x1"); + auto x2 = &jlm::tests::GraphImport::Create(graph, vt, "x2"); + auto x3 = &jlm::tests::GraphImport::Create(graph, vt, "x3"); + auto x4 = &jlm::tests::GraphImport::Create(graph, vt, "x4"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); @@ -248,8 +249,8 @@ TestLambda() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto lambda = lambda::node::create( graph.root(), @@ -284,9 +285,9 @@ TestPhi() RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule.Rvsdg(); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); - auto z = rvsdg.add_import({ valueType, "z" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, 
"x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); + auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); auto setupF1 = [&](jlm::rvsdg::region & region, phi::rvoutput & rv2, jlm::rvsdg::argument & dx) { @@ -396,9 +397,9 @@ TestDelta() RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule.Rvsdg(); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); - auto z = rvsdg.add_import({ valueType, "z" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); + auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); auto deltaNode = delta::node::Create(rvsdg.root(), valueType, "delta", linkage::external_linkage, "", false); diff --git a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp index ec148b70f..9bfd6bca1 100644 --- a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. 
*/ +#include #include #include @@ -26,10 +27,10 @@ TestSuccess() nf->set_mutable(false); nf->set_load_mux_reducible(false); - auto a = graph.add_import({ pt, "a" }); - auto s1 = graph.add_import({ mt, "s1" }); - auto s2 = graph.add_import({ mt, "s2" }); - auto s3 = graph.add_import({ mt, "s3" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); + auto s3 = &jlm::tests::GraphImport::Create(graph, mt, "s3"); auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); auto ld = LoadNonVolatileNode::Create(a, { mux }, vt, 4); @@ -80,9 +81,9 @@ TestWrongNumberOfOperands() nf->set_mutable(false); nf->set_load_mux_reducible(false); - auto a = graph.add_import({ pt, "a" }); - auto s1 = graph.add_import({ mt, "s1" }); - auto s2 = graph.add_import({ mt, "s2" }); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); auto merge = MemoryStateMergeOperation::Create(std::vector{ s1, s2 }); auto ld = LoadNonVolatileNode::Create(a, { merge, merge }, vt, 4); @@ -125,7 +126,7 @@ TestLoadWithoutStates() nf->set_mutable(false); nf->set_load_mux_reducible(false); - auto address = graph.add_import({ pointerType, "address" }); + auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); auto loadResults = LoadNonVolatileNode::Create(address, {}, valueType, 4); diff --git a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp index ead4064f8..b14e914e7 100644 --- a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. 
*/ +#include #include #include @@ -28,9 +29,9 @@ TestLoadStoreReductionWithDifferentValueOperandType() nf->set_mutable(false); nf->set_load_store_reducible(false); - auto address = graph.add_import({ pointerType, "address" }); - auto value = graph.add_import({ jlm::rvsdg::bittype::Create(32), "value" }); - auto memoryState = graph.add_import({ memoryStateType, "memoryState" }); + auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); + auto value = &jlm::tests::GraphImport::Create(graph, jlm::rvsdg::bittype::Create(32), "value"); + auto memoryState = &jlm::tests::GraphImport::Create(graph, memoryStateType, "memoryState"); auto storeResults = StoreNonVolatileNode::Create(address, value, { memoryState }, 4); auto loadResults = diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp index 6fa4c5429..a27c4375b 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp @@ -94,10 +94,11 @@ class TestAnalysis final : public jlm::llvm::aa::AliasAnalysis auto & rootRegion = *rvsdg.root(); for (size_t n = 0; n < rootRegion.narguments(); n++) { - auto & argument = *rootRegion.argument(n); + auto & graphImport = *jlm::util::AssertedCast(rootRegion.argument(n)); - auto & importNode = aa::PointsToGraph::ImportNode::Create(*PointsToGraph_, argument); - auto & registerNode = aa::PointsToGraph::RegisterNode::Create(*PointsToGraph_, { &argument }); + auto & importNode = aa::PointsToGraph::ImportNode::Create(*PointsToGraph_, graphImport); + auto & registerNode = + aa::PointsToGraph::RegisterNode::Create(*PointsToGraph_, { &graphImport }); registerNode.AddEdge(importNode); } } diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 9fde908cd..dc5940282 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -31,9 +31,9 @@ test_simple() auto nf = 
graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); - auto z = graph.add_import({ vt, "z" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); + auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); auto n1 = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; auto n2 = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; @@ -76,10 +76,10 @@ test_gamma() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); - auto z = graph.add_import({ vt, "z" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); + auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); auto u1 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; auto u2 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; @@ -140,8 +140,8 @@ test_theta() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); auto region = theta->subregion(); @@ -193,8 +193,8 @@ test_theta2() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto theta = 
jlm::rvsdg::theta_node::create(graph.root()); auto region = theta->subregion(); @@ -237,8 +237,8 @@ test_theta3() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto theta1 = jlm::rvsdg::theta_node::create(graph.root()); auto r1 = theta1->subregion(); @@ -296,9 +296,9 @@ test_theta4() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); auto region = theta->subregion(); @@ -349,9 +349,9 @@ test_theta5() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); auto region = theta->subregion(); @@ -396,7 +396,7 @@ test_lambda() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto x = graph.add_import({ vt, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto lambda = lambda::node::create(graph.root(), ft, "f", linkage::external_linkage); @@ -431,7 +431,7 @@ test_phi() auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - auto x = 
graph.add_import({ vt, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); phi::builder pb; pb.begin(graph.root()); diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index 6895c37b3..a3917f785 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -26,7 +26,7 @@ test1() // Arrange RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto i = graph.add_import({ jlm::tests::valuetype::Create(), "i" }); + auto i = &jlm::tests::GraphImport::Create(graph, jlm::tests::valuetype::Create(), "i"); auto SetupF1 = [&]() { @@ -121,7 +121,7 @@ test2() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto i = graph.add_import({ pt, "i" }); + auto i = &jlm::tests::GraphImport::Create(graph, pt, "i"); auto SetupF1 = [&](const std::shared_ptr & functionType) { diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index 06d427064..5ecb9091a 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -25,9 +25,9 @@ test1() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto x = graph.add_import({ vt, "x" }); - auto y = graph.add_import({ vt, "y" }); - auto z = graph.add_import({ vt, "z" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); + auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); @@ -83,7 +83,7 @@ test2() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto x = graph.add_import({ vt, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index b22242351..22bc63b8a 100644 --- 
a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -30,8 +30,8 @@ test_pullin_top() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto n1 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; auto n2 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; @@ -63,8 +63,8 @@ test_pullin_bottom() auto ct = jlm::rvsdg::ctltype::Create(2); jlm::rvsdg::graph graph; - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto gamma = jlm::rvsdg::gamma_node::create(c, 2); @@ -93,7 +93,7 @@ test_pull() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto p = graph.add_import({ jlm::rvsdg::ctltype::Create(2), "" }); + auto p = &jlm::tests::GraphImport::Create(graph, jlm::rvsdg::ctltype::Create(2), ""); auto croot = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 35979c8e1..abe9eb7ca 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -30,9 +30,9 @@ test_gamma() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto s = graph.add_import({ st, "s" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto s = &jlm::tests::GraphImport::Create(graph, st, "s"); auto gamma = jlm::rvsdg::gamma_node::create(c, 2); auto evx = gamma->add_entryvar(x); @@ -68,9 +68,9 @@ test_theta() RvsdgModule rm(jlm::util::filepath(""), "", ""); 
auto & graph = rm.Rvsdg(); - auto c = graph.add_import({ ct, "c" }); - auto x = graph.add_import({ vt, "x" }); - auto s = graph.add_import({ st, "s" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); + auto s = &jlm::tests::GraphImport::Create(graph, st, "s"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); @@ -112,10 +112,10 @@ test_push_theta_bottom() auto ct = jlm::rvsdg::ctltype::Create(2); jlm::rvsdg::graph graph; - auto c = graph.add_import({ ct, "c" }); - auto a = graph.add_import({ pt, "a" }); - auto v = graph.add_import({ vt, "v" }); - auto s = graph.add_import({ mt, "s" }); + auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); + auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); + auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index daa8740b5..1f85b0c1a 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -78,7 +78,7 @@ test_unrollinfo() { jlm::rvsdg::graph graph; - auto x = graph.add_import({ bt32, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, bt32, "x"); auto theta = create_theta(slt, add, x, x, x); auto ui = jlm::llvm::unrollinfo::create(theta); @@ -239,8 +239,8 @@ test_unknown_boundaries() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto x = graph.add_import({ bt, "x" }); - auto y = graph.add_import({ bt, "y" }); + auto x = &jlm::tests::GraphImport::Create(graph, bt, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, bt, "y"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); auto lv1 = theta->add_loopvar(x); diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index 8a6316d0f..8be81deb4 100644 --- 
a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -21,7 +21,7 @@ ArgumentNodeMismatch() auto valueType = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto import = graph.add_import({ valueType, "import" }); + auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); @@ -58,7 +58,7 @@ ArgumentInputTypeMismatch() auto stateType = jlm::tests::statetype::Create(); jlm::rvsdg::graph rvsdg; - auto x = rvsdg.add_import({ valueType, "import" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "import"); auto structuralNode = structural_node::create(rvsdg.root(), 1); auto structuralInput = jlm::rvsdg::structural_input::create(structuralNode, x, valueType); diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 248d651b4..e15d8245a 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -24,7 +24,7 @@ Contains() auto valueType = valuetype::Create(); jlm::rvsdg::graph graph; - auto import = graph.add_import({ valueType, "import" }); + auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = structural_node::create(graph.root(), 1); auto structuralInput1 = jlm::rvsdg::structural_input::create(structuralNode1, import, valueType); diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index f03e5d590..2b0536c00 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -21,7 +21,7 @@ ResultNodeMismatch() auto valueType = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto import = graph.add_import({ valueType, "import" }); + auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); auto 
structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 2241b6862..cda0c9f16 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -5,6 +5,7 @@ */ #include "test-registry.hpp" +#include #include #include @@ -20,8 +21,8 @@ types_bitstring_arithmetic_test_bitand(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto c1 = create_bitconstant(graph.root(), 32, 5); @@ -48,8 +49,8 @@ types_bitstring_arithmetic_test_bitashr(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 16); auto c1 = create_bitconstant(graph.root(), 32, -16); @@ -87,8 +88,8 @@ types_bitstring_arithmetic_test_bitdifference(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto diff = bitsub_op::create(32, s0, s1); @@ -110,7 +111,7 @@ types_bitstring_arithmetic_test_bitnegate(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto c0 = 
create_bitconstant(graph.root(), 32, 3); auto neg0 = bitneg_op::create(32, s0); @@ -138,7 +139,7 @@ types_bitstring_arithmetic_test_bitnot(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto not0 = bitnot_op::create(32, s0); @@ -166,8 +167,8 @@ types_bitstring_arithmetic_test_bitor(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto c1 = create_bitconstant(graph.root(), 32, 5); @@ -194,8 +195,8 @@ types_bitstring_arithmetic_test_bitproduct(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto c1 = create_bitconstant(graph.root(), 32, 5); @@ -223,8 +224,8 @@ types_bitstring_arithmetic_test_bitshiproduct(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto shiproduct = bitsmulh_op::create(32, s0, s1); @@ -246,8 +247,8 @@ types_bitstring_arithmetic_test_bitshl(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + 
auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 16); auto c1 = create_bitconstant(graph.root(), 32, 2); @@ -278,8 +279,8 @@ types_bitstring_arithmetic_test_bitshr(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 16); auto c1 = create_bitconstant(graph.root(), 32, 2); @@ -310,8 +311,8 @@ types_bitstring_arithmetic_test_bitsmod(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, -7); auto c1 = create_bitconstant(graph.root(), 32, 3); @@ -339,8 +340,8 @@ types_bitstring_arithmetic_test_bitsquotient(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 7); auto c1 = create_bitconstant(graph.root(), 32, -3); @@ -368,8 +369,8 @@ types_bitstring_arithmetic_test_bitsum(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = 
&jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto c1 = create_bitconstant(graph.root(), 32, 5); @@ -397,8 +398,8 @@ types_bitstring_arithmetic_test_bituhiproduct(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto uhiproduct = bitumulh_op::create(32, s0, s1); @@ -420,8 +421,8 @@ types_bitstring_arithmetic_test_bitumod(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 7); auto c1 = create_bitconstant(graph.root(), 32, 3); @@ -449,8 +450,8 @@ types_bitstring_arithmetic_test_bituquotient(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 7); auto c1 = create_bitconstant(graph.root(), 32, 3); @@ -478,8 +479,8 @@ types_bitstring_arithmetic_test_bitxor(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto c1 = 
create_bitconstant(graph.root(), 32, 5); @@ -522,8 +523,8 @@ types_bitstring_comparison_test_bitequal(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant_undefined(graph.root(), 32); @@ -556,8 +557,8 @@ types_bitstring_comparison_test_bitnotequal(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant_undefined(graph.root(), 32); @@ -590,8 +591,8 @@ types_bitstring_comparison_test_bitsgreater(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); @@ -628,8 +629,8 @@ types_bitstring_comparison_test_bitsgreatereq(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = 
&jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); @@ -669,8 +670,8 @@ types_bitstring_comparison_test_bitsless(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); @@ -707,8 +708,8 @@ types_bitstring_comparison_test_bitslesseq(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); @@ -748,8 +749,8 @@ types_bitstring_comparison_test_bitugreater(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); @@ -786,8 +787,8 @@ types_bitstring_comparison_test_bitugreatereq(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), 
"s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); @@ -827,8 +828,8 @@ types_bitstring_comparison_test_bituless(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); @@ -865,8 +866,8 @@ types_bitstring_comparison_test_bitulesseq(void) jlm::rvsdg::graph graph; - auto s0 = graph.add_import({ bittype::Create(32), "s0" }); - auto s1 = graph.add_import({ bittype::Create(32), "s1" }); + auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); auto c0 = create_bitconstant(graph.root(), 32, 4); auto c1 = create_bitconstant(graph.root(), 32, 5); auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); @@ -971,7 +972,7 @@ types_bitstring_test_normalize(void) jlm::rvsdg::graph graph; bittype bits32(32); - auto imp = graph.add_import({ bittype::Create(32), "imp" }); + auto imp = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "imp"); auto c0 = create_bitconstant(graph.root(), 32, 3); auto c1 = create_bitconstant(graph.root(), 32, 4); @@ -1043,8 +1044,8 @@ types_bitstring_test_reduction(void) graph.prune(); - auto x = graph.add_import({ bittype::Create(16), "x" }); - auto y = graph.add_import({ 
bittype::Create(16), "y" }); + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(16), "x"); + auto y = &jlm::tests::GraphImport::Create(graph, bittype::Create(16), "y"); { auto concat = jlm::rvsdg::bitconcat({ x, y }); @@ -1086,9 +1087,9 @@ types_bitstring_test_slice_concat(void) auto base_const1 = create_bitconstant(graph.root(), "00110111"); auto base_const2 = create_bitconstant(graph.root(), "11001000"); - auto base_x = graph.add_import({ bittype::Create(8), "x" }); - auto base_y = graph.add_import({ bittype::Create(8), "y" }); - auto base_z = graph.add_import({ bittype::Create(8), "z" }); + auto base_x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + auto base_y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); + auto base_z = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "z"); { /* slice of constant */ diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 259b10664..585f8d73f 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -20,10 +20,10 @@ test_flattened_binary_reduction() /* test paralell reduction */ { jlm::rvsdg::graph graph; - auto i0 = graph.add_import({ vt, "" }); - auto i1 = graph.add_import({ vt, "" }); - auto i2 = graph.add_import({ vt, "" }); - auto i3 = graph.add_import({ vt, "" }); + auto i0 = &jlm::tests::GraphImport::Create(graph, vt, ""); + auto i1 = &jlm::tests::GraphImport::Create(graph, vt, ""); + auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); + auto i3 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto o1 = simple_node::create_normalized(graph.root(), op, { i0, i1 })[0]; auto o2 = simple_node::create_normalized(graph.root(), op, { o1, i2 })[0]; @@ -54,10 +54,10 @@ test_flattened_binary_reduction() /* test linear reduction */ { jlm::rvsdg::graph graph; - auto i0 = graph.add_import({ vt, "" }); - auto i1 = graph.add_import({ vt, "" }); - auto i2 = graph.add_import({ vt, "" }); - auto i3 = 
graph.add_import({ vt, "" }); + auto i0 = &jlm::tests::GraphImport::Create(graph, vt, ""); + auto i1 = &jlm::tests::GraphImport::Create(graph, vt, ""); + auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); + auto i3 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto o1 = simple_node::create_normalized(graph.root(), op, { i0, i1 })[0]; auto o2 = simple_node::create_normalized(graph.root(), op, { o1, i2 })[0]; diff --git a/tests/jlm/rvsdg/test-cse.cpp b/tests/jlm/rvsdg/test-cse.cpp index dc697cdee..74a10ddf5 100644 --- a/tests/jlm/rvsdg/test-cse.cpp +++ b/tests/jlm/rvsdg/test-cse.cpp @@ -15,7 +15,7 @@ test_main() auto t = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto i = graph.add_import({ t, "i" }); + auto i = &jlm::tests::GraphImport::Create(graph, t, "i"); auto o1 = jlm::tests::test_op::create(graph.root(), {}, { t })->output(0); auto o2 = jlm::tests::test_op::create(graph.root(), { i }, { t })->output(0); diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 2678d9bfd..b335ec4f8 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -17,11 +17,11 @@ test_gamma(void) using namespace jlm::rvsdg; jlm::rvsdg::graph graph; - auto cmp = graph.add_import({ bittype::Create(2), "" }); - auto v0 = graph.add_import({ bittype::Create(32), "" }); - auto v1 = graph.add_import({ bittype::Create(32), "" }); - auto v2 = graph.add_import({ bittype::Create(32), "" }); - auto v3 = graph.add_import({ ctltype::Create(2), "" }); + auto cmp = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), ""); + auto v0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); + auto v1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); + auto v2 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); + auto v3 = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), ""); auto pred = match(2, { { 0, 0 }, { 1, 1 } }, 2, 3, cmp); @@ -58,9 +58,9 @@ 
test_predicate_reduction(void) bittype bits2(2); - auto v0 = graph.add_import({ bittype::Create(32), "" }); - auto v1 = graph.add_import({ bittype::Create(32), "" }); - auto v2 = graph.add_import({ bittype::Create(32), "" }); + auto v0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); + auto v1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); + auto v2 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); auto pred = jlm::rvsdg::control_constant(graph.root(), 3, 1); @@ -90,8 +90,8 @@ test_invariant_reduction(void) jlm::rvsdg::graph graph; gamma_op::normal_form(&graph)->set_invariant_reduction(true); - auto pred = graph.add_import({ ctltype::Create(2), "" }); - auto v = graph.add_import({ vtype, "" }); + auto pred = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), ""); + auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); auto gamma = jlm::rvsdg::gamma_node::create(pred, 2); auto ev = gamma->add_entryvar(v); @@ -115,7 +115,7 @@ test_control_constant_reduction() jlm::rvsdg::graph graph; gamma_op::normal_form(&graph)->set_control_constant_reduction(true); - auto x = graph.add_import({ bittype::Create(1), "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(1), "x"); auto c = match(1, { { 0, 0 } }, 1, 2, x); @@ -153,7 +153,7 @@ test_control_constant_reduction2() jlm::rvsdg::graph graph; gamma_op::normal_form(&graph)->set_control_constant_reduction(true); - auto import = graph.add_import({ bittype::Create(2), "import" }); + auto import = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), "import"); auto c = match(2, { { 3, 2 }, { 2, 1 }, { 1, 0 } }, 3, 4, import); @@ -186,11 +186,11 @@ TestRemoveGammaOutputsWhere() auto vt = jlm::tests::valuetype::Create(); ctltype ct(2); - auto predicate = rvsdg.add_import({ ctltype::Create(2), "" }); - auto v0 = rvsdg.add_import({ vt, "" }); - auto v1 = rvsdg.add_import({ vt, "" }); - auto v2 = rvsdg.add_import({ vt, "" }); - auto v3 
= rvsdg.add_import({ vt, "" }); + auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), ""); + auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v2 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v3 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto gammaNode = gamma_node::create(predicate, 2); auto gammaInput0 = gammaNode->add_entryvar(v0); @@ -248,11 +248,11 @@ TestPruneOutputs() auto vt = jlm::tests::valuetype::Create(); ctltype ct(2); - auto predicate = rvsdg.add_import({ ctltype::Create(2), "" }); - auto v0 = rvsdg.add_import({ vt, "" }); - auto v1 = rvsdg.add_import({ vt, "" }); - auto v2 = rvsdg.add_import({ vt, "" }); - auto v3 = rvsdg.add_import({ vt, "" }); + auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), ""); + auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v2 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v3 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto gammaNode = gamma_node::create(predicate, 2); auto gammaInput0 = gammaNode->add_entryvar(v0); @@ -297,9 +297,9 @@ TestIsInvariant() auto vt = jlm::tests::valuetype::Create(); ctltype ct(2); - auto predicate = rvsdg.add_import({ ctltype::Create(2), "" }); - auto v0 = rvsdg.add_import({ vt, "" }); - auto v1 = rvsdg.add_import({ vt, "" }); + auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), ""); + auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); + auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto gammaNode = gamma_node::create(predicate, 2); auto gammaInput0 = gammaNode->add_entryvar(v0); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 9d7c6de2d..4c745eb08 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -33,7 +33,7 @@ test_recursive_prune() auto 
t = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto imp = graph.add_import({ t, "i" }); + auto imp = &jlm::tests::GraphImport::Create(graph, t, "i"); auto n1 = jlm::tests::test_op::create(graph.root(), { imp }, { t }); auto n2 = jlm::tests::test_op::create(graph.root(), { imp }, { t }); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index f5efac6e2..4ce6549ff 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -18,8 +18,8 @@ test_node_copy(void) auto vtype = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto s = graph.add_import({ stype, "" }); - auto v = graph.add_import({ vtype, "" }); + auto s = &jlm::tests::GraphImport::Create(graph, stype, ""); + auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); auto n1 = jlm::tests::structural_node::create(graph.root(), 3); auto i1 = structural_input::create(n1, s, stype); @@ -96,7 +96,7 @@ test_node_depth() auto vt = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto x = graph.add_import({ vt, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto null = jlm::tests::test_op::create(graph.root(), {}, { vt }); auto bin = jlm::tests::test_op::create(graph.root(), { null->output(0), x }, { vt }); @@ -187,7 +187,7 @@ TestRemoveInputsWhere() // Arrange jlm::rvsdg::graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto x = rvsdg.add_import({ valueType, "x" }); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto & node = jlm::tests::SimpleNode::Create(*rvsdg.root(), { x, x, x }, {}); auto input0 = node.input(0); diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index f5a9b67f7..0cca733a1 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ b/tests/jlm/rvsdg/test-statemux.cpp @@ -26,9 +26,9 @@ test_mux_mux_reduction() mnf->set_mutable(false); mnf->set_mux_mux_reducible(false); - auto x = graph.add_import({ st, "x" }); - auto y = 
graph.add_import({ st, "y" }); - auto z = graph.add_import({ st, "z" }); + auto x = &jlm::tests::GraphImport::Create(graph, st, "x"); + auto y = &jlm::tests::GraphImport::Create(graph, st, "y"); + auto z = &jlm::tests::GraphImport::Create(graph, st, "z"); auto mux1 = jlm::rvsdg::create_state_merge(st, { x, y }); auto mux2 = jlm::rvsdg::create_state_split(st, z, 2); @@ -66,7 +66,7 @@ test_multiple_origin_reduction() mnf->set_mutable(false); mnf->set_multiple_origin_reducible(false); - auto x = graph.add_import({ st, "x" }); + auto x = &jlm::tests::GraphImport::Create(graph, st, "x"); auto mux1 = jlm::rvsdg::create_state_merge(st, { x, x }); auto ex = graph.add_export(mux1, { mux1->Type(), "m" }); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 598c24a46..a424ea04f 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -18,9 +18,9 @@ TestThetaCreation() jlm::rvsdg::graph graph; auto t = jlm::tests::valuetype::Create(); - auto imp1 = graph.add_import({ ctltype::Create(2), "imp1" }); - auto imp2 = graph.add_import({ t, "imp2" }); - auto imp3 = graph.add_import({ t, "imp3" }); + auto imp1 = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), "imp1"); + auto imp2 = &jlm::tests::GraphImport::Create(graph, t, "imp2"); + auto imp3 = &jlm::tests::GraphImport::Create(graph, t, "imp3"); auto theta = jlm::rvsdg::theta_node::create(graph.root()); @@ -57,9 +57,9 @@ TestRemoveThetaOutputsWhere() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = rvsdg.add_import({ ctltype::Create(2), "ctl" }); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto thetaNode = theta_node::create(rvsdg.root()); @@ -107,9 +107,9 @@ 
TestPruneThetaOutputs() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = rvsdg.add_import({ ctltype::Create(2), "ctl" }); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto thetaNode = theta_node::create(rvsdg.root()); @@ -142,9 +142,9 @@ TestRemoveThetaInputsWhere() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = rvsdg.add_import({ ctltype::Create(2), "ctl" }); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto thetaNode = theta_node::create(rvsdg.root()); @@ -198,9 +198,9 @@ TestPruneThetaInputs() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = rvsdg.add_import({ ctltype::Create(2), "ctl" }); - auto x = rvsdg.add_import({ valueType, "x" }); - auto y = rvsdg.add_import({ valueType, "y" }); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); + auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto thetaNode = theta_node::create(rvsdg.root()); diff --git a/tests/jlm/rvsdg/test-topdown.cpp b/tests/jlm/rvsdg/test-topdown.cpp index 67bc9a062..52b7acf2a 100644 --- a/tests/jlm/rvsdg/test-topdown.cpp +++ b/tests/jlm/rvsdg/test-topdown.cpp @@ -15,7 +15,7 @@ test_initialization() auto vtype = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto i = graph.add_import({ vtype, "i" }); + auto i = &jlm::tests::GraphImport::Create(graph, vtype, 
"i"); auto constant = jlm::tests::test_op::create(graph.root(), {}, { vtype }); auto unary = jlm::tests::test_op::create(graph.root(), { i }, { vtype }); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index e13bb2b2a..bf6113233 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -8,6 +8,12 @@ namespace jlm::tests { +GraphImport & +GraphImport::Copy(rvsdg::region & region, rvsdg::structural_input * input) +{ + return GraphImport::Create(*region.graph(), Type(), Name()); +} + /* unary operation */ unary_op::~unary_op() noexcept diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index a72663a2a..6983fab25 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -20,6 +20,29 @@ namespace jlm::tests { +/** + * Represents an import into the RVSDG of an external entity. + * It can be used for testing of graph imports. + */ +class GraphImport final : public rvsdg::GraphImport +{ + GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name) + : rvsdg::GraphImport(graph, std::move(type), std::move(name)) + {} + +public: + GraphImport & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + + static GraphImport & + Create(rvsdg::graph & graph, std::shared_ptr type, std::string name) + { + auto graphImport = new GraphImport(graph, std::move(type), std::move(name)); + graph.root()->append_argument(graphImport); + return *graphImport; + } +}; + /* unary operation */ class unary_op final : public rvsdg::unary_op From 0c3e9431342490038fdab8bdd0ac3eb6d3399c4c Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 18 Aug 2024 08:06:21 +0200 Subject: [PATCH 045/170] Add copy() method to result class (#583) This PR does the following: 1. Introduces a Copy() method to the result class. 2. The introduced method enables us to fix a bug in the copy() method of the region. 
Previously, results were not copied according to their result subtypes, but simply a new instance of result was created. 3. Extend unit test. This PR is a necessary step to get rid off ports. --- jlm/hls/ir/hls.cpp | 7 +++++++ jlm/hls/ir/hls.hpp | 3 +++ jlm/llvm/ir/operators/Phi.cpp | 7 +++++++ jlm/llvm/ir/operators/Phi.hpp | 3 +++ jlm/llvm/ir/operators/delta.cpp | 7 +++++++ jlm/llvm/ir/operators/delta.hpp | 3 +++ jlm/llvm/ir/operators/lambda.cpp | 7 +++++++ jlm/llvm/ir/operators/lambda.hpp | 3 +++ jlm/rvsdg/gamma.cpp | 7 +++++++ jlm/rvsdg/gamma.hpp | 3 +++ jlm/rvsdg/region.cpp | 22 +++++++++++++--------- jlm/rvsdg/region.hpp | 15 +++++++++++++++ jlm/rvsdg/theta.cpp | 7 +++++++ jlm/rvsdg/theta.hpp | 15 +++++++++------ tests/jlm/rvsdg/test-graph.cpp | 19 ++++++++++++++++--- tests/test-operation.hpp | 24 ++++++++++++++++++++++++ 16 files changed, 134 insertions(+), 18 deletions(-) diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 4eae808f2..2dee01f10 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -42,6 +42,13 @@ backedge_argument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * i return *backedge_argument::create(®ion, Type()); } +backedge_result & +backedge_result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +{ + JLM_ASSERT(output == nullptr); + return *backedge_result::create(&origin); +} + jlm::rvsdg::structural_output * loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 4affed7c5..6c5f8480d 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -659,6 +659,9 @@ class backedge_result : public jlm::rvsdg::result return argument_; } + backedge_result & + Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + private: backedge_result(jlm::rvsdg::output * origin) : jlm::rvsdg::result(origin->region(), origin, nullptr, origin->port()), diff --git a/jlm/llvm/ir/operators/Phi.cpp 
b/jlm/llvm/ir/operators/Phi.cpp index e8447c3b9..27275f7f4 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -192,5 +192,12 @@ cvargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) rvresult::~rvresult() {} +rvresult & +rvresult::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +{ + auto phiOutput = util::AssertedCast(output); + return *rvresult::create(origin.region(), &origin, phiOutput, origin.Type()); +} + } } diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index c37742cd4..a201093fb 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -814,6 +814,9 @@ class rvresult final : public jlm::rvsdg::result rvresult & operator=(rvresult &&) = delete; + rvresult & + Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + static rvresult * create( jlm::rvsdg::region * region, diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index 749cab5e2..6305e798b 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -184,5 +184,12 @@ cvargument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) result::~result() {} +result & +result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +{ + JLM_ASSERT(output == nullptr); + return *result::create(&origin); +} + } } diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 3598418f3..2fe6cf6c8 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -457,6 +457,9 @@ class result final : public rvsdg::result public: ~result() override; + result & + Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + private: result(rvsdg::output * origin) : rvsdg::result(origin->region(), origin, nullptr, origin->port()) diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 3c7349272..ac6f17c8f 
100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -440,4 +440,11 @@ cvargument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) result::~result() = default; +result & +result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +{ + JLM_ASSERT(output == nullptr); + return *result::create(&origin); +} + } diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 84cf4e606..2dc4d672c 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -635,6 +635,9 @@ class result final : public jlm::rvsdg::result public: ~result() override; + result & + Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + private: explicit result(jlm::rvsdg::output * origin) : jlm::rvsdg::result(origin->region(), origin, nullptr, origin->port()) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 28c648a85..6c28a9992 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -386,6 +386,13 @@ GammaArgument::Copy(rvsdg::region & region, structural_input * input) GammaResult::~GammaResult() noexcept = default; +GammaResult & +GammaResult::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +{ + auto gammaOutput = util::AssertedCast(output); + return GammaResult::Create(*origin.region(), origin, *gammaOutput); +} + } jlm::rvsdg::node_normal_form * diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index c6a9241ff..aedc7ff6e 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -501,6 +501,9 @@ class GammaResult final : public result : result(®ion, &origin, &gammaOutput, origin.Type()) {} + GammaResult & + Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + static GammaResult & Create(rvsdg::region & region, rvsdg::output & origin, gamma_output & gammaOutput) { diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 3cd27506b..12b0abbe6 100644 --- 
a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -148,6 +148,12 @@ result::result( } } +result & +result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +{ + return *result::create(origin.region(), &origin, output, port()); +} + jlm::rvsdg::result * result::create( jlm::rvsdg::region * region, @@ -283,7 +289,7 @@ region::copy(region * target, substitution_map & smap, bool copy_arguments, bool { smap.insert(this, target); - /* order nodes top-down */ + // order nodes top-down std::vector> context(nnodes()); for (const auto & node : nodes) { @@ -302,7 +308,7 @@ region::copy(region * target, substitution_map & smap, bool copy_arguments, bool } } - /* copy nodes */ + // copy nodes for (size_t n = 0; n < context.size(); n++) { for (const auto node : context[n]) @@ -312,17 +318,15 @@ region::copy(region * target, substitution_map & smap, bool copy_arguments, bool } } - /* copy results */ if (copy_results) { for (size_t n = 0; n < nresults(); n++) { - auto origin = smap.lookup(result(n)->origin()); - if (!origin) - origin = result(n)->origin(); - - auto output = dynamic_cast(smap.lookup(result(n)->output())); - result::create(target, origin, output, result(n)->port()); + auto oldResult = result(n); + auto newOrigin = smap.lookup(oldResult->origin()); + JLM_ASSERT(newOrigin != nullptr); + auto newOutput = dynamic_cast(smap.lookup(oldResult->output())); + oldResult->Copy(*newOrigin, newOutput); } } } diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index e2beb15d0..74d7fa850 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -138,6 +138,21 @@ class result : public input return output_; } + /** + * Creates a copy of the result with \p origin and structural_output \p output. The + * result is created with the same type as \p origin and in the same region as \p origin. + * + * @param origin The origin for the result. + * @param output The structural_output to the result, if any. + * + * @return A reference to the copied result. 
+ * + * FIXME: This method should be made abstract once we enforced that no instances of result + * itself can be created any longer. + */ + virtual result & + Copy(rvsdg::output & origin, structural_output * output); + static jlm::rvsdg::result * create( jlm::rvsdg::region * region, diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 0781bf608..8c216e850 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -54,6 +54,13 @@ ThetaArgument::Copy(rvsdg::region & region, structural_input * input) ThetaResult::~ThetaResult() noexcept = default; +ThetaResult & +ThetaResult::Copy(rvsdg::output & origin, structural_output * output) +{ + auto thetaOutput = util::AssertedCast(output); + return ThetaResult::Create(origin, *thetaOutput); +} + /* theta node */ theta_node::~theta_node() diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 7c05f93cf..ba418d6a2 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -388,18 +388,21 @@ class ThetaResult final : public result public: ~ThetaResult() noexcept override; + ThetaResult & + Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + private: - ThetaResult(ThetaArgument & thetaArgument, theta_output & thetaOutput) - : result(thetaArgument.region(), &thetaArgument, &thetaOutput, thetaArgument.Type()) + ThetaResult(rvsdg::output & origin, theta_output & thetaOutput) + : result(origin.region(), &origin, &thetaOutput, origin.Type()) { - JLM_ASSERT(is(thetaArgument.region()->node())); + JLM_ASSERT(is(origin.region()->node())); } static ThetaResult & - Create(ThetaArgument & thetaArgument, theta_output & thetaOutput) + Create(rvsdg::output & origin, theta_output & thetaOutput) { - auto thetaResult = new ThetaResult(thetaArgument, thetaOutput); - thetaArgument.region()->append_result(thetaResult); + auto thetaResult = new ThetaResult(origin, thetaOutput); + origin.region()->append_result(thetaResult); return *thetaResult; } }; diff --git a/tests/jlm/rvsdg/test-graph.cpp 
b/tests/jlm/rvsdg/test-graph.cpp index 4c745eb08..b326984ed 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -143,17 +143,30 @@ Copy() using namespace jlm::tests; // Arrange - auto type = jlm::tests::valuetype::Create(); + auto valueType = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - TestGraphArgument::Create(*graph.root(), type); + auto & argument = TestGraphArgument::Create(*graph.root(), valueType); + auto node = test_op::create(graph.root(), { &argument }, { valueType }); + TestGraphResult::Create(*node->output(0)); // Act auto newGraph = graph.copy(); // Assert assert(newGraph->root()->narguments() == 1); - assert(is(newGraph->root()->argument(0))); + auto copiedArgument = newGraph->root()->argument(0); + assert(is(copiedArgument)); + + assert(newGraph->root()->nnodes() == 1); + auto copiedNode = newGraph->root()->nodes.first(); + assert(copiedNode->ninputs() == 1 && copiedNode->noutputs() == 1); + assert(copiedNode->input(0)->origin() == copiedArgument); + + assert(newGraph->root()->nresults() == 1); + auto copiedResult = newGraph->root()->result(0); + assert(is(*copiedResult)); + assert(copiedResult->origin() == copiedNode->output(0)); return 0; } diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 6983fab25..3faff562a 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -391,6 +391,30 @@ class TestGraphArgument final : public jlm::rvsdg::argument } }; +class TestGraphResult final : public jlm::rvsdg::result +{ +private: + explicit TestGraphResult(jlm::rvsdg::output & origin) + : jlm::rvsdg::result(origin.region(), &origin, nullptr, origin.Type()) + {} + +public: + TestGraphResult & + Copy(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) override + { + JLM_ASSERT(output == nullptr); + return Create(origin); + } + + static TestGraphResult & + Create(jlm::rvsdg::output & origin) + { + auto graphResult = new TestGraphResult(origin); + 
origin.region()->append_result(graphResult); + return *graphResult; + } +}; + } #endif From d77c368689d5cce5ac8b43e28ded9dca1b51bd21 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Sun, 18 Aug 2024 10:49:52 +0200 Subject: [PATCH 046/170] [AndersenAgnostic] Add lazy cycle detection as an optional technique for Andersen (#581) When propagating along the edge a->b, it is because a has something new. If b already has everything a has, that might indicate that there is a path from b to a. Lazy cycle detection looks for such a path using a DFS from b, and unifies everything that is on such a path b->*->a. It tries at most once per edge. --- jlm/llvm/Makefile.sub | 2 + jlm/llvm/opt/alias-analyses/Andersen.cpp | 31 ++- jlm/llvm/opt/alias-analyses/Andersen.hpp | 19 ++ .../opt/alias-analyses/LazyCycleDetection.hpp | 187 ++++++++++++++++++ .../opt/alias-analyses/PointerObjectSet.cpp | 74 ++++++- .../opt/alias-analyses/PointerObjectSet.hpp | 22 ++- .../alias-analyses/TestLazyCycleDetection.cpp | 110 +++++++++++ .../alias-analyses/TestPointerObjectSet.cpp | 3 +- 8 files changed, 432 insertions(+), 16 deletions(-) create mode 100644 jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp create mode 100644 tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 1a283ad20..edac263e5 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -69,6 +69,7 @@ libllvm_HEADERS = \ jlm/llvm/opt/inlining.hpp \ jlm/llvm/opt/cne.hpp \ jlm/llvm/opt/push.hpp \ + jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp \ jlm/llvm/opt/alias-analyses/MemoryNodeProvider.hpp \ jlm/llvm/opt/alias-analyses/OnlineCycleDetection.hpp \ jlm/llvm/opt/alias-analyses/Optimization.hpp \ @@ -177,6 +178,7 @@ libllvm_TESTS += \ tests/jlm/llvm/ir/TestAnnotation \ tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider \ tests/jlm/llvm/opt/alias-analyses/TestAndersen \ + tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection \ 
tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder \ tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet \ tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph \ diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 954059748..805e5fde0 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -45,6 +45,8 @@ Andersen::Configuration::ToString() const str << "OnlineCD_"; if (EnableHybridCycleDetection_) str << "HybridCD_"; + if (EnableLazyCycleDetection_) + str << "LazyCD_"; } else { @@ -61,15 +63,22 @@ Andersen::Configuration::GetAllConfigurations() { std::vector configs; + auto PickLazyCycleDetection = [&](Configuration config) + { + config.EnableLazyCycleDetection(false); + configs.push_back(config); + config.EnableLazyCycleDetection(true); + configs.push_back(config); + }; auto PickHybridCycleDetection = [&](Configuration config) { config.EnableHybridCycleDetection(false); - configs.push_back(config); + PickLazyCycleDetection(config); // Hybrid Cycle Detection can only be enabled when OVS is enabled if (config.IsOfflineVariableSubstitutionEnabled()) { config.EnableHybridCycleDetection(true); - configs.push_back(config); + PickLazyCycleDetection(config); } }; auto PickOnlineCycleDetection = [&](Configuration config) @@ -77,7 +86,7 @@ Andersen::Configuration::GetAllConfigurations() config.EnableOnlineCycleDetection(false); PickHybridCycleDetection(config); config.EnableOnlineCycleDetection(true); - // OnlineCD can not be combined with HybridCD + // OnlineCD can not be combined with HybridCD or LazyCD configs.push_back(config); }; auto PickWorklistPolicy = [&](Configuration config) @@ -161,6 +170,10 @@ class Andersen::Statistics final : public util::Statistics static constexpr const char * NumHybridCycleUnifications_ = "#HybridCycleUnifications"; + static constexpr const char * NumLazyCycleDetectionAttempts_ = "#LazyCycleDetectionAttempts"; + static constexpr const 
char * NumLazyCyclesDetected_ = "#LazyCyclesDetected"; + static constexpr const char * NumLazyCycleUnifications_ = "#LazyCycleUnifications"; + // After solving statistics static constexpr const char * NumEscapedMemoryObjects_ = "#EscapedMemoryObjects"; static constexpr const char * NumUnificationRoots_ = "#UnificationRoots"; @@ -304,6 +317,15 @@ class Andersen::Statistics final : public util::Statistics if (statistics.NumHybridCycleUnifications) AddMeasurement(NumHybridCycleUnifications_, *statistics.NumHybridCycleUnifications); + + if (statistics.NumLazyCyclesDetectionAttempts) + AddMeasurement(NumLazyCycleDetectionAttempts_, *statistics.NumLazyCyclesDetectionAttempts); + + if (statistics.NumLazyCyclesDetected) + AddMeasurement(NumLazyCyclesDetected_, *statistics.NumLazyCyclesDetected); + + if (statistics.NumLazyCycleUnifications) + AddMeasurement(NumLazyCycleUnifications_, *statistics.NumLazyCycleUnifications); } void @@ -1090,7 +1112,8 @@ Andersen::SolveConstraints( auto worklistStatistics = constraints.SolveUsingWorklist( config.GetWorklistSoliverPolicy(), config.IsOnlineCycleDetectionEnabled(), - config.IsHybridCycleDetectionEnabled()); + config.IsHybridCycleDetectionEnabled(), + config.IsLazyCycleDetectionEnabled()); statistics.StopConstraintSolvingWorklistStatistics(worklistStatistics); } else diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index 5fe496639..a00296779 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -161,6 +161,23 @@ class Andersen final : public AliasAnalysis return EnableHybridCycleDetection_; } + /** + * Enables or disables lazy cycle detection in the Worklist solver, as described by + * Hardekopf and Lin, 2007: "The Ant & the Grasshopper" + * It detects some cycles, so it can not be combined with techniques that find all cycles. 
+ */ + void + EnableLazyCycleDetection(bool enable) noexcept + { + EnableLazyCycleDetection_ = enable; + } + + [[nodiscard]] bool + IsLazyCycleDetectionEnabled() const noexcept + { + return EnableLazyCycleDetection_; + } + [[nodiscard]] std::string ToString() const; @@ -179,6 +196,7 @@ class Andersen final : public AliasAnalysis PointerObjectConstraintSet::WorklistSolverPolicy::LeastRecentlyFired); config.EnableOnlineCycleDetection(false); config.EnableHybridCycleDetection(true); + config.EnableLazyCycleDetection(true); return config; } @@ -213,6 +231,7 @@ class Andersen final : public AliasAnalysis PointerObjectConstraintSet::WorklistSolverPolicy::LeastRecentlyFired; bool EnableOnlineCycleDetection_ = false; bool EnableHybridCycleDetection_ = false; + bool EnableLazyCycleDetection_ = false; }; ~Andersen() noexcept override = default; diff --git a/jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp b/jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp new file mode 100644 index 000000000..9a08026f9 --- /dev/null +++ b/jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp @@ -0,0 +1,187 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. 
+ */ + +#ifndef JLM_LLVM_OPT_ALIAS_ANALYSES_LAZYCYCLEDETECTION_HPP +#define JLM_LLVM_OPT_ALIAS_ANALYSES_LAZYCYCLEDETECTION_HPP + +#include +#include + +#include +#include +#include +#include + +namespace jlm::llvm::aa +{ + +/** + * Implements Lazy Cycle Detection, as described by + * Hardekopf and Lin, 2007: "The Ant and the Grasshopper" + * @tparam GetSuccessorsFunctor is a function returning the superset edge successors of a given node + * @tparam UnifyPointerObjectsFunctor the functor to be called to unify a cycle, when found + */ +template +class LazyCycleDetector +{ + +public: + LazyCycleDetector( + PointerObjectSet & set, + const GetSuccessorsFunctor & GetSuccessors, + const UnifyPointerObjectsFunctor & unifyPointerObjects) + : Set_(set), + GetSuccessors_(GetSuccessors), + UnifyPointerObjects_(unifyPointerObjects) + {} + + /** + * Call before calling any other method + */ + void + Initialize() + { + NodeStates_.resize(Set_.NumPointerObjects(), NodeStateNotVisited); + } + + bool + IsInitialized() const noexcept + { + return NodeStates_.size() == Set_.NumPointerObjects(); + } + + /** + * Call when an edge subset -> superset was visited, and zero pointees had to be propagated. + * Only call if subset has at least one new pointee. + * If a path from superset to subset is found, there is a cycle, which gets unified.
+ * @param subset the tail of the added edge, must be unification root + * @param superset the head of the added edge, must be unification root + * @return the root of the unification if unification happened, otherwise nullopt + */ + std::optional + OnPropagatedNothing(PointerObjectIndex subset, PointerObjectIndex superset) + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(subset)); + JLM_ASSERT(Set_.IsUnificationRoot(superset)); + + // Add this edge to the list of checked edges, or return if it was already there + if (!CheckedEdges_.Insert({ subset, superset })) + return std::nullopt; + + NumCycleDetectAttempts_++; + + JLM_ASSERT(DfsStack_.empty()); + DfsStack_.push(superset); + + // Reset all node states + std::fill(NodeStates_.begin(), NodeStates_.end(), NodeStateNotVisited); + + while (!DfsStack_.empty()) + { + auto node = DfsStack_.top(); + if (NodeStates_[node] == NodeStateNotVisited) + { + NodeStates_[node] = NodeStateVisited; + // Make sure all successors get visited + for (auto successor : GetSuccessors_(node)) + { + auto successorRoot = Set_.GetUnificationRoot(successor); + + // Cycle found! Do not add the subset to the dfs stack + if (successorRoot == subset) + continue; + + if (NodeStates_[successorRoot] != NodeStateNotVisited) + continue; + + DfsStack_.push(successorRoot); + } + } + else if (NodeStates_[node] == NodeStateVisited) + { + DfsStack_.pop(); + NodeStates_[node] = NodeStatePopped; + + // Check if any successors are unified with the subset. If so, join it! 
+ for (auto successor : GetSuccessors_(node)) + { + auto successorRoot = Set_.GetUnificationRoot(successor); + if (successorRoot == subset) + { + subset = UnifyPointerObjects_(node, subset); + NumCycleUnifications_++; + break; + } + } + } + else + { + // The node has already been visited for a second time + DfsStack_.pop(); + } + } + + JLM_ASSERT(Set_.IsUnificationRoot(subset)); + superset = Set_.GetUnificationRoot(superset); + if (subset == superset) + { + NumCyclesDetected_++; + return subset; + } + return std::nullopt; + } + + /** + * @return the number of DFSs performed to look for cycles + */ + [[nodiscard]] size_t + NumCycleDetectionAttempts() const noexcept + { + return NumCycleDetectAttempts_; + } + + /** + * @return the number of cycles detected by Lazy cycle detection + */ + [[nodiscard]] size_t + NumCyclesDetected() const noexcept + { + return NumCyclesDetected_; + } + + /** + * @return the number of unifications made while eliminating found cycles + */ + [[nodiscard]] size_t + NumCycleUnifications() const noexcept + { + return NumCycleUnifications_; + } + +private: + PointerObjectSet & Set_; + const GetSuccessorsFunctor & GetSuccessors_; + const UnifyPointerObjectsFunctor & UnifyPointerObjects_; + + // A set of all checked simple edges first -> second, to avoid checking again + util::HashSet> CheckedEdges_; + + // The dfs stack, which may contain the same node multiple times + std::stack DfsStack_; + // Possible states of nodes during the DFS + static constexpr uint8_t NodeStateNotVisited = 0; + static constexpr uint8_t NodeStateVisited = 1; + static constexpr uint8_t NodeStatePopped = 2; + std::vector NodeStates_; + + size_t NumCycleDetectAttempts_ = 0; + size_t NumCyclesDetected_ = 0; + size_t NumCycleUnifications_ = 0; +}; + +} + +#endif // JLM_LLVM_OPT_ALIAS_ANALYSES_LAZYCYCLEDETECTION_HPP diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index 1bea24408..657c07799 100644 --- 
a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -1387,7 +1388,11 @@ PointerObjectConstraintSet::NormalizeConstraints() return reduction; } -template +template< + typename Worklist, + bool EnableOnlineCycleDetection, + bool EnableHybridCycleDetection, + bool EnableLazyCycleDetection> void PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { @@ -1398,6 +1403,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if constexpr (EnableOnlineCycleDetection) { static_assert(!EnableHybridCycleDetection, "OnlineCD can not be combined with HybridCD"); + static_assert(!EnableLazyCycleDetection, "OnlineCD can not be combined with LazyCD"); } // Create auxiliary subset graph. @@ -1518,6 +1524,11 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if constexpr (EnableOnlineCycleDetection) onlineCycleDetector.InitializeTopologicalOrdering(); + // If lazy cycle detection is enabled, initialize it here + LazyCycleDetector lazyCycleDetector(Set_, GetSupersetEdgeSuccessors, UnifyPointerObjects); + if constexpr (EnableLazyCycleDetection) + lazyCycleDetector.Initialize(); + if constexpr (EnableHybridCycleDetection) statistics.NumHybridCycleUnifications = 0; @@ -1573,8 +1584,17 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } // A new edge was added, propagate points to-sets. 
If the superset changes, add to the worklist - if (MakePointsToSetSuperset(superset, subset)) + bool anyPropagation = MakePointsToSetSuperset(superset, subset); + if (anyPropagation) worklist.PushWorkItem(superset); + + // If nothing was propagated by adding the edge, try lazy cycle detection + if (EnableLazyCycleDetection && !Set_.GetPointsToSet(subset).IsEmpty() && !anyPropagation) + { + const auto optUnificationRoot = lazyCycleDetector.OnPropagatedNothing(subset, superset); + if (optUnificationRoot) + worklist.PushWorkItem(*optUnificationRoot); + } }; // A temporary place to store new subset edges, to avoid modifying sets while they are iterated @@ -1701,8 +1721,23 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) // The current it-edge should be kept as is, prepare "it" for the next iteration. ++it; - if (MakePointsToSetSuperset(supersetParent, node)) + bool modified = MakePointsToSetSuperset(supersetParent, node); + + if (modified) worklist.PushWorkItem(supersetParent); + + if (EnableLazyCycleDetection && !nodePointees.IsEmpty() && !modified) + { + // If nothing was propagated along this edge, check if there is a cycle + // If a cycle is detected, this function eliminates it by unifying, and returns the root + auto optUnificationRoot = lazyCycleDetector.OnPropagatedNothing(node, supersetParent); + if (optUnificationRoot) + { + // The new unification root is pushed, and handling of the current work item is aborted. + worklist.PushWorkItem(*optUnificationRoot); + return; + } + } } // Stores on the form *n = value. 
@@ -1770,13 +1805,21 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) statistics.NumOnlineCyclesDetected = onlineCycleDetector.NumOnlineCyclesDetected(); statistics.NumOnlineCycleUnifications = onlineCycleDetector.NumOnlineCycleUnifications(); } + + if constexpr (EnableLazyCycleDetection) + { + statistics.NumLazyCyclesDetectionAttempts = lazyCycleDetector.NumCycleDetectionAttempts(); + statistics.NumLazyCyclesDetected = lazyCycleDetector.NumCyclesDetected(); + statistics.NumLazyCycleUnifications = lazyCycleDetector.NumCycleUnifications(); + } } PointerObjectConstraintSet::WorklistStatistics PointerObjectConstraintSet::SolveUsingWorklist( WorklistSolverPolicy policy, bool enableOnlineCycleDetection, - bool enableHybridCycleDetection) + bool enableHybridCycleDetection, + bool enableLazyCycleDetection) { // Takes all parameters as compile time types. @@ -1784,20 +1827,26 @@ PointerObjectConstraintSet::SolveUsingWorklist( // the rest are instances of std::bool_constant, either std::true_type or std::false_type const auto Dispatch = [&](auto tWorklist, auto tOnlineCycleDetection, - auto tHybridCycleDetection) -> WorklistStatistics + auto tHybridCycleDetection, + auto tLazyCycleDetection) -> WorklistStatistics { using Worklist = std::remove_pointer_t; constexpr bool vOnlineCycleDetection = decltype(tOnlineCycleDetection)::value; constexpr bool vHybridCycleDetection = decltype(tHybridCycleDetection)::value; + constexpr bool vLazyCycleDetection = decltype(tLazyCycleDetection)::value; - if constexpr (vOnlineCycleDetection && vHybridCycleDetection) + if constexpr (vOnlineCycleDetection && (vHybridCycleDetection || vLazyCycleDetection)) { - JLM_UNREACHABLE("Can not enable hybrid cycle detection with online cycle detection"); + JLM_UNREACHABLE("Can not enable hybrid or lazy cycle detection with online cycle detection"); } else { WorklistStatistics statistics(policy); - RunWorklistSolver(statistics); + RunWorklistSolver< + Worklist, + 
vOnlineCycleDetection, + vHybridCycleDetection, + vLazyCycleDetection>(statistics); return statistics; } }; @@ -1832,11 +1881,18 @@ PointerObjectConstraintSet::SolveUsingWorklist( else hybridCycleDetectionVariant = std::false_type{}; + std::variant lazyCycleDetectionVariant; + if (enableLazyCycleDetection) + lazyCycleDetectionVariant = std::true_type{}; + else + lazyCycleDetectionVariant = std::false_type{}; + return std::visit( Dispatch, policyVariant, onlineCycleDetectionVariant, - hybridCycleDetectionVariant); + hybridCycleDetectionVariant, + lazyCycleDetectionVariant); } const char * diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index d85f33760..316168904 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -813,6 +813,16 @@ class PointerObjectConstraintSet final * The number of unifications performed due to hybrid cycle detection. */ std::optional NumHybridCycleUnifications; + + /** + * The number of DFSs started in attempts at detecting cycles, + * the number of cycles detected by lazy cycle detection, + * and number of unifications made to eliminate the cycles, + * if Lazy Cycle Detection is enabled. + */ + std::optional NumLazyCyclesDetectionAttempts; + std::optional NumLazyCyclesDetected; + std::optional NumLazyCycleUnifications; }; explicit PointerObjectConstraintSet(PointerObjectSet & set) @@ -942,16 +952,19 @@ class PointerObjectConstraintSet final * These papers also describe a set of techniques that potentially improve solving performance: * - Online Cycle Detection (Pearce, 2003) * - Hybrid Cycle Detection (Hardekopf 2007) + * - Lazy Cycle Detection (Hardekopf 2007) * @param policy the worklist iteration order policy to use * @param enableOnlineCycleDetection if true, online cycle detection will be performed. * @param enableHybridCycleDetection if true, hybrid cycle detection will be performed. 
+ * @param enableLazyCycleDetection if true, lazy cycle detection will be performed. * @return an instance of WorklistStatistics describing solver statistics */ WorklistStatistics SolveUsingWorklist( WorklistSolverPolicy policy, bool enableOnlineCycleDetection, - bool enableHybridCycleDetection); + bool enableHybridCycleDetection, + bool enableLazyCycleDetection); /** * Iterates over and applies constraints until all points-to-sets satisfy them. @@ -995,9 +1008,14 @@ class PointerObjectConstraintSet final * @tparam Worklist a type supporting the worklist interface with PointerObjectIndex as work items * @tparam EnableOnlineCycleDetection if true, online cycle detection is enabled. * @tparam EnableHybridCycleDetection if true, hybrid cycle detection is enabled. + * @tparam EnableLazyCycleDetection if true, lazy cycle detection is enabled. * @see SolveUsingWorklist() for the public interface. */ - template + template< + typename Worklist, + bool EnableOnlineCycleDetection, + bool EnableHybridCycleDetection, + bool EnableLazyCycleDetection> void RunWorklistSolver(WorklistStatistics & statistics); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection.cpp b/tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection.cpp new file mode 100644 index 000000000..549bd911d --- /dev/null +++ b/tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection.cpp @@ -0,0 +1,110 @@ +/* + * Copyright 2023, 2024 Håvard Krogstie + * See COPYING for terms of redistribution. 
+ */ + +#include + +#include + +#include +#include +#include + +#include +#include + +static int +TestUnifiesCycles() +{ + using namespace jlm; + using namespace jlm::llvm::aa; + + // Arrange + PointerObjectSet set; + for (int i = 0; i < 6; i++) + { + (void)set.CreateDummyRegisterPointerObject(); + } + + // Create a graph that looks like + // --> 1 --> 2 --> 3 + // / | + // 0 | + // \ V + // --> 5 --> 4 + std::vector> successors(set.NumPointerObjects()); + successors[0].Insert(1); + successors[1].Insert(2); + successors[2].Insert(3); + successors[2].Insert(4); + successors[0].Insert(5); + successors[5].Insert(4); + + auto GetSuccessors = [&](PointerObjectIndex i) + { + assert(set.IsUnificationRoot(i)); + return successors[i].Items(); + }; + + auto UnifyPointerObjects = [&](PointerObjectIndex a, PointerObjectIndex b) + { + assert(set.IsUnificationRoot(a)); + assert(set.IsUnificationRoot(b)); + assert(a != b); + auto newRoot = set.UnifyPointerObjects(a, b); + auto notRoot = a + b - newRoot; + + successors[newRoot].UnionWith(successors[notRoot]); + return newRoot; + }; + + LazyCycleDetector lcd(set, GetSuccessors, UnifyPointerObjects); + lcd.Initialize(); + + // Act 1 - an edge that is not a part of a cycle + lcd.OnPropagatedNothing(0, 1); + + // Assert that nothing happened + assert(lcd.NumCycleDetectionAttempts() == 1); + assert(lcd.NumCyclesDetected() == 0); + assert(lcd.NumCycleUnifications() == 0); + + // Act 2 - Try the same edge again + lcd.OnPropagatedNothing(0, 1); + + // Assert that the second attempt is ignored + assert(lcd.NumCycleDetectionAttempts() == 1); + assert(lcd.NumCyclesDetected() == 0); + assert(lcd.NumCycleUnifications() == 0); + + // Act 3 - add the edge 3->1 that creates a cycle 3-1-2-3 + successors[3].Insert(1); + lcd.OnPropagatedNothing(3, 1); + + // Assert that the cycle was found and unified + assert(lcd.NumCycleDetectionAttempts() == 2); + assert(lcd.NumCyclesDetected() == 1); + assert(lcd.NumCycleUnifications() == 2); + 
assert(set.GetUnificationRoot(1) == set.GetUnificationRoot(2)); + assert(set.GetUnificationRoot(1) == set.GetUnificationRoot(3)); + + // Act 4 - add the edge 4 -> 0, creating two cycles 4-0-5-4 and 4-0-(1/2/3)-4 + successors[4].Insert(0); + lcd.OnPropagatedNothing(4, 0); + + // Assert that both cycles were found. + // They are only counted as one cycle, but everything should be unified now + assert(lcd.NumCyclesDetected() == 2); + assert(lcd.NumCycleUnifications() == set.NumPointerObjects() - 1); + for (PointerObjectIndex i = 1; i < set.NumPointerObjects(); i++) + { + assert(set.GetUnificationRoot(0) == set.GetUnificationRoot(i)); + } + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestLazyCycleDetection-TestUnifiesCycles", + TestUnifiesCycles) diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 74be2b70f..c611a9140 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -890,7 +890,8 @@ TestPointerObjectSet() TestPointerObjectConstraintSetSolve( config.GetWorklistSoliverPolicy(), config.IsOnlineCycleDetectionEnabled(), - config.IsHybridCycleDetectionEnabled()); + config.IsHybridCycleDetectionEnabled(), + config.IsLazyCycleDetectionEnabled()); } TestClonePointerObjectConstraintSet(); From 7562126eca8f8330be502d613856ff7c5352e0b4 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 20 Aug 2024 20:58:40 +0200 Subject: [PATCH 047/170] Remove expport class (#585) This PR does the following: 1. Introduces the GraphExport class and its subclasses to model the export of internal entities from the module. These classes serve as a replacement for the old expport class. 2. Replaces all usages of expport with GraphExport 3. Removes old expport classes. This is one of the steps necessary in order to completely remove ports from the code base. 
--- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 +- .../InterProceduralGraphConversion.cpp | 4 +- jlm/llvm/ir/RvsdgModule.cpp | 7 + jlm/llvm/ir/RvsdgModule.hpp | 27 ++- jlm/rvsdg/graph.cpp | 21 +- jlm/rvsdg/graph.hpp | 55 ++---- tests/TestRvsdgs.cpp | 94 ++++----- .../rvsdg2rhls/DeadNodeEliminationTests.cpp | 2 +- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 4 +- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 4 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 2 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 14 +- .../rvsdg2rhls/test-loop-passthrough.cpp | 2 +- .../backend/llvm/r2j/test-empty-gamma.cpp | 6 +- .../backend/llvm/r2j/test-partial-gamma.cpp | 2 +- .../backend/llvm/r2j/test-recursive-data.cpp | 2 +- tests/jlm/llvm/ir/operators/LoadTests.cpp | 37 ++-- tests/jlm/llvm/ir/operators/StoreTests.cpp | 19 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 8 +- tests/jlm/llvm/ir/operators/TestLambda.cpp | 12 +- tests/jlm/llvm/ir/operators/TestPhi.cpp | 2 +- tests/jlm/llvm/ir/operators/test-delta.cpp | 4 +- tests/jlm/llvm/ir/operators/test-sext.cpp | 13 +- .../opt/InvariantValueRedirectionTests.cpp | 8 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 24 +-- tests/jlm/llvm/opt/TestLoadMuxReduction.cpp | 25 +-- tests/jlm/llvm/opt/TestLoadStoreReduction.cpp | 7 +- tests/jlm/llvm/opt/test-cne.cpp | 64 +++--- tests/jlm/llvm/opt/test-inlining.cpp | 4 +- tests/jlm/llvm/opt/test-inversion.cpp | 16 +- tests/jlm/llvm/opt/test-pull.cpp | 10 +- tests/jlm/llvm/opt/test-push.cpp | 8 +- tests/jlm/llvm/opt/test-unroll.cpp | 4 +- tests/jlm/rvsdg/bitstring/bitstring.cpp | 184 +++++++++--------- tests/jlm/rvsdg/test-binary.cpp | 8 +- tests/jlm/rvsdg/test-bottomup.cpp | 4 +- tests/jlm/rvsdg/test-cse.cpp | 20 +- tests/jlm/rvsdg/test-gamma.cpp | 30 +-- tests/jlm/rvsdg/test-graph.cpp | 8 +- tests/jlm/rvsdg/test-nodes.cpp | 2 +- tests/jlm/rvsdg/test-statemux.cpp | 8 +- tests/jlm/rvsdg/test-theta.cpp | 10 +- tests/jlm/rvsdg/test-topdown.cpp | 10 +- tests/test-operation.cpp | 7 + 
tests/test-operation.hpp | 23 +++ 45 files changed, 419 insertions(+), 410 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 66c533430..952473bd0 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -212,7 +212,7 @@ convert_alloca(jlm::rvsdg::region * region) cout = jlm::rvsdg::simple_node::create_normalized(db->subregion(), cop, {})[0]; } auto delta = db->finalize(cout); - region->graph()->add_export(delta, { delta_type, delta_name }); + jlm::llvm::GraphExport::Create(*delta, delta_name); auto delta_local = route_to_region(delta, region); node->output(0)->divert_users(delta_local); // TODO: check that the input to alloca is a bitconst 1 @@ -375,7 +375,7 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) smap.insert(ln->input(i)->origin(), &graphImport); // add export for delta to rm // TODO: check if not already exported and maybe adjust linkage? 
- rm.Rvsdg().add_export(odn->output(), { odn->output()->Type(), odn->name() }); + jlm::llvm::GraphExport::Create(*odn->output(), odn->name()); } else { diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 6abd1cb7e..71cd342e7 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -1116,7 +1116,7 @@ ConvertStronglyConnectedComponent( regionalizedVariableMap.GetTopVariableMap().insert(ipgNodeVariable, output); if (requiresExport(*ipgNode)) - graph.add_export(output, { output->Type(), ipgNodeVariable->name() }); + GraphExport::Create(*output, ipgNodeVariable->name()); return; } @@ -1179,7 +1179,7 @@ ConvertStronglyConnectedComponent( auto recursionVariable = recursionVariables[ipgNodeVariable]; regionalizedVariableMap.GetTopVariableMap().insert(ipgNodeVariable, recursionVariable); if (requiresExport(*ipgNode)) - graph.add_export(recursionVariable, { recursionVariable->Type(), ipgNodeVariable->name() }); + GraphExport::Create(*recursionVariable, ipgNodeVariable->name()); } } diff --git a/jlm/llvm/ir/RvsdgModule.cpp b/jlm/llvm/ir/RvsdgModule.cpp index 7d69f6b94..7d253d1fc 100644 --- a/jlm/llvm/ir/RvsdgModule.cpp +++ b/jlm/llvm/ir/RvsdgModule.cpp @@ -14,4 +14,11 @@ GraphImport::Copy(rvsdg::region & region, rvsdg::structural_input * input) return GraphImport::Create(*region.graph(), ValueType(), Name(), Linkage()); } +GraphExport & +GraphExport::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +{ + JLM_ASSERT(output == nullptr); + return GraphExport::Create(origin, Name()); +} + } diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 8891b383a..3f161cb3b 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -65,14 +65,29 @@ class GraphImport final : public rvsdg::GraphImport std::shared_ptr ValueType_; }; -static inline bool -is_export(const jlm::rvsdg::input * 
input) +/** + * Represents an export from the RVSDG of an internal entity. + * It is used to model externally visible entities from LLVM modules. + */ +class GraphExport final : public rvsdg::GraphExport { - auto graph = input->region()->graph(); +private: + GraphExport(rvsdg::output & origin, std::string name) + : rvsdg::GraphExport(origin, std::move(name)) + {} - auto result = dynamic_cast(input); - return result && result->region() == graph->root(); -} +public: + GraphExport & + Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + + static GraphExport & + Create(rvsdg::output & origin, std::string name) + { + auto graphExport = new GraphExport(origin, std::move(name)); + origin.region()->graph()->root()->append_result(graphExport); + return *graphExport; + } +}; /** * An LLVM module utilizing the RVSDG representation. diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index bb868f466..47b63c289 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -22,26 +22,11 @@ GraphImport::GraphImport( Name_(std::move(name)) {} -/* expport */ - -expport::~expport() +GraphExport::GraphExport(rvsdg::output & origin, std::string name) + : result(origin.region()->graph()->root(), &origin, nullptr, origin.Type()), + Name_(std::move(name)) {} -bool -expport::operator==(const port & other) const noexcept -{ - auto p = dynamic_cast(&other); - return p && p->type() == type() && p->name() == name(); -} - -std::unique_ptr -expport::copy() const -{ - return std::unique_ptr(new expport(*this)); -} - -/* graph */ - graph::~graph() { JLM_ASSERT(!has_active_trackers(this)); diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index 5524236c0..b00ae85d5 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -41,52 +41,25 @@ class GraphImport : public argument std::string Name_; }; -/* expport class */ - -class expport : public port +/** + * Represents an export from the RVSDG of an internal entity. 
+ */ +class GraphExport : public result { -public: - virtual ~expport(); - - expport(std::shared_ptr type, const std::string & name) - : port(std::move(type)), - name_(name) - {} - - expport(const expport & other) - : port(other), - name_(other.name_) - {} - - expport(expport && other) - : port(other), - name_(std::move(other.name_)) - {} - - expport & - operator=(const expport &) = delete; - - expport & - operator=(expport &&) = delete; +protected: + GraphExport(rvsdg::output & origin, std::string name); - const std::string & - name() const noexcept +public: + [[nodiscard]] const std::string & + Name() const noexcept { - return name_; + return Name_; } - virtual bool - operator==(const port &) const noexcept override; - - virtual std::unique_ptr - copy() const override; - private: - std::string name_; + std::string Name_; }; -/* graph */ - class graph { public: @@ -119,12 +92,6 @@ class graph jlm::rvsdg::node_normal_form * node_normal_form(const std::type_info & type) noexcept; - inline jlm::rvsdg::input * - add_export(jlm::rvsdg::output * operand, const expport & port) - { - return result::create(root(), operand, nullptr, port); - } - inline void prune() { diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 937cc1838..89753a7f3 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -45,7 +45,7 @@ StoreTest1::SetupRvsdg() fct->finalize({ c_amp_d[0] }); - graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* extract nodes */ @@ -102,7 +102,7 @@ StoreTest2::SetupRvsdg() fct->finalize({ p_amp_y[0] }); - graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* extract nodes */ @@ -144,7 +144,7 @@ LoadTest1::SetupRvsdg() fct->finalize(ld2); - graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* extract nodes */ @@ -201,7 +201,7 @@ LoadTest2::SetupRvsdg() fct->finalize({ y_star_p[0] }); - 
graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* extract nodes */ @@ -248,7 +248,7 @@ LoadFromUndefTest::SetupRvsdg() 4); Lambda_->finalize(loadResults); - rvsdg.add_export(Lambda_->output(), { pointerType, "f" }); + GraphExport::Create(*Lambda_->output(), "f"); /* * Extract nodes @@ -300,7 +300,7 @@ GetElementPtrTest::SetupRvsdg() fct->finalize({ sum, ldy[1] }); - graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* * Assign nodes @@ -333,7 +333,7 @@ BitCastTest::SetupRvsdg() fct->finalize({ cast }); - graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* * Assign nodes @@ -399,7 +399,7 @@ Bits2PtrTest::SetupRvsdg() { valueArgument, iOStateArgument, memoryStateArgument }); lambda->finalize({ call.GetIoStateOutput(), call.GetMemoryStateOutput() }); - graph->add_export(lambda->output(), { PointerType::Create(), "testfct" }); + GraphExport::Create(*lambda->output(), "testfct"); return std::make_tuple(lambda, &call); }; @@ -447,7 +447,7 @@ ConstantPointerNullTest::SetupRvsdg() fct->finalize({ st[0] }); - graph->add_export(fct->output(), { pointerType, "f" }); + GraphExport::Create(*fct->output(), "f"); /* * Assign nodes @@ -584,7 +584,7 @@ CallTest1::SetupRvsdg() auto sum = jlm::rvsdg::bitadd_op::create(32, callF.Result(0), callG.Result(0)); lambda->finalize({ sum, callG.GetIoStateOutput(), callG.GetMemoryStateOutput() }); - graph->add_export(lambda->output(), { PointerType::Create(), "h" }); + GraphExport::Create(*lambda->output(), "h"); auto allocaX = jlm::rvsdg::node_output::node(x[0]); auto allocaY = jlm::rvsdg::node_output::node(y[0]); @@ -716,7 +716,7 @@ CallTest2::SetupRvsdg() { create2.Result(0), destroy1.GetIoStateOutput(), destroy1.GetMemoryStateOutput() }); lambda->finalize({ destroy2.GetIoStateOutput(), destroy2.GetMemoryStateOutput() }); - graph->add_export(lambda->output(), { PointerType::Create(), 
"test" }); + GraphExport::Create(*lambda->output(), "test"); return std::make_tuple(lambda, &create1, &create2, &destroy1, &destroy2); }; @@ -828,7 +828,7 @@ IndirectCallTest1::SetupRvsdg() auto lambdaOutput = lambda->finalize({ add, call_three.GetIoStateOutput(), call_three.GetMemoryStateOutput() }); - graph->add_export(lambda->output(), { pointerType, "test" }); + GraphExport::Create(*lambda->output(), "test"); return std::make_tuple(lambdaOutput, &call_three, &call_four); }; @@ -1023,7 +1023,7 @@ IndirectCallTest2::SetupRvsdg() auto lambdaOutput = lambda->finalize({ sum, callY.GetIoStateOutput(), callY.GetMemoryStateOutput() }); - graph->add_export(lambdaOutput, { PointerType::Create(), "test" }); + GraphExport::Create(*lambdaOutput, "test"); return std::make_tuple( lambdaOutput, @@ -1059,7 +1059,7 @@ IndirectCallTest2::SetupRvsdg() { pzAlloca[0], iOStateArgument, pzMerge }); auto lambdaOutput = lambda->finalize(callX.Results()); - graph->add_export(lambdaOutput, { PointerType::Create(), "test2" }); + GraphExport::Create(*lambdaOutput, "test2"); return std::make_tuple( lambdaOutput, @@ -1173,7 +1173,7 @@ ExternalCallTest1::SetupRvsdg() { loadPath[0], loadMode[0], iOStateArgument, loadMode[1] }); lambda->finalize(callG.Results()); - rvsdg->add_export(lambda->output(), { pointerType, "f" }); + GraphExport::Create(*lambda->output(), "f"); return std::make_tuple(lambda, &callG); }; @@ -1339,7 +1339,7 @@ GammaTest::SetupRvsdg() fct->finalize({ sum, ld2[1] }); - graph->add_export(fct->output(), { PointerType::Create(), "f" }); + GraphExport::Create(*fct->output(), "f"); /* * Assign nodes @@ -1503,7 +1503,7 @@ GammaTest2::SetupRvsdg() { predicate, allocaXResults[0], allocaYResults[0], iOStateArgument, storeYResults[0] }); lambda->finalize(call.Results()); - rvsdg->add_export(lambda->output(), { PointerType::Create(), functionName }); + GraphExport::Create(*lambda->output(), functionName); return std::make_tuple( lambda->output(), @@ -1584,7 +1584,7 @@ 
ThetaTest::SetupRvsdg() thetanode->set_predicate(predicate); fct->finalize({ s }); - graph->add_export(fct->output(), { PointerType::Create(), "f" }); + GraphExport::Create(*fct->output(), "f"); /* * Assign nodes @@ -1665,7 +1665,7 @@ DeltaTest1::SetupRvsdg() auto & callG = CallNode::CreateNode(cvg, g->node()->Type(), { cvf, iOStateArgument, st[0] }); auto lambdaOutput = lambda->finalize(callG.Results()); - graph->add_export(lambda->output(), { PointerType::Create(), "h" }); + GraphExport::Create(*lambda->output(), "h"); return std::make_tuple(lambdaOutput, &callG, jlm::rvsdg::node_output::node(five)); }; @@ -1773,7 +1773,7 @@ DeltaTest2::SetupRvsdg() st = StoreNonVolatileNode::Create(cvd2, b42, { call.GetMemoryStateOutput() }, 4); auto lambdaOutput = lambda->finalize(call.Results()); - graph->add_export(lambdaOutput, { PointerType::Create(), "f2" }); + GraphExport::Create(*lambdaOutput, "f2"); return std::make_tuple(lambdaOutput, &call); }; @@ -1880,7 +1880,7 @@ DeltaTest3::SetupRvsdg() { iOStateArgument, memoryStateArgument }); auto lambdaOutput = lambda->finalize({ call.GetIoStateOutput(), call.GetMemoryStateOutput() }); - graph->add_export(lambdaOutput, { PointerType::Create(), "test" }); + GraphExport::Create(*lambdaOutput, "test"); return std::make_tuple(lambdaOutput, &call); }; @@ -1959,7 +1959,7 @@ ImportTest::SetupRvsdg() st = StoreNonVolatileNode::Create(cvd2, b21, { call.GetMemoryStateOutput() }, 4); auto lambdaOutput = lambda->finalize(call.Results()); - graph->add_export(lambda->output(), { PointerType::Create(), "f2" }); + GraphExport::Create(*lambda->output(), "f2"); return std::make_tuple(lambdaOutput, &call); }; @@ -2127,7 +2127,7 @@ PhiTest1::SetupRvsdg() CallNode::CreateNode(fibcv, fibFunctionType, { ten, gep, iOStateArgument, state }); auto lambdaOutput = lambda->finalize(call.Results()); - graph->add_export(lambdaOutput, { PointerType::Create(), "test" }); + GraphExport::Create(*lambdaOutput, "test"); return std::make_tuple(lambdaOutput, &call, 
jlm::rvsdg::node_output::node(allocaResults[0])); }; @@ -2446,7 +2446,7 @@ PhiTest2::SetupRvsdg() { pTestAlloca[0], iOStateArgument, pTestMerge }); auto lambdaOutput = lambda->finalize(callA.Results()); - graph->add_export(lambdaOutput, { PointerType::Create(), "test" }); + GraphExport::Create(*lambdaOutput, "test"); return std::make_tuple( lambdaOutput, @@ -2549,7 +2549,7 @@ PhiWithDeltaTest::SetupRvsdg() myArrayRecVar->result()->divert_to(deltaOutput); auto phiNode = pb.end(); - rvsdg.add_export(phiNode->output(0), { PointerType::Create(), "myArray" }); + GraphExport::Create(*phiNode->output(0), "myArray"); return rvsdgModule; } @@ -2586,7 +2586,7 @@ ExternalMemoryTest::SetupRvsdg() auto storeTwo = StoreNonVolatileNode::Create(y, two, { storeOne[0] }, 4); LambdaF->finalize(storeTwo); - graph->add_export(LambdaF->output(), { pointerType, "f" }); + GraphExport::Create(*LambdaF->output(), "f"); return module; } @@ -2654,7 +2654,7 @@ EscapedMemoryTest1::SetupRvsdg() auto contextVariableX = deltaNode->add_ctxvar(&deltaX); auto deltaOutput = deltaNode->finalize(contextVariableX); - rvsdg->add_export(deltaOutput, { pointerType, "y" }); + GraphExport::Create(*deltaOutput, "y"); return deltaOutput; }; @@ -2690,7 +2690,7 @@ EscapedMemoryTest1::SetupRvsdg() auto lambdaOutput = lambda->finalize({ loadResults2[0], iOStateArgument, storeResults[0] }); - rvsdg->add_export(lambdaOutput, { pointerType, "test" }); + GraphExport::Create(*lambdaOutput, "test"); return std::make_tuple( lambdaOutput, @@ -2785,7 +2785,7 @@ EscapedMemoryTest2::SetupRvsdg() auto lambdaOutput = lambda->finalize({ mallocResults[0], iOStateArgument, mergeResults }); - rvsdg->add_export(lambdaOutput, { pointerType, "ReturnAddress" }); + GraphExport::Create(*lambdaOutput, "ReturnAddress"); return std::make_tuple(lambdaOutput, jlm::rvsdg::node_output::node(mallocResults[0])); }; @@ -2821,7 +2821,7 @@ EscapedMemoryTest2::SetupRvsdg() auto lambdaOutput = lambda->finalize(call.Results()); - 
rvsdg->add_export(lambdaOutput, { pointerType, "CallExternalFunction1" }); + GraphExport::Create(*lambdaOutput, "CallExternalFunction1"); return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::node_output::node(mallocResults[0])); }; @@ -2858,7 +2858,7 @@ EscapedMemoryTest2::SetupRvsdg() auto lambdaOutput = lambda->finalize({ loadResults[0], call.GetIoStateOutput(), loadResults[1] }); - rvsdg->add_export(lambdaOutput, { pointerType, "CallExternalFunction2" }); + GraphExport::Create(*lambdaOutput, "CallExternalFunction2"); return std::make_tuple( lambdaOutput, @@ -2937,7 +2937,7 @@ EscapedMemoryTest3::SetupRvsdg() auto deltaOutput = delta->finalize(constant); - rvsdg->add_export(deltaOutput, { pointerType, "global" }); + GraphExport::Create(*deltaOutput, "global"); return deltaOutput; }; @@ -2971,7 +2971,7 @@ EscapedMemoryTest3::SetupRvsdg() auto lambdaOutput = lambda->finalize({ loadResults[0], call.GetIoStateOutput(), loadResults[1] }); - rvsdg->add_export(lambdaOutput, { pointerType, "test" }); + GraphExport::Create(*lambdaOutput, "test"); return std::make_tuple( lambdaOutput, @@ -3027,7 +3027,7 @@ MemcpyTest::SetupRvsdg() auto deltaOutput = delta->finalize(constantDataArray); - rvsdg->add_export(deltaOutput, { PointerType::Create(), "localArray" }); + GraphExport::Create(*deltaOutput, "localArray"); return deltaOutput; }; @@ -3046,7 +3046,7 @@ MemcpyTest::SetupRvsdg() auto deltaOutput = delta->finalize(constantAggregateZero); - rvsdg->add_export(deltaOutput, { PointerType::Create(), "globalArray" }); + GraphExport::Create(*deltaOutput, "globalArray"); return deltaOutput; }; @@ -3082,7 +3082,7 @@ MemcpyTest::SetupRvsdg() auto lambdaOutput = lambda->finalize({ loadResults[0], iOStateArgument, loadResults[1] }); - rvsdg->add_export(lambdaOutput, { PointerType::Create(), "f" }); + GraphExport::Create(*lambdaOutput, "f"); return lambdaOutput; }; @@ -3122,7 +3122,7 @@ MemcpyTest::SetupRvsdg() auto lambdaOutput = lambda->finalize(call.Results()); - 
rvsdg->add_export(lambdaOutput, { PointerType::Create(), "g" }); + GraphExport::Create(*lambdaOutput, "g"); return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::node_output::node(memcpyResults[0])); }; @@ -3231,7 +3231,7 @@ MemcpyTest2::SetupRvsdg() auto lambdaOutput = lambda->finalize(call.Results()); - rvsdg->add_export(lambdaOutput, { PointerType::Create(), "f" }); + GraphExport::Create(*lambdaOutput, "f"); return std::make_tuple(lambdaOutput, &call); }; @@ -3296,7 +3296,7 @@ MemcpyTest3::SetupRvsdg() auto lambdaOutput = Lambda_->finalize({ iOStateArgument, memcpyResults[0] }); - rvsdg->add_export(lambdaOutput, { PointerType::Create(), "f" }); + GraphExport::Create(*lambdaOutput, "f"); Alloca_ = rvsdg::node_output::node(allocaResults[0]); Memcpy_ = rvsdg::node_output::node(memcpyResults[0]); @@ -3334,7 +3334,7 @@ LinkedListTest::SetupRvsdg() ConstantPointerNullOperation::Create(delta->subregion(), pointerType); auto deltaOutput = delta->finalize(constantPointerNullResult); - rvsdg.add_export(deltaOutput, { PointerType::Create(), "myList" }); + GraphExport::Create(*deltaOutput, "myList"); return deltaOutput; }; @@ -3372,7 +3372,7 @@ LinkedListTest::SetupRvsdg() auto load4 = LoadNonVolatileNode::Create(alloca[0], { store2[0] }, pointerType, 4); auto lambdaOutput = lambda->finalize({ load4[0], iOStateArgument, load4[1] }); - rvsdg.add_export(lambdaOutput, { pointerType, "next" }); + GraphExport::Create(*lambdaOutput, "next"); return std::make_tuple(jlm::rvsdg::node_output::node(alloca[0]), lambdaOutput); }; @@ -3481,8 +3481,8 @@ AllMemoryNodesTest::SetupRvsdg() Lambda_->finalize({ storeOutputs[0] }); - graph->add_export(Delta_->output(), { pointerType, "global" }); - graph->add_export(Lambda_->output(), { pointerType, "f" }); + GraphExport::Create(*Delta_->output(), "global"); + GraphExport::Create(*Lambda_->output(), "f"); return module; } @@ -3522,7 +3522,7 @@ NAllocaNodesTest::SetupRvsdg() Function_->finalize({ latestMemoryState }); - 
graph->add_export(Function_->output(), { pointerType, "f" }); + GraphExport::Create(*Function_->output(), "f"); return module; } @@ -3595,7 +3595,7 @@ EscapingLocalFunctionTest::SetupRvsdg() // Return &localFunc, pass memory state directly through ExportedFunc_->finalize({ localFuncCtxVar, ExportedFunc_->fctargument(0) }); - graph->add_export(ExportedFunc_->output(), { pointerType, "exportedFunc" }); + GraphExport::Create(*ExportedFunc_->output(), "exportedFunc"); return module; } @@ -3628,7 +3628,7 @@ FreeNullTest::SetupRvsdg() LambdaMain_->finalize({ FreeResults[1], FreeResults[0] }); - graph->add_export(LambdaMain_->output(), { PointerType::Create(), "main" }); + GraphExport::Create(*LambdaMain_->output(), "main"); return module; } @@ -3703,7 +3703,7 @@ LambdaCallArgumentMismatch::SetupRvsdg() auto lambdaOutput = lambda->finalize(call.Results()); - rvsdg.add_export(lambdaOutput, { PointerType::Create(), "main" }); + GraphExport::Create(*lambdaOutput, "main"); return std::make_tuple(lambdaOutput, &call); }; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp index 86ab92316..53d48e77c 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp @@ -71,7 +71,7 @@ TestDeadLoopNodeOutput() auto lambdaOutput = lambdaNode->finalize({ output0 }); - rvsdg.add_export(lambdaOutput, { jlm::llvm::PointerType::Create(), "f" }); + jlm::llvm::GraphExport::Create(*lambdaOutput, "f"); // Act EliminateDeadNodes(rvsdgModule); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 9e0e4197b..0fc973e3f 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -46,7 +46,7 @@ TestFork() loop->set_predicate(match); auto f = lambda->finalize({ loop->output(0), loop->output(1), loop->output(2) }); - 
rm.Rvsdg().add_export(f, { f->Type(), "" }); + jlm::llvm::GraphExport::Create(*f, ""); rvsdg::view(rm.Rvsdg(), stdout); @@ -112,7 +112,7 @@ TestConstantFork() loop->set_predicate(match); auto f = lambda->finalize({ loop->output(0) }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + jlm::llvm::GraphExport::Create(*f, ""); rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index af207b968..1f1aef3f8 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -35,7 +35,7 @@ TestWithMatch() auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); auto f = lambda->finalize({ ex }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + jlm::llvm::GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); @@ -71,7 +71,7 @@ TestWithoutMatch() auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); auto f = lambda->finalize({ ex }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + jlm::llvm::GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index d5109d751..b16296120 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -48,7 +48,7 @@ TestUnknownBoundaries() theta->set_predicate(match); auto f = lambda->finalize({ theta->output(0), theta->output(1), theta->output(2) }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + jlm::llvm::GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index d9f98e465..c0a56dc5b 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -51,11 +51,11 @@ 
TestGamma() auto gammaOutput5 = gammaNode->add_exitvar({ gammaInput6->argument(0), gammaInput7->argument(1) }); - rvsdg.add_export(gammaOutput1, { valueType, "" }); - rvsdg.add_export(gammaOutput2, { valueType, "" }); - rvsdg.add_export(gammaOutput3, { valueType, "" }); - rvsdg.add_export(gammaOutput4, { valueType, "" }); - rvsdg.add_export(gammaOutput5, { valueType, "" }); + GraphExport::Create(*gammaOutput1, ""); + GraphExport::Create(*gammaOutput2, ""); + GraphExport::Create(*gammaOutput3, ""); + GraphExport::Create(*gammaOutput4, ""); + GraphExport::Create(*gammaOutput5, ""); // Act jlm::hls::RemoveUnusedStates(*rvsdgModule); @@ -106,7 +106,7 @@ TestTheta() { valueType }) .output(0); - rvsdg.add_export(result, { valueType, "f" }); + GraphExport::Create(*result, "f"); // Act jlm::hls::RemoveUnusedStates(*rvsdgModule); @@ -152,7 +152,7 @@ TestLambda() auto lambdaOutput = lambdaNode->finalize({ argument0, result1, argument2, result3 }); - rvsdg.add_export(lambdaOutput, { PointerType::Create(), "f" }); + GraphExport::Create(*lambdaOutput, "f"); // Act jlm::hls::RemoveUnusedStates(*rvsdgModule); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp index 0571ce0aa..76d0e3b06 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp @@ -55,7 +55,7 @@ test() auto loop_out = loop->add_loopvar(lambda->fctargument(1)); auto f = lambda->finalize({ loop_out }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + jlm::llvm::GraphExport::Create(*f, ""); rvsdg::view(rm.Rvsdg(), stdout); hls::DotHLS dhls; diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp index fff008140..1a58ab1fa 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp @@ -41,7 +41,7 @@ test_with_match() auto ex = 
gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); auto f = lambda->finalize({ ex }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); @@ -83,7 +83,7 @@ test_without_match() auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); auto f = lambda->finalize({ ex }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); @@ -128,7 +128,7 @@ test_gamma3() auto ex = gamma->add_exitvar({ ev1->argument(0), ev1->argument(1), ev2->argument(2) }); auto f = lambda->finalize({ ex }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp index 5230b326f..dddc66e81 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp @@ -39,7 +39,7 @@ test() auto f = lambda->finalize({ ex }); - rm.Rvsdg().add_export(f, { f->Type(), "" }); + GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp index 633657d8c..63fc9f232 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp @@ -59,7 +59,7 @@ test() r2->set_rvorigin(delta2); auto phi = pb.end(); - rm.Rvsdg().add_export(phi->output(0), { phi->output(0)->Type(), "" }); + GraphExport::Create(*phi->output(0), ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index 09eb7e382..d484d216e 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -14,6 +14,7 @@ #include #include #include +#include 
static int OperationEquality() @@ -97,7 +98,7 @@ TestLoadAllocaReduction() auto value = LoadNonVolatileNode::Create(alloca1[0], { alloca1[1], alloca2[1], mux[0] }, bt, 4)[0]; - auto ex = graph.add_export(value, { value->Type(), "l" }); + auto & ex = GraphExport::Create(*value, "l"); // jlm::rvsdg::view(graph.root(), stdout); @@ -110,7 +111,7 @@ TestLoadAllocaReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex->origin()); + auto node = jlm::rvsdg::node_output::node(ex.origin()); assert(is(node)); assert(node->ninputs() == 3); assert(node->input(1)->origin() == alloca1[1]); @@ -137,7 +138,7 @@ TestMultipleOriginReduction() auto load = LoadNonVolatileNode::Create(a, { s, s, s, s }, vt, 4)[0]; - auto ex = graph.add_export(load, { load->Type(), "l" }); + auto & ex = GraphExport::Create(*load, "l"); // jlm::rvsdg::view(graph.root(), stdout); @@ -149,7 +150,7 @@ TestMultipleOriginReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex->origin()); + auto node = jlm::rvsdg::node_output::node(ex.origin()); assert(is(node)); assert(node->ninputs() == 2); } @@ -177,8 +178,8 @@ TestLoadStoreStateReduction() auto value1 = LoadNonVolatileNode::Create(alloca1[0], { store1[0], store2[0] }, bt, 4)[0]; auto value2 = LoadNonVolatileNode::Create(alloca1[0], { store1[0] }, bt, 8)[0]; - auto ex1 = graph.add_export(value1, { value1->Type(), "l1" }); - auto ex2 = graph.add_export(value2, { value2->Type(), "l2" }); + auto & ex1 = GraphExport::Create(*value1, "l1"); + auto & ex2 = GraphExport::Create(*value2, "l2"); // jlm::rvsdg::view(graph.root(), stdout); @@ -191,11 +192,11 @@ TestLoadStoreStateReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex1->origin()); + auto node = jlm::rvsdg::node_output::node(ex1.origin()); assert(is(node)); assert(node->ninputs() == 2); - node = 
jlm::rvsdg::node_output::node(ex2->origin()); + node = jlm::rvsdg::node_output::node(ex2.origin()); assert(is(node)); assert(node->ninputs() == 2); } @@ -222,8 +223,8 @@ TestLoadStoreReduction() auto s1 = StoreNonVolatileNode::Create(a, v, { s }, 4)[0]; auto load = LoadNonVolatileNode::Create(a, { s1 }, vt, 4); - auto x1 = graph.add_export(load[0], { load[0]->Type(), "value" }); - auto x2 = graph.add_export(load[1], { load[1]->Type(), "state" }); + auto & x1 = GraphExport::Create(*load[0], "value"); + auto & x2 = GraphExport::Create(*load[1], "state"); // jlm::rvsdg::view(graph.root(), stdout); @@ -236,8 +237,8 @@ TestLoadStoreReduction() // Assert assert(graph.root()->nnodes() == 1); - assert(x1->origin() == v); - assert(x2->origin() == s1); + assert(x1.origin() == v); + assert(x2.origin() == s1); } static void @@ -268,9 +269,9 @@ TestLoadLoadReduction() auto ld3 = LoadNonVolatileNode::Create(a4, { st1[0], ld1[1], ld2[1] }, vt, 4); - auto x1 = graph.add_export(ld3[1], { mt, "s" }); - auto x2 = graph.add_export(ld3[2], { mt, "s" }); - auto x3 = graph.add_export(ld3[3], { mt, "s" }); + auto & x1 = GraphExport::Create(*ld3[1], "s"); + auto & x2 = GraphExport::Create(*ld3[2], "s"); + auto & x3 = GraphExport::Create(*ld3[3], "s"); jlm::rvsdg::view(graph.root(), stdout); @@ -285,15 +286,15 @@ TestLoadLoadReduction() // Assert assert(graph.root()->nnodes() == 6); - auto ld = jlm::rvsdg::node_output::node(x1->origin()); + auto ld = jlm::rvsdg::node_output::node(x1.origin()); assert(is(ld)); - auto mx1 = jlm::rvsdg::node_output::node(x2->origin()); + auto mx1 = jlm::rvsdg::node_output::node(x2.origin()); assert(is(mx1) && mx1->ninputs() == 2); assert(mx1->input(0)->origin() == ld1[1] || mx1->input(0)->origin() == ld->output(2)); assert(mx1->input(1)->origin() == ld1[1] || mx1->input(1)->origin() == ld->output(2)); - auto mx2 = jlm::rvsdg::node_output::node(x3->origin()); + auto mx2 = jlm::rvsdg::node_output::node(x3.origin()); assert(is(mx2) && mx2->ninputs() == 2); 
assert(mx2->input(0)->origin() == ld2[1] || mx2->input(0)->origin() == ld->output(3)); assert(mx2->input(1)->origin() == ld2[1] || mx2->input(1)->origin() == ld->output(3)); diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index 61913e02e..7af40fa2d 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -13,6 +13,7 @@ #include #include #include +#include static int StoreNonVolatileOperationEquality() @@ -228,7 +229,7 @@ TestStoreMuxReduction() auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); auto state = StoreNonVolatileNode::Create(a, v, { mux }, 4); - auto ex = graph.add_export(state[0], { state[0]->Type(), "s" }); + auto & ex = GraphExport::Create(*state[0], "s"); // jlm::rvsdg::view(graph.root(), stdout); @@ -241,7 +242,7 @@ TestStoreMuxReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto muxnode = jlm::rvsdg::node_output::node(ex->origin()); + auto muxnode = jlm::rvsdg::node_output::node(ex.origin()); assert(is(muxnode)); assert(muxnode->ninputs() == 3); auto n0 = jlm::rvsdg::node_output::node(muxnode->input(0)->origin()); @@ -274,7 +275,7 @@ TestMultipleOriginReduction() auto states = StoreNonVolatileNode::Create(a, v, { s, s, s, s }, 4); - auto ex = graph.add_export(states[0], { states[0]->Type(), "s" }); + auto & ex = GraphExport::Create(*states[0], "s"); // jlm::rvsdg::view(graph.root(), stdout); @@ -287,7 +288,7 @@ TestMultipleOriginReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex->origin()); + auto node = jlm::rvsdg::node_output::node(ex.origin()); assert(jlm::rvsdg::is(node->operation()) && node->ninputs() == 3); } @@ -316,9 +317,9 @@ TestStoreAllocaReduction() auto states1 = StoreNonVolatileNode::Create(alloca1[0], value, { alloca1[1], alloca2[1], s }, 4); auto states2 = StoreNonVolatileNode::Create(alloca2[0], value, states1, 4); - 
graph.add_export(states2[0], { states2[0]->Type(), "s1" }); - graph.add_export(states2[1], { states2[1]->Type(), "s2" }); - graph.add_export(states2[2], { states2[2]->Type(), "s3" }); + GraphExport::Create(*states2[0], "s1"); + GraphExport::Create(*states2[1], "s2"); + GraphExport::Create(*states2[2], "s3"); // jlm::rvsdg::view(graph.root(), stdout); @@ -359,7 +360,7 @@ TestStoreStoreReduction() auto s1 = StoreNonVolatileNode::Create(a, v1, { s }, 4)[0]; auto s2 = StoreNonVolatileNode::Create(a, v2, { s1 }, 4)[0]; - auto ex = graph.add_export(s2, { s2->Type(), "state" }); + auto & ex = GraphExport::Create(*s2, "state"); jlm::rvsdg::view(graph.root(), stdout); @@ -373,7 +374,7 @@ TestStoreStoreReduction() // Assert assert(graph.root()->nnodes() == 1); - assert(jlm::rvsdg::node_output::node(ex->origin())->input(1)->origin() == v2); + assert(jlm::rvsdg::node_output::node(ex.origin())->input(1)->origin() == v2); } static int diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 3c4b812a8..1518c9e21 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -134,7 +134,7 @@ TestCallTypeClassifierIndirectCall() lambda->finalize(callResults); - graph->add_export(lambda->output(), { PointerType::Create(), "f" }); + GraphExport::Create(*lambda->output(), "f"); return std::make_tuple( jlm::util::AssertedCast(jlm::rvsdg::node_output::node(callResults[0])), @@ -235,7 +235,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto g = SetupFunctionG(); auto [f, callNode] = SetupFunctionF(g); - graph->add_export(f->output(), { PointerType::Create(), "f" }); + GraphExport::Create(*f->output(), "f"); // jlm::rvsdg::view(graph->root(), stdout); @@ -349,7 +349,7 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() auto g = SetupFunctionG(); auto [f, callNode] = SetupFunctionF(g); - graph->add_export(f, { PointerType::Create(), "f" }); + GraphExport::Create(*f, "f"); 
jlm::rvsdg::view(graph->root(), stdout); @@ -464,7 +464,7 @@ TestCallTypeClassifierRecursiveDirectCall() fibrv->result()->divert_to(lambdaOutput); pb.end(); - graph->add_export(fibrv, { pt, "fib" }); + GraphExport::Create(*fibrv, "fib"); return std::make_tuple( lambdaOutput, diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index 7046b526c..0736368ce 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -275,7 +275,7 @@ TestCallSummaryComputationExport() auto result = tests::create_testop(lambdaNode->subregion(), {}, { vt })[0]; auto lambdaOutput = lambdaNode->finalize({ result }); - auto rvsdgExport = rvsdg.add_export(lambdaOutput, { jlm::llvm::PointerType::Create(), "f" }); + auto & rvsdgExport = jlm::llvm::GraphExport::Create(*lambdaOutput, "f"); // Act auto callSummary = lambdaNode->ComputeCallSummary(); @@ -283,7 +283,7 @@ TestCallSummaryComputationExport() // Assert assert(callSummary->IsExported()); assert(callSummary->IsOnlyExported()); - assert(callSummary->GetRvsdgExport() == rvsdgExport); + assert(callSummary->GetRvsdgExport() == &rvsdgExport); assert(callSummary->IsDead() == false); assert(callSummary->HasOnlyDirectCalls() == false); @@ -335,7 +335,7 @@ TestCallSummaryComputationDirectCalls() { iOStateArgument, memoryStateArgument }); auto lambdaOutput = lambdaNode->finalize(callResults); - rvsdg.add_export(lambdaOutput, { jlm::llvm::PointerType::Create(), "y" }); + jlm::llvm::GraphExport::Create(*lambdaOutput, "y"); return lambdaOutput; }; @@ -365,7 +365,7 @@ TestCallSummaryComputationDirectCalls() { vt })[0]; auto lambdaOutput = lambdaNode->finalize({ result, callYResults[1], callYResults[2] }); - rvsdg.add_export(lambdaOutput, { jlm::llvm::PointerType::Create(), "z" }); + jlm::llvm::GraphExport::Create(*lambdaOutput, "z"); return lambdaOutput; }; @@ -471,7 +471,7 @@ TestCallSummaryComputationFunctionPointerInDelta() auto argument = 
deltaNode->add_ctxvar(lambdaNode->output()); deltaNode->finalize(argument); - rvsdg->add_export(deltaNode->output(), { PointerType::Create(), "fp" }); + GraphExport::Create(*deltaNode->output(), "fp"); // Act auto callSummary = lambdaNode->ComputeCallSummary(); @@ -506,7 +506,7 @@ TestCallSummaryComputationLambdaResult() auto lambdaGArgument = lambdaNodeF->add_ctxvar(lambdaOutputG); auto lambdaOutputF = lambdaNodeF->finalize({ lambdaGArgument }); - rvsdg.add_export(lambdaOutputF, { pointerType, "f" }); + GraphExport::Create(*lambdaOutputF, "f"); // Act auto callSummary = lambdaNodeG->ComputeCallSummary(); diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index 79593e6c3..47e50ed41 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -67,7 +67,7 @@ TestPhiCreation() rv3->set_rvorigin(lambdaOutput2); auto phi = pb.end(); - graph.add_export(phi->output(0), { phi->output(0)->Type(), "dummy" }); + GraphExport::Create(*phi->output(0), "dummy"); graph.normalize(); graph.prune(); diff --git a/tests/jlm/llvm/ir/operators/test-delta.cpp b/tests/jlm/llvm/ir/operators/test-delta.cpp index a0f18a5a4..cea0ac86f 100644 --- a/tests/jlm/llvm/ir/operators/test-delta.cpp +++ b/tests/jlm/llvm/ir/operators/test-delta.cpp @@ -44,8 +44,8 @@ TestDeltaCreation() false); auto d2 = delta2->finalize(jlm::tests::create_testop(delta2->subregion(), {}, { valueType })[0]); - rvsdgModule.Rvsdg().add_export(d1, { d1->Type(), "" }); - rvsdgModule.Rvsdg().add_export(d2, { d2->Type(), "" }); + GraphExport::Create(*d1, ""); + GraphExport::Create(*d2, ""); jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); diff --git a/tests/jlm/llvm/ir/operators/test-sext.cpp b/tests/jlm/llvm/ir/operators/test-sext.cpp index 5c99c110b..195047452 100644 --- a/tests/jlm/llvm/ir/operators/test-sext.cpp +++ b/tests/jlm/llvm/ir/operators/test-sext.cpp @@ -11,6 +11,7 @@ #include #include +#include static inline void 
test_bitunary_reduction() @@ -26,7 +27,7 @@ test_bitunary_reduction() auto y = jlm::rvsdg::bitnot_op::create(32, x); auto z = jlm::llvm::sext_op::create(64, y); - auto ex = graph.add_export(z, { z->Type(), "x" }); + auto & ex = jlm::llvm::GraphExport::Create(*z, "x"); // jlm::rvsdg::view(graph, stdout); @@ -36,7 +37,7 @@ test_bitunary_reduction() // jlm::rvsdg::view(graph, stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex->origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); } static inline void @@ -54,7 +55,7 @@ test_bitbinary_reduction() auto z = jlm::rvsdg::bitadd_op::create(32, x, y); auto w = jlm::llvm::sext_op::create(64, z); - auto ex = graph.add_export(w, { w->Type(), "x" }); + auto & ex = jlm::llvm::GraphExport::Create(*w, "x"); // jlm::rvsdg::view(graph, stdout); @@ -64,7 +65,7 @@ test_bitbinary_reduction() // jlm::rvsdg::view(graph, stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex->origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); } static inline void @@ -83,7 +84,7 @@ test_inverse_reduction() auto y = jlm::llvm::trunc_op::create(32, x); auto z = jlm::llvm::sext_op::create(64, y); - auto ex = graph.add_export(z, { z->Type(), "x" }); + auto & ex = jlm::llvm::GraphExport::Create(*z, "x"); jlm::rvsdg::view(graph, stdout); @@ -93,7 +94,7 @@ test_inverse_reduction() jlm::rvsdg::view(graph, stdout); - assert(ex->origin() == x); + assert(ex.origin() == x); } static int diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index 342d8b465..c1b34f4ad 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -66,7 +66,7 @@ TestGamma() auto lambdaOutput = lambdaNode->finalize({ gammaNode1->output(0), gammaNode1->output(1) }); - rvsdg.add_export(lambdaOutput, { lambdaOutput->Type(), "test" }); + GraphExport::Create(*lambdaOutput, 
"test"); // Act RunInvariantValueRedirection(*rvsdgModule); @@ -119,7 +119,7 @@ TestTheta() auto lambdaOutput = lambdaNode->finalize({ thetaOutput1, thetaOutput2, thetaOutput3 }); - rvsdg.add_export(lambdaOutput, { lambdaOutput->Type(), "test" }); + GraphExport::Create(*lambdaOutput, "test"); // Act RunInvariantValueRedirection(*rvsdgModule); @@ -202,7 +202,7 @@ TestCall() { controlResult, xArgument, yArgument, ioStateArgument, memoryStateArgument }); lambdaOutputTest2 = lambdaNode->finalize(outputs(&callNode)); - rvsdg.add_export(lambdaOutputTest2, { lambdaOutputTest2->Type(), "test2" }); + GraphExport::Create(*lambdaOutputTest2, "test2"); } // Act @@ -307,7 +307,7 @@ TestCallWithMemoryStateNodes() lambdaOutputTest2 = lambdaNode->finalize( { callNode.output(0), callNode.GetIoStateOutput(), &lambdaExitMergeResult }); - rvsdg.add_export(lambdaOutputTest2, { lambdaOutputTest2->Type(), "test2" }); + GraphExport::Create(*lambdaOutputTest2, "test2"); } // Act diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 516bc1259..eac59e739 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -36,7 +36,7 @@ TestRoot() jlm::tests::GraphImport::Create(graph, jlm::tests::valuetype::Create(), "x"); auto y = &jlm::tests::GraphImport::Create(graph, jlm::tests::valuetype::Create(), "y"); - graph.add_export(y, { y->Type(), "z" }); + GraphExport::Create(*y, "z"); // jlm::rvsdg::view(graph.root(), stdout); RunDeadNodeElimination(rm); @@ -70,8 +70,8 @@ TestGamma() gamma->add_exitvar({ ev2->argument(0), t }); gamma->add_exitvar({ ev3->argument(0), ev1->argument(1) }); - graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "z" }); - graph.add_export(gamma->output(2), { gamma->output(2)->Type(), "w" }); + GraphExport::Create(*gamma->output(0), "z"); + GraphExport::Create(*gamma->output(2), "w"); // jlm::rvsdg::view(graph.root(), stdout); 
RunDeadNodeElimination(rm); @@ -105,7 +105,7 @@ TestGamma2() gamma->add_exitvar({ n1, n2 }); - graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "x" }); + GraphExport::Create(*gamma->output(0), "x"); // jlm::rvsdg::view(graph, stdout); RunDeadNodeElimination(rm); @@ -145,8 +145,8 @@ TestTheta() auto c = jlm::tests::create_testop(theta->subregion(), {}, { ct })[0]; theta->set_predicate(c); - graph.add_export(theta->output(0), { theta->output(0)->Type(), "a" }); - graph.add_export(theta->output(3), { theta->output(0)->Type(), "b" }); + GraphExport::Create(*theta->output(0), "a"); + GraphExport::Create(*theta->output(3), "b"); // jlm::rvsdg::view(graph.root(), stdout); RunDeadNodeElimination(rm); @@ -192,7 +192,7 @@ TestNestedTheta() otheta->set_predicate(lvo1->argument()); - graph.add_export(otheta->output(2), { otheta->output(2)->Type(), "y" }); + GraphExport::Create(*otheta->output(2), "y"); // jlm::rvsdg::view(graph, stdout); RunDeadNodeElimination(rm); @@ -231,7 +231,7 @@ TestEvolvingTheta() theta->set_predicate(lv0->argument()); - graph.add_export(lv1, { lv1->Type(), "x1" }); + GraphExport::Create(*lv1, "x1"); // jlm::rvsdg::view(graph, stdout); RunDeadNodeElimination(rm); @@ -264,7 +264,7 @@ TestLambda() auto output = lambda->finalize({ lambda->fctargument(0), cv2 }); - graph.add_export(output, { output->Type(), "f" }); + GraphExport::Create(*output, "f"); // jlm::rvsdg::view(graph.root(), stdout); RunDeadNodeElimination(rm); @@ -362,8 +362,8 @@ TestPhi() rv4->set_rvorigin(f4); auto phiNode = phiBuilder.end(); - rvsdg.add_export(phiNode->output(0), { phiNode->output(0)->Type(), "f1" }); - rvsdg.add_export(phiNode->output(3), { phiNode->output(3)->Type(), "f4" }); + GraphExport::Create(*phiNode->output(0), "f1"); + GraphExport::Create(*phiNode->output(3), "f4"); // Act RunDeadNodeElimination(rvsdgModule); @@ -415,7 +415,7 @@ TestDelta() jlm::tests::SimpleNode::Create(*deltaNode->subregion(), { zArgument }, {}); auto deltaOutput = 
deltaNode->finalize(result); - rvsdg.add_export(deltaOutput, { PointerType::Create(), "" }); + GraphExport::Create(*deltaOutput, ""); // Act RunDeadNodeElimination(rvsdgModule); diff --git a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp index 9bfd6bca1..50dd5cd64 100644 --- a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp @@ -9,6 +9,7 @@ #include #include +#include #include @@ -35,8 +36,8 @@ TestSuccess() auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); auto ld = LoadNonVolatileNode::Create(a, { mux }, vt, 4); - auto ex1 = graph.add_export(ld[0], { ld[0]->Type(), "v" }); - auto ex2 = graph.add_export(ld[1], { ld[1]->Type(), "s" }); + auto & ex1 = GraphExport::Create(*ld[0], "v"); + auto & ex2 = GraphExport::Create(*ld[1], "s"); // jlm::rvsdg::view(graph.root(), stdout); @@ -49,14 +50,14 @@ TestSuccess() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto load = jlm::rvsdg::node_output::node(ex1->origin()); + auto load = jlm::rvsdg::node_output::node(ex1.origin()); assert(is(load)); assert(load->ninputs() == 4); assert(load->input(1)->origin() == s1); assert(load->input(2)->origin() == s2); assert(load->input(3)->origin() == s3); - auto merge = jlm::rvsdg::node_output::node(ex2->origin()); + auto merge = jlm::rvsdg::node_output::node(ex2.origin()); assert(is(merge)); assert(merge->ninputs() == 3); for (size_t n = 0; n < merge->ninputs(); n++) @@ -88,9 +89,9 @@ TestWrongNumberOfOperands() auto merge = MemoryStateMergeOperation::Create(std::vector{ s1, s2 }); auto ld = LoadNonVolatileNode::Create(a, { merge, merge }, vt, 4); - auto ex1 = graph.add_export(ld[0], { ld[0]->Type(), "v" }); - auto ex2 = graph.add_export(ld[1], { ld[1]->Type(), "s1" }); - auto ex3 = graph.add_export(ld[2], { ld[2]->Type(), "s2" }); + auto & ex1 = GraphExport::Create(*ld[0], "v"); + auto & ex2 = GraphExport::Create(*ld[1], "s1"); + auto & ex3 = GraphExport::Create(*ld[2], "s2"); 
jlm::rvsdg::view(graph.root(), stdout); @@ -107,9 +108,9 @@ TestWrongNumberOfOperands() // The LoadMux reduction should not be performed, as the current implementation does not correctly // take care of the two identical load state operands originating from the merge node. assert(ld.size() == 3); - assert(ex1->origin() == ld[0]); - assert(ex2->origin() == ld[1]); - assert(ex3->origin() == ld[2]); + assert(ex1.origin() == ld[0]); + assert(ex2.origin() == ld[1]); + assert(ex3.origin() == ld[2]); } static void @@ -130,7 +131,7 @@ TestLoadWithoutStates() auto loadResults = LoadNonVolatileNode::Create(address, {}, valueType, 4); - auto ex = graph.add_export(loadResults[0], { valueType, "v" }); + auto & ex = GraphExport::Create(*loadResults[0], "v"); jlm::rvsdg::view(graph.root(), stdout); @@ -143,7 +144,7 @@ TestLoadWithoutStates() jlm::rvsdg::view(graph.root(), stdout); // Assert - auto load = jlm::rvsdg::node_output::node(ex->origin()); + auto load = jlm::rvsdg::node_output::node(ex.origin()); assert(is(load)); assert(load->ninputs() == 1); } diff --git a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp index b14e914e7..6c26558c4 100644 --- a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp @@ -8,6 +8,7 @@ #include #include +#include #include #include @@ -37,8 +38,8 @@ TestLoadStoreReductionWithDifferentValueOperandType() auto loadResults = LoadNonVolatileNode::Create(address, storeResults, jlm::rvsdg::bittype::Create(8), 4); - auto exportedValue = graph.add_export(loadResults[0], { jlm::rvsdg::bittype::Create(8), "v" }); - graph.add_export(loadResults[1], { memoryStateType, "s" }); + auto & exportedValue = GraphExport::Create(*loadResults[0], "v"); + GraphExport::Create(*loadResults[1], "s"); jlm::rvsdg::view(graph.root(), stdout); @@ -51,7 +52,7 @@ TestLoadStoreReductionWithDifferentValueOperandType() jlm::rvsdg::view(graph.root(), stdout); // Assert - auto load = 
jlm::rvsdg::node_output::node(exportedValue->origin()); + auto load = jlm::rvsdg::node_output::node(exportedValue.origin()); assert(is(load)); assert(load->ninputs() == 2); auto store = jlm::rvsdg::node_output::node(load->input(1)->origin()); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index dc5940282..5cfb9c86f 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -45,13 +45,13 @@ test_simple() auto b3 = jlm::tests::create_testop(graph.root(), { n1, z }, { vt })[0]; auto b4 = jlm::tests::create_testop(graph.root(), { n2, z }, { vt })[0]; - graph.add_export(n1, { n1->Type(), "n1" }); - graph.add_export(n2, { n2->Type(), "n2" }); - graph.add_export(u1, { n2->Type(), "u1" }); - graph.add_export(b1, { n2->Type(), "b1" }); - graph.add_export(b2, { n2->Type(), "b2" }); - graph.add_export(b3, { n2->Type(), "b3" }); - graph.add_export(b4, { n2->Type(), "b4" }); + GraphExport::Create(*n1, "n1"); + GraphExport::Create(*n2, "n2"); + GraphExport::Create(*u1, "u1"); + GraphExport::Create(*b1, "b1"); + GraphExport::Create(*b2, "b2"); + GraphExport::Create(*b3, "b3"); + GraphExport::Create(*b4, "b4"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::cne cne; @@ -104,9 +104,9 @@ test_gamma() gamma->add_exitvar({ n3, ev3->argument(1) }); gamma->add_exitvar({ ev5->argument(0), ev4->argument(1) }); - graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "x1" }); - graph.add_export(gamma->output(1), { gamma->output(1)->Type(), "x2" }); - graph.add_export(gamma->output(2), { gamma->output(2)->Type(), "y" }); + GraphExport::Create(*gamma->output(0), "x1"); + GraphExport::Create(*gamma->output(1), "x2"); + GraphExport::Create(*gamma->output(2), "y"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::cne cne; @@ -161,9 +161,9 @@ test_theta() theta->set_predicate(lv1->argument()); - graph.add_export(theta->output(1), { theta->output(1)->Type(), "lv2" }); - graph.add_export(theta->output(2), { 
theta->output(2)->Type(), "lv3" }); - graph.add_export(theta->output(3), { theta->output(3)->Type(), "lv4" }); + GraphExport::Create(*theta->output(1), "lv2"); + GraphExport::Create(*theta->output(2), "lv3"); + GraphExport::Create(*theta->output(3), "lv4"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::cne cne; @@ -212,8 +212,8 @@ test_theta2() theta->set_predicate(lv1->argument()); - graph.add_export(theta->output(1), { theta->output(1)->Type(), "lv2" }); - graph.add_export(theta->output(2), { theta->output(2)->Type(), "lv3" }); + GraphExport::Create(*theta->output(1), "lv2"); + GraphExport::Create(*theta->output(2), "lv3"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::cne cne; @@ -266,9 +266,9 @@ test_theta3() theta1->set_predicate(lv1->argument()); - graph.add_export(theta1->output(1), { theta1->output(1)->Type(), "lv2" }); - graph.add_export(theta1->output(2), { theta1->output(2)->Type(), "lv3" }); - graph.add_export(theta1->output(3), { theta1->output(3)->Type(), "lv4" }); + GraphExport::Create(*theta1->output(1), "lv2"); + GraphExport::Create(*theta1->output(2), "lv3"); + GraphExport::Create(*theta1->output(3), "lv4"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::cne cne; @@ -321,17 +321,17 @@ test_theta4() theta->set_predicate(lv1->argument()); - auto ex1 = graph.add_export(theta->output(1), { theta->output(1)->Type(), "lv2" }); - auto ex2 = graph.add_export(theta->output(2), { theta->output(2)->Type(), "lv3" }); - graph.add_export(theta->output(3), { theta->output(3)->Type(), "lv4" }); - graph.add_export(theta->output(4), { theta->output(4)->Type(), "lv5" }); + auto & ex1 = GraphExport::Create(*theta->output(1), "lv2"); + auto & ex2 = GraphExport::Create(*theta->output(2), "lv3"); + GraphExport::Create(*theta->output(3), "lv4"); + GraphExport::Create(*theta->output(4), "lv5"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); // jlm::rvsdg::view(graph, stdout); - assert(ex1->origin() != ex2->origin()); + 
assert(ex1.origin() != ex2.origin()); assert(lv2->argument()->nusers() != 0 && lv3->argument()->nusers() != 0); assert(lv6->result()->origin() == lv7->result()->origin()); } @@ -367,18 +367,18 @@ test_theta5() theta->set_predicate(lv0->argument()); - auto ex1 = graph.add_export(theta->output(1), { theta->output(1)->Type(), "lv1" }); - auto ex2 = graph.add_export(theta->output(2), { theta->output(2)->Type(), "lv2" }); - auto ex3 = graph.add_export(theta->output(3), { theta->output(3)->Type(), "lv3" }); - auto ex4 = graph.add_export(theta->output(4), { theta->output(4)->Type(), "lv4" }); + auto & ex1 = GraphExport::Create(*theta->output(1), "lv1"); + auto & ex2 = GraphExport::Create(*theta->output(2), "lv2"); + auto & ex3 = GraphExport::Create(*theta->output(3), "lv3"); + auto & ex4 = GraphExport::Create(*theta->output(4), "lv4"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); // jlm::rvsdg::view(graph, stdout); - assert(ex1->origin() == ex2->origin()); - assert(ex3->origin() == ex4->origin()); + assert(ex1.origin() == ex2.origin()); + assert(ex3.origin() == ex4.origin()); assert(region->result(4)->origin() == region->result(5)->origin()); assert(region->result(2)->origin() == region->result(3)->origin()); } @@ -407,7 +407,7 @@ test_lambda() auto output = lambda->finalize({ b1 }); - graph.add_export(output, { output->Type(), "f" }); + GraphExport::Create(*output, "f"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::cne cne; @@ -456,8 +456,8 @@ test_phi() auto phi = pb.end(); - graph.add_export(phi->output(0), { phi->output(0)->Type(), "f1" }); - graph.add_export(phi->output(1), { phi->output(1)->Type(), "f2" }); + GraphExport::Create(*phi->output(0), "f1"); + GraphExport::Create(*phi->output(1), "f2"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::cne cne; diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index a3917f785..b5e27d5ba 100644 --- 
a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -87,7 +87,7 @@ test1() auto f1 = SetupF1(); auto f2 = SetupF2(f1); - graph.add_export(f2, { f2->Type(), "f2" }); + GraphExport::Create(*f2, "f2"); // jlm::rvsdg::view(graph.root(), stdout); @@ -152,7 +152,7 @@ test2() auto f1 = SetupF1(functionType1); auto f2 = SetupF2(f1); - graph.add_export(f2, { f2->Type(), "f2" }); + GraphExport::Create(*f2, "f2"); jlm::rvsdg::view(graph.root(), stdout); diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index 5ecb9091a..b5cad8218 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -61,18 +61,18 @@ test1() theta->set_predicate(predicate); - auto ex1 = graph.add_export(theta->output(0), { theta->output(0)->Type(), "x" }); - auto ex2 = graph.add_export(theta->output(1), { theta->output(1)->Type(), "y" }); - auto ex3 = graph.add_export(theta->output(2), { theta->output(2)->Type(), "z" }); + auto & ex1 = GraphExport::Create(*theta->output(0), "x"); + auto & ex2 = GraphExport::Create(*theta->output(1), "y"); + auto & ex3 = GraphExport::Create(*theta->output(2), "z"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::tginversion tginversion; tginversion.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex1->origin()))); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex2->origin()))); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex3->origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex1.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex2.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex3.origin()))); } static inline void @@ -110,14 +110,14 @@ test2() theta->set_predicate(predicate); - auto ex = graph.add_export(theta->output(0), { theta->output(0)->Type(), "x" }); + auto & ex = GraphExport::Create(*theta->output(0), 
"x"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::tginversion tginversion; tginversion.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex->origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); } static int diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index 22bc63b8a..2c2434ccc 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -45,8 +45,8 @@ test_pullin_top() auto ev = gamma->add_entryvar(n5); gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); - graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "x" }); - graph.add_export(n2, { n2->Type(), "y" }); + GraphExport::Create(*gamma->output(0), "x"); + GraphExport::Create(*n2, "y"); // jlm::rvsdg::view(graph, stdout); pullin_top(gamma); @@ -74,13 +74,13 @@ test_pullin_bottom() auto b1 = jlm::tests::create_testop(graph.root(), { gamma->output(0), x }, { vt })[0]; auto b2 = jlm::tests::create_testop(graph.root(), { gamma->output(0), b1 }, { vt })[0]; - auto xp = graph.add_export(b2, { b2->Type(), "x" }); + auto & xp = jlm::llvm::GraphExport::Create(*b2, "x"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::pullin_bottom(gamma); // jlm::rvsdg::view(graph, stdout); - assert(jlm::rvsdg::node_output::node(xp->origin()) == gamma); + assert(jlm::rvsdg::node_output::node(xp.origin()) == gamma); assert(gamma->subregion(0)->nnodes() == 2); assert(gamma->subregion(1)->nnodes() == 2); } @@ -113,7 +113,7 @@ test_pull() auto g1xv = gamma1->add_exitvar({ cg1, g2xv }); - graph.add_export(g1xv, { g1xv->Type(), "" }); + GraphExport::Create(*g1xv, ""); jlm::rvsdg::view(graph, stdout); jlm::llvm::pullin pullin; diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index abe9eb7ca..38dda2c74 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -44,7 +44,7 @@ test_gamma() 
gamma->add_exitvar({ state, evs->argument(1) }); - graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "x" }); + GraphExport::Create(*gamma->output(0), "x"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::pushout pushout; @@ -92,7 +92,7 @@ test_theta() theta->set_predicate(lv1->argument()); - graph.add_export(theta->output(0), { theta->output(0)->Type(), "c" }); + GraphExport::Create(*theta->output(0), "c"); // jlm::rvsdg::view(graph.root(), stdout); jlm::llvm::pushout pushout; @@ -130,13 +130,13 @@ test_push_theta_bottom() lvs->result()->divert_to(s1); theta->set_predicate(lvc->argument()); - auto ex = graph.add_export(lvs, { lvs->Type(), "s" }); + auto & ex = GraphExport::Create(*lvs, "s"); jlm::rvsdg::view(graph, stdout); jlm::llvm::push_bottom(theta); jlm::rvsdg::view(graph, stdout); - auto storenode = jlm::rvsdg::node_output::node(ex->origin()); + auto storenode = jlm::rvsdg::node_output::node(ex.origin()); assert(jlm::rvsdg::is(storenode)); assert(storenode->input(0)->origin() == a); assert(jlm::rvsdg::is( diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 1f85b0c1a..bec3997a4 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -255,14 +255,14 @@ test_unknown_boundaries() theta->set_predicate(match); - auto ex1 = graph.add_export(lv1, { lv1->Type(), "x" }); + auto & ex1 = GraphExport::Create(*lv1, "x"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::loopunroll loopunroll(2); loopunroll.run(rm, statisticsCollector); // jlm::rvsdg::view(graph, stdout); - auto node = jlm::rvsdg::node_output::node(ex1->origin()); + auto node = jlm::rvsdg::node_output::node(ex1.origin()); assert(jlm::rvsdg::is(node)); node = jlm::rvsdg::node_output::node(node->input(1)->origin()); assert(jlm::rvsdg::is(node)); diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index cda0c9f16..4f86d00c8 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp 
+++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -30,8 +30,8 @@ types_bitstring_arithmetic_test_bitand(void) auto and0 = bitand_op::create(32, s0, s1); auto and1 = bitand_op::create(32, c0, c1); - graph.add_export(and0, { and0->Type(), "dummy" }); - graph.add_export(and1, { and1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*and0, "dummy"); + jlm::tests::GraphExport::Create(*and1, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -63,11 +63,11 @@ types_bitstring_arithmetic_test_bitashr(void) auto ashr3 = bitashr_op::create(32, c1, c2); auto ashr4 = bitashr_op::create(32, c1, c3); - graph.add_export(ashr0, { ashr0->Type(), "dummy" }); - graph.add_export(ashr1, { ashr1->Type(), "dummy" }); - graph.add_export(ashr2, { ashr2->Type(), "dummy" }); - graph.add_export(ashr3, { ashr3->Type(), "dummy" }); - graph.add_export(ashr4, { ashr4->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*ashr0, "dummy"); + jlm::tests::GraphExport::Create(*ashr1, "dummy"); + jlm::tests::GraphExport::Create(*ashr2, "dummy"); + jlm::tests::GraphExport::Create(*ashr3, "dummy"); + jlm::tests::GraphExport::Create(*ashr4, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -93,7 +93,7 @@ types_bitstring_arithmetic_test_bitdifference(void) auto diff = bitsub_op::create(32, s0, s1); - graph.add_export(diff, { diff->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*diff, "dummy"); graph.normalize(); graph.prune(); @@ -118,9 +118,9 @@ types_bitstring_arithmetic_test_bitnegate(void) auto neg1 = bitneg_op::create(32, c0); auto neg2 = bitneg_op::create(32, neg1); - graph.add_export(neg0, { neg0->Type(), "dummy" }); - graph.add_export(neg1, { neg1->Type(), "dummy" }); - graph.add_export(neg2, { neg2->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*neg0, "dummy"); + jlm::tests::GraphExport::Create(*neg1, "dummy"); + jlm::tests::GraphExport::Create(*neg2, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -146,9 +146,9 @@ 
types_bitstring_arithmetic_test_bitnot(void) auto not1 = bitnot_op::create(32, c0); auto not2 = bitnot_op::create(32, not1); - graph.add_export(not0, { not0->Type(), "dummy" }); - graph.add_export(not1, { not1->Type(), "dummy" }); - graph.add_export(not2, { not2->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*not0, "dummy"); + jlm::tests::GraphExport::Create(*not1, "dummy"); + jlm::tests::GraphExport::Create(*not2, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -176,8 +176,8 @@ types_bitstring_arithmetic_test_bitor(void) auto or0 = bitor_op::create(32, s0, s1); auto or1 = bitor_op::create(32, c0, c1); - graph.add_export(or0, { or0->Type(), "dummy" }); - graph.add_export(or1, { or1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*or0, "dummy"); + jlm::tests::GraphExport::Create(*or1, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -204,8 +204,8 @@ types_bitstring_arithmetic_test_bitproduct(void) auto product0 = bitmul_op::create(32, s0, s1); auto product1 = bitmul_op::create(32, c0, c1); - graph.add_export(product0, { product0->Type(), "dummy" }); - graph.add_export(product1, { product1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*product0, "dummy"); + jlm::tests::GraphExport::Create(*product1, "dummy"); graph.normalize(); graph.prune(); @@ -229,7 +229,7 @@ types_bitstring_arithmetic_test_bitshiproduct(void) auto shiproduct = bitsmulh_op::create(32, s0, s1); - graph.add_export(shiproduct, { shiproduct->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*shiproduct, "dummy"); graph.normalize(); graph.prune(); @@ -258,9 +258,9 @@ types_bitstring_arithmetic_test_bitshl(void) auto shl1 = bitshl_op::create(32, c0, c1); auto shl2 = bitshl_op::create(32, c0, c2); - graph.add_export(shl0, { shl0->Type(), "dummy" }); - graph.add_export(shl1, { shl1->Type(), "dummy" }); - graph.add_export(shl2, { shl2->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*shl0, "dummy"); + 
jlm::tests::GraphExport::Create(*shl1, "dummy"); + jlm::tests::GraphExport::Create(*shl2, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -290,9 +290,9 @@ types_bitstring_arithmetic_test_bitshr(void) auto shr1 = bitshr_op::create(32, c0, c1); auto shr2 = bitshr_op::create(32, c0, c2); - graph.add_export(shr0, { shr0->Type(), "dummy" }); - graph.add_export(shr1, { shr1->Type(), "dummy" }); - graph.add_export(shr2, { shr2->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*shr0, "dummy"); + jlm::tests::GraphExport::Create(*shr1, "dummy"); + jlm::tests::GraphExport::Create(*shr2, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -320,8 +320,8 @@ types_bitstring_arithmetic_test_bitsmod(void) auto smod0 = bitsmod_op::create(32, s0, s1); auto smod1 = bitsmod_op::create(32, c0, c1); - graph.add_export(smod0, { smod0->Type(), "dummy" }); - graph.add_export(smod1, { smod1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*smod0, "dummy"); + jlm::tests::GraphExport::Create(*smod1, "dummy"); graph.normalize(); graph.prune(); @@ -349,8 +349,8 @@ types_bitstring_arithmetic_test_bitsquotient(void) auto squot0 = bitsdiv_op::create(32, s0, s1); auto squot1 = bitsdiv_op::create(32, c0, c1); - graph.add_export(squot0, { squot0->Type(), "dummy" }); - graph.add_export(squot1, { squot1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*squot0, "dummy"); + jlm::tests::GraphExport::Create(*squot1, "dummy"); graph.normalize(); graph.prune(); @@ -378,8 +378,8 @@ types_bitstring_arithmetic_test_bitsum(void) auto sum0 = bitadd_op::create(32, s0, s1); auto sum1 = bitadd_op::create(32, c0, c1); - graph.add_export(sum0, { sum0->Type(), "dummy" }); - graph.add_export(sum1, { sum1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*sum0, "dummy"); + jlm::tests::GraphExport::Create(*sum1, "dummy"); graph.normalize(); graph.prune(); @@ -403,7 +403,7 @@ types_bitstring_arithmetic_test_bituhiproduct(void) auto uhiproduct = 
bitumulh_op::create(32, s0, s1); - graph.add_export(uhiproduct, { uhiproduct->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*uhiproduct, "dummy"); graph.normalize(); graph.prune(); @@ -430,8 +430,8 @@ types_bitstring_arithmetic_test_bitumod(void) auto umod0 = bitumod_op::create(32, s0, s1); auto umod1 = bitumod_op::create(32, c0, c1); - graph.add_export(umod0, { umod0->Type(), "dummy" }); - graph.add_export(umod1, { umod1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*umod0, "dummy"); + jlm::tests::GraphExport::Create(*umod1, "dummy"); graph.normalize(); graph.prune(); @@ -459,8 +459,8 @@ types_bitstring_arithmetic_test_bituquotient(void) auto uquot0 = bitudiv_op::create(32, s0, s1); auto uquot1 = bitudiv_op::create(32, c0, c1); - graph.add_export(uquot0, { uquot0->Type(), "dummy" }); - graph.add_export(uquot1, { uquot1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*uquot0, "dummy"); + jlm::tests::GraphExport::Create(*uquot1, "dummy"); graph.normalize(); graph.prune(); @@ -488,8 +488,8 @@ types_bitstring_arithmetic_test_bitxor(void) auto xor0 = bitxor_op::create(32, s0, s1); auto xor1 = bitxor_op::create(32, c0, c1); - graph.add_export(xor0, { xor0->Type(), "dummy" }); - graph.add_export(xor1, { xor1->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*xor0, "dummy"); + jlm::tests::GraphExport::Create(*xor1, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -534,10 +534,10 @@ types_bitstring_comparison_test_bitequal(void) auto equal2 = biteq_op::create(32, c0, c1); auto equal3 = biteq_op::create(32, c0, c2); - graph.add_export(equal0, { equal0->Type(), "dummy" }); - graph.add_export(equal1, { equal1->Type(), "dummy" }); - graph.add_export(equal2, { equal2->Type(), "dummy" }); - graph.add_export(equal3, { equal3->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*equal0, "dummy"); + jlm::tests::GraphExport::Create(*equal1, "dummy"); + jlm::tests::GraphExport::Create(*equal2, "dummy"); + 
jlm::tests::GraphExport::Create(*equal3, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -568,10 +568,10 @@ types_bitstring_comparison_test_bitnotequal(void) auto nequal2 = bitne_op::create(32, c0, c1); auto nequal3 = bitne_op::create(32, c0, c2); - graph.add_export(nequal0, { nequal0->Type(), "dummy" }); - graph.add_export(nequal1, { nequal1->Type(), "dummy" }); - graph.add_export(nequal2, { nequal2->Type(), "dummy" }); - graph.add_export(nequal3, { nequal3->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*nequal0, "dummy"); + jlm::tests::GraphExport::Create(*nequal1, "dummy"); + jlm::tests::GraphExport::Create(*nequal2, "dummy"); + jlm::tests::GraphExport::Create(*nequal3, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -604,11 +604,11 @@ types_bitstring_comparison_test_bitsgreater(void) auto sgreater3 = bitsgt_op::create(32, s0, c2); auto sgreater4 = bitsgt_op::create(32, c3, s1); - graph.add_export(sgreater0, { sgreater0->Type(), "dummy" }); - graph.add_export(sgreater1, { sgreater1->Type(), "dummy" }); - graph.add_export(sgreater2, { sgreater2->Type(), "dummy" }); - graph.add_export(sgreater3, { sgreater3->Type(), "dummy" }); - graph.add_export(sgreater4, { sgreater4->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*sgreater0, "dummy"); + jlm::tests::GraphExport::Create(*sgreater1, "dummy"); + jlm::tests::GraphExport::Create(*sgreater2, "dummy"); + jlm::tests::GraphExport::Create(*sgreater3, "dummy"); + jlm::tests::GraphExport::Create(*sgreater4, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -643,12 +643,12 @@ types_bitstring_comparison_test_bitsgreatereq(void) auto sgreatereq4 = bitsge_op::create(32, c2, s0); auto sgreatereq5 = bitsge_op::create(32, s1, c3); - graph.add_export(sgreatereq0, { sgreatereq0->Type(), "dummy" }); - graph.add_export(sgreatereq1, { sgreatereq1->Type(), "dummy" }); - graph.add_export(sgreatereq2, { sgreatereq2->Type(), "dummy" }); - graph.add_export(sgreatereq3, 
{ sgreatereq3->Type(), "dummy" }); - graph.add_export(sgreatereq4, { sgreatereq4->Type(), "dummy" }); - graph.add_export(sgreatereq5, { sgreatereq5->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*sgreatereq0, "dummy"); + jlm::tests::GraphExport::Create(*sgreatereq1, "dummy"); + jlm::tests::GraphExport::Create(*sgreatereq2, "dummy"); + jlm::tests::GraphExport::Create(*sgreatereq3, "dummy"); + jlm::tests::GraphExport::Create(*sgreatereq4, "dummy"); + jlm::tests::GraphExport::Create(*sgreatereq5, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -683,11 +683,11 @@ types_bitstring_comparison_test_bitsless(void) auto sless3 = bitslt_op::create(32, c2, s0); auto sless4 = bitslt_op::create(32, s1, c3); - graph.add_export(sless0, { sless0->Type(), "dummy" }); - graph.add_export(sless1, { sless1->Type(), "dummy" }); - graph.add_export(sless2, { sless2->Type(), "dummy" }); - graph.add_export(sless3, { sless3->Type(), "dummy" }); - graph.add_export(sless4, { sless4->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*sless0, "dummy"); + jlm::tests::GraphExport::Create(*sless1, "dummy"); + jlm::tests::GraphExport::Create(*sless2, "dummy"); + jlm::tests::GraphExport::Create(*sless3, "dummy"); + jlm::tests::GraphExport::Create(*sless4, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -722,12 +722,12 @@ types_bitstring_comparison_test_bitslesseq(void) auto slesseq4 = bitsle_op::create(32, s0, c2); auto slesseq5 = bitsle_op::create(32, c3, s1); - graph.add_export(slesseq0, { slesseq0->Type(), "dummy" }); - graph.add_export(slesseq1, { slesseq1->Type(), "dummy" }); - graph.add_export(slesseq2, { slesseq2->Type(), "dummy" }); - graph.add_export(slesseq3, { slesseq3->Type(), "dummy" }); - graph.add_export(slesseq4, { slesseq4->Type(), "dummy" }); - graph.add_export(slesseq5, { slesseq5->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*slesseq0, "dummy"); + jlm::tests::GraphExport::Create(*slesseq1, "dummy"); + 
jlm::tests::GraphExport::Create(*slesseq2, "dummy"); + jlm::tests::GraphExport::Create(*slesseq3, "dummy"); + jlm::tests::GraphExport::Create(*slesseq4, "dummy"); + jlm::tests::GraphExport::Create(*slesseq5, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -762,11 +762,11 @@ types_bitstring_comparison_test_bitugreater(void) auto ugreater3 = bitugt_op::create(32, s0, c2); auto ugreater4 = bitugt_op::create(32, c3, s1); - graph.add_export(ugreater0, { ugreater0->Type(), "dummy" }); - graph.add_export(ugreater1, { ugreater1->Type(), "dummy" }); - graph.add_export(ugreater2, { ugreater2->Type(), "dummy" }); - graph.add_export(ugreater3, { ugreater3->Type(), "dummy" }); - graph.add_export(ugreater4, { ugreater4->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*ugreater0, "dummy"); + jlm::tests::GraphExport::Create(*ugreater1, "dummy"); + jlm::tests::GraphExport::Create(*ugreater2, "dummy"); + jlm::tests::GraphExport::Create(*ugreater3, "dummy"); + jlm::tests::GraphExport::Create(*ugreater4, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -801,12 +801,12 @@ types_bitstring_comparison_test_bitugreatereq(void) auto ugreatereq4 = bituge_op::create(32, c2, s0); auto ugreatereq5 = bituge_op::create(32, s1, c3); - graph.add_export(ugreatereq0, { ugreatereq0->Type(), "dummy" }); - graph.add_export(ugreatereq1, { ugreatereq1->Type(), "dummy" }); - graph.add_export(ugreatereq2, { ugreatereq2->Type(), "dummy" }); - graph.add_export(ugreatereq3, { ugreatereq3->Type(), "dummy" }); - graph.add_export(ugreatereq4, { ugreatereq4->Type(), "dummy" }); - graph.add_export(ugreatereq5, { ugreatereq5->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*ugreatereq0, "dummy"); + jlm::tests::GraphExport::Create(*ugreatereq1, "dummy"); + jlm::tests::GraphExport::Create(*ugreatereq2, "dummy"); + jlm::tests::GraphExport::Create(*ugreatereq3, "dummy"); + jlm::tests::GraphExport::Create(*ugreatereq4, "dummy"); + 
jlm::tests::GraphExport::Create(*ugreatereq5, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -841,11 +841,11 @@ types_bitstring_comparison_test_bituless(void) auto uless3 = bitult_op::create(32, c2, s0); auto uless4 = bitult_op::create(32, s1, c3); - graph.add_export(uless0, { uless0->Type(), "dummy" }); - graph.add_export(uless1, { uless1->Type(), "dummy" }); - graph.add_export(uless2, { uless2->Type(), "dummy" }); - graph.add_export(uless3, { uless3->Type(), "dummy" }); - graph.add_export(uless4, { uless4->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*uless0, "dummy"); + jlm::tests::GraphExport::Create(*uless1, "dummy"); + jlm::tests::GraphExport::Create(*uless2, "dummy"); + jlm::tests::GraphExport::Create(*uless3, "dummy"); + jlm::tests::GraphExport::Create(*uless4, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -880,12 +880,12 @@ types_bitstring_comparison_test_bitulesseq(void) auto ulesseq4 = bitule_op::create(32, s0, c2); auto ulesseq5 = bitule_op::create(32, c3, s1); - graph.add_export(ulesseq0, { ulesseq0->Type(), "dummy" }); - graph.add_export(ulesseq1, { ulesseq1->Type(), "dummy" }); - graph.add_export(ulesseq2, { ulesseq2->Type(), "dummy" }); - graph.add_export(ulesseq3, { ulesseq3->Type(), "dummy" }); - graph.add_export(ulesseq4, { ulesseq4->Type(), "dummy" }); - graph.add_export(ulesseq5, { ulesseq5->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*ulesseq0, "dummy"); + jlm::tests::GraphExport::Create(*ulesseq1, "dummy"); + jlm::tests::GraphExport::Create(*ulesseq2, "dummy"); + jlm::tests::GraphExport::Create(*ulesseq3, "dummy"); + jlm::tests::GraphExport::Create(*ulesseq4, "dummy"); + jlm::tests::GraphExport::Create(*ulesseq5, "dummy"); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); @@ -989,13 +989,13 @@ types_bitstring_test_normalize(void) assert(sum1->operation() == bitadd_op(32)); assert(sum1->ninputs() == 2); - auto exp = graph.add_export(sum1->output(0), { sum1->output(0)->Type(), 
"dummy" }); + auto & exp = jlm::tests::GraphExport::Create(*sum1->output(0), "dummy"); sum_nf->set_mutable(true); graph.normalize(); graph.prune(); - auto origin = dynamic_cast(exp->origin()); + auto origin = dynamic_cast(exp.origin()); assert(origin->node()->operation() == bitadd_op(32)); assert(origin->node()->ninputs() == 2); auto op1 = origin->node()->input(0)->origin(); diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 585f8d73f..0383ab27c 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -29,7 +29,7 @@ test_flattened_binary_reduction() auto o2 = simple_node::create_normalized(graph.root(), op, { o1, i2 })[0]; auto o3 = simple_node::create_normalized(graph.root(), op, { o2, i3 })[0]; - auto ex = graph.add_export(o3, { o3->Type(), "" }); + auto & ex = jlm::tests::GraphExport::Create(*o3, ""); graph.prune(); jlm::rvsdg::view(graph, stdout); @@ -41,7 +41,7 @@ test_flattened_binary_reduction() assert(graph.root()->nnodes() == 3); - auto node0 = node_output::node(ex->origin()); + auto node0 = node_output::node(ex.origin()); assert(is(node0)); auto node1 = node_output::node(node0->input(0)->origin()); @@ -63,7 +63,7 @@ test_flattened_binary_reduction() auto o2 = simple_node::create_normalized(graph.root(), op, { o1, i2 })[0]; auto o3 = simple_node::create_normalized(graph.root(), op, { o2, i3 })[0]; - auto ex = graph.add_export(o3, { o3->Type(), "" }); + auto & ex = jlm::tests::GraphExport::Create(*o3, ""); graph.prune(); jlm::rvsdg::view(graph, stdout); @@ -75,7 +75,7 @@ test_flattened_binary_reduction() assert(graph.root()->nnodes() == 3); - auto node0 = node_output::node(ex->origin()); + auto node0 = node_output::node(ex.origin()); assert(is(node0)); auto node1 = node_output::node(node0->input(0)->origin()); diff --git a/tests/jlm/rvsdg/test-bottomup.cpp b/tests/jlm/rvsdg/test-bottomup.cpp index 1b5a32ccf..f6138ca8c 100644 --- a/tests/jlm/rvsdg/test-bottomup.cpp +++ 
b/tests/jlm/rvsdg/test-bottomup.cpp @@ -17,7 +17,7 @@ test_initialization() auto n1 = jlm::tests::test_op::create(graph.root(), {}, {}); auto n2 = jlm::tests::test_op::create(graph.root(), {}, { vtype }); - graph.add_export(n2->output(0), { n2->output(0)->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); bool n1_visited = false; bool n2_visited = false; @@ -41,7 +41,7 @@ test_basic_traversal() auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); - graph.add_export(n2->output(0), { n2->output(0)->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { jlm::rvsdg::node * tmp; diff --git a/tests/jlm/rvsdg/test-cse.cpp b/tests/jlm/rvsdg/test-cse.cpp index 74a10ddf5..93fba8b14 100644 --- a/tests/jlm/rvsdg/test-cse.cpp +++ b/tests/jlm/rvsdg/test-cse.cpp @@ -20,8 +20,8 @@ test_main() auto o1 = jlm::tests::test_op::create(graph.root(), {}, { t })->output(0); auto o2 = jlm::tests::test_op::create(graph.root(), { i }, { t })->output(0); - auto e1 = graph.add_export(o1, { o1->Type(), "o1" }); - auto e2 = graph.add_export(o2, { o2->Type(), "o2" }); + auto & e1 = jlm::tests::GraphExport::Create(*o1, "o1"); + auto & e2 = jlm::tests::GraphExport::Create(*o2, "o2"); auto nf = dynamic_cast( graph.node_normal_form(typeid(jlm::tests::test_op))); @@ -30,27 +30,27 @@ test_main() auto o3 = jlm::tests::create_testop(graph.root(), {}, { t })[0]; auto o4 = jlm::tests::create_testop(graph.root(), { i }, { t })[0]; - auto e3 = graph.add_export(o3, { o3->Type(), "o3" }); - auto e4 = graph.add_export(o4, { o4->Type(), "o4" }); + auto & e3 = jlm::tests::GraphExport::Create(*o3, "o3"); + auto & e4 = jlm::tests::GraphExport::Create(*o4, "o4"); nf->set_mutable(true); graph.normalize(); - assert(e1->origin() == e3->origin()); - assert(e2->origin() == e4->origin()); + assert(e1.origin() == e3.origin()); + assert(e2.origin() == 
e4.origin()); auto o5 = jlm::tests::create_testop(graph.root(), {}, { t })[0]; - assert(o5 == e1->origin()); + assert(o5 == e1.origin()); auto o6 = jlm::tests::create_testop(graph.root(), { i }, { t })[0]; - assert(o6 == e2->origin()); + assert(o6 == e2.origin()); nf->set_cse(false); auto o7 = jlm::tests::create_testop(graph.root(), {}, { t })[0]; - assert(o7 != e1->origin()); + assert(o7 != e1.origin()); graph.normalize(); - assert(o7 != e1->origin()); + assert(o7 != e1.origin()); return 0; } diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index b335ec4f8..b2f013be0 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -31,7 +31,7 @@ test_gamma(void) auto ev2 = gamma->add_entryvar(v2); gamma->add_exitvar({ ev0->argument(0), ev1->argument(1), ev2->argument(2) }); - graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*gamma->output(0), "dummy"); assert(gamma && gamma->operation() == jlm::rvsdg::gamma_op(3)); @@ -70,11 +70,11 @@ test_predicate_reduction(void) auto ev2 = gamma->add_entryvar(v2); gamma->add_exitvar({ ev0->argument(0), ev1->argument(1), ev2->argument(2) }); - auto r = graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "" }); + auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); graph.normalize(); // jlm::rvsdg::view(graph.root(), stdout); - assert(r->origin() == v1); + assert(r.origin() == v1); graph.prune(); assert(graph.root()->nnodes() == 0); @@ -97,11 +97,11 @@ test_invariant_reduction(void) auto ev = gamma->add_entryvar(v); gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); - auto r = graph.add_export(gamma->output(0), { gamma->output(0)->Type(), "" }); + auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); graph.normalize(); // jlm::rvsdg::view(graph.root(), stdout); - assert(r->origin() == v); + assert(r.origin() == v); graph.prune(); assert(graph.root()->nnodes() == 0); @@ -130,19 
+130,19 @@ test_control_constant_reduction() auto xv1 = gamma->add_exitvar({ t, f }); auto xv2 = gamma->add_exitvar({ n0, n1 }); - auto ex1 = graph.add_export(xv1, { xv1->Type(), "" }); - auto ex2 = graph.add_export(xv2, { xv2->Type(), "" }); + auto & ex1 = jlm::tests::GraphExport::Create(*xv1, ""); + auto & ex2 = jlm::tests::GraphExport::Create(*xv2, ""); jlm::rvsdg::view(graph.root(), stdout); graph.normalize(); jlm::rvsdg::view(graph.root(), stdout); - auto match = node_output::node(ex1->origin()); + auto match = node_output::node(ex1.origin()); assert(match && is(match->operation())); auto & match_op = to_match_op(match->operation()); assert(match_op.default_alternative() == 0); - assert(node_output::node(ex2->origin()) == gamma); + assert(node_output::node(ex2.origin()) == gamma); } static void @@ -166,13 +166,13 @@ test_control_constant_reduction2() auto xv = gamma->add_exitvar({ t1, t2, t3, f }); - auto ex = graph.add_export(xv, { xv->Type(), "" }); + auto & ex = jlm::tests::GraphExport::Create(*xv, ""); jlm::rvsdg::view(graph.root(), stdout); graph.normalize(); jlm::rvsdg::view(graph.root(), stdout); - auto match = node_output::node(ex->origin()); + auto match = node_output::node(ex.origin()); assert(is(match)); } @@ -207,8 +207,8 @@ TestRemoveGammaOutputsWhere() auto gammaOutput3 = gammaNode->add_exitvar({ gammaInput3->argument(0), gammaInput3->argument(1) }); - rvsdg.add_export(gammaOutput0, { gammaOutput0->Type(), "" }); - rvsdg.add_export(gammaOutput2, { gammaOutput2->Type(), "" }); + jlm::tests::GraphExport::Create(*gammaOutput0, ""); + jlm::tests::GraphExport::Create(*gammaOutput2, ""); // Act & Assert assert(gammaNode->noutputs() == 4); @@ -267,8 +267,8 @@ TestPruneOutputs() gammaNode->add_exitvar({ gammaInput2->argument(0), gammaInput2->argument(1) }); gammaNode->add_exitvar({ gammaInput3->argument(0), gammaInput3->argument(1) }); - rvsdg.add_export(gammaOutput0, { gammaOutput0->Type(), "" }); - rvsdg.add_export(gammaOutput2, { gammaOutput2->Type(), 
"" }); + jlm::tests::GraphExport::Create(*gammaOutput0, ""); + jlm::tests::GraphExport::Create(*gammaOutput2, ""); // Act gammaNode->PruneOutputs(); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index b326984ed..ff24405e4 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -48,8 +48,8 @@ test_recursive_prune() auto n6 = jlm::tests::structural_node::create(n3->subregion(0), 1); - graph.add_export(n2->output(0), { n2->output(0)->Type(), "n2" }); - graph.add_export(o1, { o1->Type(), "n3" }); + jlm::tests::GraphExport::Create(*n2->output(0), "n2"); + jlm::tests::GraphExport::Create(*o1, "n3"); jlm::rvsdg::view(graph.root(), stdout); graph.prune(); @@ -97,8 +97,8 @@ test_prune_replace(void) auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0) }, { type }); - graph.add_export(n2->output(0), { n2->output(0)->Type(), "n2" }); - graph.add_export(n3->output(0), { n2->output(0)->Type(), "n3" }); + jlm::tests::GraphExport::Create(*n2->output(0), "n2"); + jlm::tests::GraphExport::Create(*n3->output(0), "n3"); auto n4 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 4ce6549ff..b08446d19 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -102,7 +102,7 @@ test_node_depth() auto bin = jlm::tests::test_op::create(graph.root(), { null->output(0), x }, { vt }); auto un = jlm::tests::test_op::create(graph.root(), { bin->output(0) }, { vt }); - graph.add_export(un->output(0), { un->output(0)->Type(), "x" }); + jlm::tests::GraphExport::Create(*un->output(0), "x"); jlm::rvsdg::view(graph.root(), stdout); diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index 0cca733a1..ce03f73e2 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ 
b/tests/jlm/rvsdg/test-statemux.cpp @@ -34,7 +34,7 @@ test_mux_mux_reduction() auto mux2 = jlm::rvsdg::create_state_split(st, z, 2); auto mux3 = jlm::rvsdg::create_state_merge(st, { mux1, mux2[0], mux2[1], z }); - auto ex = graph.add_export(mux3, { mux3->Type(), "m" }); + auto & ex = jlm::tests::GraphExport::Create(*mux3, "m"); // jlm::rvsdg::view(graph.root(), stdout); @@ -45,7 +45,7 @@ test_mux_mux_reduction() // jlm::rvsdg::view(graph.root(), stdout); - auto node = node_output::node(ex->origin()); + auto node = node_output::node(ex.origin()); assert(node->ninputs() == 4); assert(node->input(0)->origin() == x); assert(node->input(1)->origin() == y); @@ -68,7 +68,7 @@ test_multiple_origin_reduction() auto x = &jlm::tests::GraphImport::Create(graph, st, "x"); auto mux1 = jlm::rvsdg::create_state_merge(st, { x, x }); - auto ex = graph.add_export(mux1, { mux1->Type(), "m" }); + auto & ex = jlm::tests::GraphExport::Create(*mux1, "m"); view(graph.root(), stdout); @@ -79,7 +79,7 @@ test_multiple_origin_reduction() view(graph.root(), stdout); - assert(node_output::node(ex->origin())->ninputs() == 1); + assert(node_output::node(ex.origin())->ninputs() == 1); } static int diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index a424ea04f..1433a4c62 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -32,7 +32,7 @@ TestThetaCreation() lv3->result()->divert_to(lv3->argument()); theta->set_predicate(lv1->argument()); - graph.add_export(theta->output(0), { theta->output(0)->Type(), "exp" }); + jlm::tests::GraphExport::Create(*theta->output(0), "exp"); auto theta2 = static_cast(theta)->copy(graph.root(), { imp1, imp2, imp3 }); jlm::rvsdg::view(graph.root(), stdout); @@ -68,7 +68,7 @@ TestRemoveThetaOutputsWhere() auto thetaOutput2 = thetaNode->add_loopvar(y); thetaNode->set_predicate(thetaOutput0->argument()); - rvsdg.add_export(thetaOutput0, { ctltype::Create(2), "" }); + jlm::tests::GraphExport::Create(*thetaOutput0, 
""); // Act & Assert auto deadInputs = thetaNode->RemoveThetaOutputsWhere( @@ -118,7 +118,7 @@ TestPruneThetaOutputs() thetaNode->add_loopvar(y); thetaNode->set_predicate(thetaOutput0->argument()); - rvsdg.add_export(thetaOutput0, { ctltype::Create(2), "" }); + jlm::tests::GraphExport::Create(*thetaOutput0, ""); // Act auto deadInputs = thetaNode->PruneThetaOutputs(); @@ -159,7 +159,7 @@ TestRemoveThetaInputsWhere() thetaOutput1->result()->divert_to(result); thetaOutput2->result()->divert_to(result); - rvsdg.add_export(thetaOutput0, { ctltype::Create(2), "" }); + jlm::tests::GraphExport::Create(*thetaOutput0, ""); // Act & Assert auto deadOutputs = thetaNode->RemoveThetaInputsWhere( @@ -215,7 +215,7 @@ TestPruneThetaInputs() thetaOutput1->result()->divert_to(result); thetaOutput2->result()->divert_to(result); - rvsdg.add_export(thetaOutput0, { ctltype::Create(2), "" }); + jlm::tests::GraphExport::Create(*thetaOutput0, ""); // Act auto deadOutputs = thetaNode->PruneThetaInputs(); diff --git a/tests/jlm/rvsdg/test-topdown.cpp b/tests/jlm/rvsdg/test-topdown.cpp index 52b7acf2a..3e56c8f3c 100644 --- a/tests/jlm/rvsdg/test-topdown.cpp +++ b/tests/jlm/rvsdg/test-topdown.cpp @@ -21,9 +21,9 @@ test_initialization() auto unary = jlm::tests::test_op::create(graph.root(), { i }, { vtype }); auto binary = jlm::tests::test_op::create(graph.root(), { i, unary->output(0) }, { vtype }); - graph.add_export(constant->output(0), { constant->output(0)->Type(), "c" }); - graph.add_export(unary->output(0), { unary->output(0)->Type(), "u" }); - graph.add_export(binary->output(0), { binary->output(0)->Type(), "b" }); + jlm::tests::GraphExport::Create(*constant->output(0), "c"); + jlm::tests::GraphExport::Create(*unary->output(0), "u"); + jlm::tests::GraphExport::Create(*binary->output(0), "b"); bool unary_visited = false; bool binary_visited = false; @@ -52,7 +52,7 @@ test_basic_traversal() auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); auto n2 = 
jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); - graph.add_export(n2->output(0), { n2->output(0)->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { jlm::rvsdg::node * tmp; @@ -105,7 +105,7 @@ test_traversal_insertion() auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); - graph.add_export(n2->output(0), { n2->output(0)->Type(), "dummy" }); + jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { jlm::rvsdg::node * node; diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index bf6113233..98b204110 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -14,6 +14,13 @@ GraphImport::Copy(rvsdg::region & region, rvsdg::structural_input * input) return GraphImport::Create(*region.graph(), Type(), Name()); } +GraphExport & +GraphExport::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +{ + JLM_ASSERT(output == nullptr); + return GraphExport::Create(origin, Name()); +} + /* unary operation */ unary_op::~unary_op() noexcept diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 3faff562a..a052c02e3 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -43,6 +43,29 @@ class GraphImport final : public rvsdg::GraphImport } }; +/** + * Represents an export from the RVSDG of an internal entity. + * It can be used for testing of graph exports. 
+ */ +class GraphExport final : public rvsdg::GraphExport +{ + GraphExport(rvsdg::output & origin, std::string name) + : rvsdg::GraphExport(origin, std::move(name)) + {} + +public: + GraphExport & + Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + + static GraphExport & + Create(rvsdg::output & origin, std::string name) + { + auto graphExport = new GraphExport(origin, std::move(name)); + origin.region()->graph()->root()->append_result(graphExport); + return *graphExport; + } +}; + /* unary operation */ class unary_op final : public rvsdg::unary_op From 24633f44497c8813e6b0a3bb1ea9ca881596e8f8 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Tue, 20 Aug 2024 22:51:35 +0200 Subject: [PATCH 048/170] [AndersenAgnostic] Add difference propagation (#587) Adds difference propagation, which is one of the more "invasive" techniques, as it needs to intercept modifications to the points-to sets. When iterating over a points-to set, you sometimes only need the set of pointees that have been added since last visit, while you other times need all pointees. For the flags, we get away with not intercepting the setting of them. Difference propagation can just remember if the flag was set the last time the work item was visited, which only takes up 2 bits per PointerObject. 
--- jlm/llvm/Makefile.sub | 2 + jlm/llvm/opt/alias-analyses/Andersen.cpp | 17 +- jlm/llvm/opt/alias-analyses/Andersen.hpp | 19 ++ .../alias-analyses/DifferencePropagation.hpp | 237 ++++++++++++++++++ .../opt/alias-analyses/PointerObjectSet.cpp | 150 +++++++++-- .../opt/alias-analyses/PointerObjectSet.hpp | 30 ++- .../TestDifferencePropagation.cpp | 114 +++++++++ .../alias-analyses/TestPointerObjectSet.cpp | 3 +- 8 files changed, 543 insertions(+), 29 deletions(-) create mode 100644 jlm/llvm/opt/alias-analyses/DifferencePropagation.hpp create mode 100644 tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index edac263e5..06a636c4a 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -69,6 +69,7 @@ libllvm_HEADERS = \ jlm/llvm/opt/inlining.hpp \ jlm/llvm/opt/cne.hpp \ jlm/llvm/opt/push.hpp \ + jlm/llvm/opt/alias-analyses/DifferencePropagation.hpp \ jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp \ jlm/llvm/opt/alias-analyses/MemoryNodeProvider.hpp \ jlm/llvm/opt/alias-analyses/OnlineCycleDetection.hpp \ @@ -178,6 +179,7 @@ libllvm_TESTS += \ tests/jlm/llvm/ir/TestAnnotation \ tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider \ tests/jlm/llvm/opt/alias-analyses/TestAndersen \ + tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation \ tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection \ tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder \ tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet \ diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 805e5fde0..05f4b1ac5 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -47,6 +47,8 @@ Andersen::Configuration::ToString() const str << "HybridCD_"; if (EnableLazyCycleDetection_) str << "LazyCD_"; + if (EnableDifferencePropagation_) + str << "DP_"; } else { @@ -62,13 +64,19 @@ std::vector 
Andersen::Configuration::GetAllConfigurations() { std::vector configs; - + auto PickDifferencePropagation = [&](Configuration config) + { + config.EnableDifferencePropagation(false); + configs.push_back(config); + config.EnableDifferencePropagation(true); + configs.push_back(config); + }; auto PickLazyCycleDetection = [&](Configuration config) { config.EnableLazyCycleDetection(false); - configs.push_back(config); + PickDifferencePropagation(config); config.EnableLazyCycleDetection(true); - configs.push_back(config); + PickDifferencePropagation(config); }; auto PickHybridCycleDetection = [&](Configuration config) { @@ -1113,7 +1121,8 @@ Andersen::SolveConstraints( config.GetWorklistSoliverPolicy(), config.IsOnlineCycleDetectionEnabled(), config.IsHybridCycleDetectionEnabled(), - config.IsLazyCycleDetectionEnabled()); + config.IsLazyCycleDetectionEnabled(), + config.IsDifferencePropagationEnabled()); statistics.StopConstraintSolvingWorklistStatistics(worklistStatistics); } else diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index a00296779..5f5a0d514 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -178,6 +178,23 @@ class Andersen final : public AliasAnalysis return EnableLazyCycleDetection_; } + /** + * Enables or disables difference propagation in the Worklist solver, as described by + * Pearce, 2003: "Online cycle detection and difference propagation for pointer analysis" + * Only used by the worklist solver. 
+ */ + void + EnableDifferencePropagation(bool enable) noexcept + { + EnableDifferencePropagation_ = enable; + } + + [[nodiscard]] bool + IsDifferencePropagationEnabled() const noexcept + { + return EnableDifferencePropagation_; + } + [[nodiscard]] std::string ToString() const; @@ -197,6 +214,7 @@ class Andersen final : public AliasAnalysis config.EnableOnlineCycleDetection(false); config.EnableHybridCycleDetection(true); config.EnableLazyCycleDetection(true); + config.EnableDifferencePropagation(true); return config; } @@ -232,6 +250,7 @@ class Andersen final : public AliasAnalysis bool EnableOnlineCycleDetection_ = false; bool EnableHybridCycleDetection_ = false; bool EnableLazyCycleDetection_ = false; + bool EnableDifferencePropagation_ = false; }; ~Andersen() noexcept override = default; diff --git a/jlm/llvm/opt/alias-analyses/DifferencePropagation.hpp b/jlm/llvm/opt/alias-analyses/DifferencePropagation.hpp new file mode 100644 index 000000000..0c9054c4f --- /dev/null +++ b/jlm/llvm/opt/alias-analyses/DifferencePropagation.hpp @@ -0,0 +1,237 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. + */ + +#ifndef JLM_LLVM_OPT_ALIAS_ANALYSES_DIFFERENCEPROPAGATION_HPP +#define JLM_LLVM_OPT_ALIAS_ANALYSES_DIFFERENCEPROPAGATION_HPP + +#include + +#include + +namespace jlm::llvm::aa +{ + +class DifferencePropagation +{ + +public: + explicit DifferencePropagation(PointerObjectSet & set) + : Set_(set) + {} + + /** + * Must be called before using any other methods. 
+ */ + void + Initialize() + { + NewPointees_.resize(Set_.NumPointerObjects()); + NewPointeesTracked_.resize(Set_.NumPointerObjects(), false); + PointsToExternalFlagSeen_.resize(Set_.NumPointerObjects(), false); + PointeesEscapeFlagSeen_.resize(Set_.NumPointerObjects(), false); + } + + [[nodiscard]] bool + IsInitialized() const noexcept + { + return NewPointees_.size() == Set_.NumPointerObjects(); + } + + /** + * Starts tracking any pointees added to \p index from this point onwards. + * @param index the index of the PointerObject, must be a unification root. + */ + void + ClearNewPointees(PointerObjectIndex index) + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(index)); + + NewPointees_[index].Clear(); + NewPointeesTracked_[index] = true; + }; + + /** + * Makes P(pointer) contain pointee + * @param pointer the PointerObject which should contain pointee in its points-to set. + * Must be a unification root. + * @param pointee the PointerObject which should be pointed to by pointer + * @return true if this operation added a new pointee to P(pointer) + */ + bool + AddToPointsToSet(PointerObjectIndex pointer, PointerObjectIndex pointee) + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(pointer)); + + // If pointees added to the superset are being tracked, use the tracking version + bool newPointee = Set_.AddToPointsToSet(pointer, pointee); + if (newPointee && NewPointeesTracked_[pointer]) + return NewPointees_[pointer].Insert(pointee); + + return newPointee; + } + + /** + * Makes P(superset) a superset of P(subset), and propagates the PointsToExternal flag if set. + * @param superset the PointerObject which should point to everything the subset points to + * Must be a unification root. + * @param subset a PointerObject which should have all its pointees also be pointees of superset. + * @return true if this operation adds any pointees or flags to superset. 
+ */ + bool + MakePointsToSetSuperset(PointerObjectIndex superset, PointerObjectIndex subset) + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(superset)); + + // If pointees added to the superset are being tracked, use the tracking version + if (NewPointeesTracked_[superset]) + return Set_.MakePointsToSetSuperset(superset, subset, NewPointees_[superset]); + else + return Set_.MakePointsToSetSuperset(superset, subset); + } + + /** + * Gets the pointees of a PointerObject that have been added since the last time + * ClearNewPointees(index) was called. + * If new pointees of index are not being tracked, all pointees are returned. + * @param index the index of the PointerObject, must be a unification root. + * @return a reference to either all new pointees, or all pointees of index. + */ + [[nodiscard]] const util::HashSet & + GetNewPointees(PointerObjectIndex index) const + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(index)); + if (NewPointeesTracked_[index]) + return NewPointees_[index]; + return Set_.GetPointsToSet(index); + } + + /** + * Clears the tracked set of new pointees of \p index, and stops tracking it. + * @param index the index of the PointerObject, must be a unification root. + */ + void + OnRemoveAllPointees(PointerObjectIndex index) + { + NewPointeesTracked_[index] = false; + NewPointees_[index].Clear(); + } + + /** + * If the given PointerObject has the PointsToExternal flag now, + * and MarkPointsToExternalAsHandled(index) has not been called, return true. Otherwise false. + * An exception is if the PointerObject has been unified with other PointerObjects, + * and some of the other PointerObjects did not have that flag handled already. + * @param index the index of the PointerObject being queried. Must be a unification root + * @return true if the PointerObject is newly flagged as PointsToExternal. 
+ */ + [[nodiscard]] bool + PointsToExternalIsNew(PointerObjectIndex index) + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(index)); + if (!Set_.IsPointingToExternal(index)) + return false; + return !PointsToExternalFlagSeen_[index]; + } + + /** + * Call once the addition of the new flag PointsToExternal has been handled. + * After this call, PointsToExternalIsNew(index) will return false (unless new unifications). + * @param index the index of the PointerObject whose PointsToExternal flag has been handled. + * Must be a unification root. + */ + void + MarkPointsToExternalAsHandled(PointerObjectIndex index) + { + JLM_ASSERT(Set_.IsUnificationRoot(index)); + JLM_ASSERT(Set_.IsPointingToExternal(index)); + PointsToExternalFlagSeen_[index] = true; + } + + /** + * If the given PointerObject has the PointeesEscape flag now, + * and MarkPointeesEscapeAsHandled(index) has not been called, return true. Otherwise false. + * An exception is if the PointerObject has been unified with other PointerObjects, + * and some of the other PointerObjects did not have that flag handled already. + * @param index the index of the PointerObject being queried. Must be a unification root + * @return true if the PointerObject is newly flagged as PointeesEscaping. + */ + [[nodiscard]] bool + PointeesEscapeIsNew(PointerObjectIndex index) + { + JLM_ASSERT(IsInitialized()); + JLM_ASSERT(Set_.IsUnificationRoot(index)); + if (!Set_.HasPointeesEscaping(index)) + return false; + return !PointeesEscapeFlagSeen_[index]; + } + + /** + * Call once the addition of the new flag PointeesEscape has been handled. + * After this call, PointeesEscapeIsNew(index) will return false (unless new unifications). + * @param index the index of the PointerObject whose PointeesEscape flag has been handled. + * Must be a unification root.
+ */ + void + MarkPointeesEscapeAsHandled(PointerObjectIndex index) + { + JLM_ASSERT(Set_.IsUnificationRoot(index)); + JLM_ASSERT(Set_.HasPointeesEscaping(index)); + PointeesEscapeFlagSeen_[index] = true; + } + + /** + * Performs conservative clearing of tracked differences, after unification. + * The set of tracked pointees is fully cleared, since all pointees might be new to + * constraints previously owned by the opposite PointerObject. + * + * If the worklist has seen the PointsToExternal and PointeesEscape flags on both operands, + * the flags will also be considered already seen for the resulting unification root. + * + * @param root the operand of the unification that ended up at the new root. + * @param nonRoot the root of the other unification, that is now no longer a root. + */ + void + OnPointerObjectsUnified(PointerObjectIndex root, PointerObjectIndex nonRoot) + { + JLM_ASSERT(IsInitialized()); + + // After unification, forget everything about tracked differences in points-to sets + NewPointees_[nonRoot].Clear(); + NewPointees_[root].Clear(); + NewPointeesTracked_[root] = false; + + PointsToExternalFlagSeen_[root] = + PointsToExternalFlagSeen_[root] && PointsToExternalFlagSeen_[nonRoot]; + PointeesEscapeFlagSeen_[root] = + PointeesEscapeFlagSeen_[root] && PointeesEscapeFlagSeen_[nonRoot]; + } + +private: + PointerObjectSet & Set_; + + // Only unification roots matter in these vectors + + // Tracks all new pointees added to a unification root i, + // since ClearNewPointees(i) was last called. + std::vector> NewPointees_; + // Becomes true for a unification root i when ClearNewPointees(i) is called for the first time. + // Becomes false again when unification fully resets difference propagation. + std::vector NewPointeesTracked_; + + // These are set to true after the corresponding _IsNew functions have returned true + // When two PointerObjects a and b are unified, the flag only remains "seen", + // if the flag has already been "seen" on both a and b.
+ std::vector PointsToExternalFlagSeen_; + std::vector PointeesEscapeFlagSeen_; +}; + +} + +#endif // JLM_LLVM_OPT_ALIAS_ANALYSES_DIFFERENCEPROPAGATION_HPP diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index 657c07799..caa77b37e 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include #include @@ -365,8 +366,12 @@ PointerObjectSet::AddToPointsToSet(PointerObjectIndex pointer, PointerObjectInde } // Makes P(superset) a superset of P(subset) +template bool -PointerObjectSet::MakePointsToSetSuperset(PointerObjectIndex superset, PointerObjectIndex subset) +PointerObjectSet::PropagateNewPointees( + PointerObjectIndex superset, + PointerObjectIndex subset, + NewPointeeFunctor & onNewPointee) { auto supersetRoot = GetUnificationRoot(superset); auto subsetRoot = GetUnificationRoot(subset); @@ -377,7 +382,15 @@ PointerObjectSet::MakePointsToSetSuperset(PointerObjectIndex superset, PointerOb auto & P_super = PointsToSets_[supersetRoot]; auto & P_sub = PointsToSets_[subsetRoot]; - bool modified = P_super.UnionWith(P_sub); + bool modified = false; + for (PointerObjectIndex pointee : P_sub.Items()) + { + if (P_super.Insert(pointee)) + { + onNewPointee(pointee); + modified = true; + } + } // If the external node is in the subset, it must also be part of the superset if (IsPointingToExternal(subsetRoot)) @@ -386,6 +399,29 @@ PointerObjectSet::MakePointsToSetSuperset(PointerObjectIndex superset, PointerOb return modified; } +bool +PointerObjectSet::MakePointsToSetSuperset(PointerObjectIndex superset, PointerObjectIndex subset) +{ + // NewPointee is a no-op + const auto & NewPointee = [](PointerObjectIndex) + { + }; + return PropagateNewPointees(superset, subset, NewPointee); +} + +bool +PointerObjectSet::MakePointsToSetSuperset( + PointerObjectIndex superset, + PointerObjectIndex subset, + 
util::HashSet & newPointees) +{ + const auto & NewPointee = [&](PointerObjectIndex pointee) + { + newPointees.Insert(pointee); + }; + return PropagateNewPointees(superset, subset, NewPointee); +} + bool PointerObjectSet::IsPointingTo(PointerObjectIndex pointer, PointerObjectIndex pointee) const { @@ -1392,7 +1428,8 @@ template< typename Worklist, bool EnableOnlineCycleDetection, bool EnableHybridCycleDetection, - bool EnableLazyCycleDetection> + bool EnableLazyCycleDetection, + bool EnableDifferencePropagation> void PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { @@ -1454,13 +1491,31 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } } + DifferencePropagation differencePropagation(Set_); + if constexpr (EnableDifferencePropagation) + differencePropagation.Initialize(); + + // Makes pointer point to pointee. + // Returns true if the pointee was new. Does not add pointer to the worklist. + const auto & AddToPointsToSet = [&](PointerObjectIndex pointer, + PointerObjectIndex pointee) -> bool + { + if constexpr (EnableDifferencePropagation) + return differencePropagation.AddToPointsToSet(pointer, pointee); + else + return Set_.AddToPointsToSet(pointer, pointee); + }; + // Makes superset point to everything subset points to, and propagates the PointsToEscaped flag. // Returns true if any pointees were new, or the flag was new. // Does not add superset to the worklist. const auto & MakePointsToSetSuperset = [&](PointerObjectIndex superset, PointerObjectIndex subset) -> bool { - return Set_.MakePointsToSetSuperset(superset, subset); + if constexpr (EnableDifferencePropagation) + return differencePropagation.MakePointsToSetSuperset(superset, subset); + else + return Set_.MakePointsToSetSuperset(superset, subset); }; // Performs unification safely while the worklist algorithm is running. 
@@ -1497,6 +1552,9 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) callConstraints[root].UnionWith(callConstraints[nonRoot]); callConstraints[nonRoot].Clear(); + if constexpr (EnableDifferencePropagation) + differencePropagation.OnPointerObjectsUnified(root, nonRoot); + if constexpr (EnableHybridCycleDetection) { // If the new root did not have a ref node unification target, check if the other node has one @@ -1635,8 +1693,18 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if (!Set_.IsUnificationRoot(node)) return; - auto & nodePointees = Set_.GetPointsToSet(node); - statistics.NumWorkItemNewPointees += nodePointees.Size(); + // If difference propagation is enabled, this set contains only pointees that have been added + // since the last time this work item was popped. Otherwise, it contains all pointees. + const auto & newPointees = EnableDifferencePropagation + ? differencePropagation.GetNewPointees(node) + : Set_.GetPointsToSet(node); + statistics.NumWorkItemNewPointees += newPointees.Size(); + + // If difference propagation is enabled, this bool is true if this is the first time node + // is being visited by the worklist with the PointsToExternal flag set + const auto newPointsToExternal = EnableDifferencePropagation + ? 
differencePropagation.PointsToExternalIsNew(node) + : Set_.IsPointingToExternal(node); // Perform hybrid cycle detection if all pointees of node should be unified if constexpr (EnableHybridCycleDetection) @@ -1651,7 +1719,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) // if any unification happens, the result must be added to the worklist bool anyUnification = false; - for (const auto pointee : nodePointees.Items()) + for (const auto pointee : newPointees.Items()) { const auto pointeeRoot = Set_.GetUnificationRoot(pointee); if (pointeeRoot == refUnificationRoot) @@ -1673,10 +1741,21 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } } - // If n is marked as PointeesEscaping, add the escaped flag to all pointees - if (Set_.HasPointeesEscaping(node)) + const auto pointeesEscaping = Set_.HasPointeesEscaping(node); + // If difference propagation is enabled, this bool is true if this is the first time node + // is being visited by the worklist with the PointeesEscaping flag set + const auto newPointeesEscaping = EnableDifferencePropagation + ? differencePropagation.PointeesEscapeIsNew(node) + : pointeesEscaping; + + // Mark pointees as escaping, if node has the PointeesEscaping flag + if (pointeesEscaping) { - for (const auto pointee : nodePointees.Items()) + // If this is the first time node is being visited with the PointeesEscaping flag set, + // add the escaped flag to all pointees. Otherwise, only add it to new pointees. + const auto & newEscapingPointees = + newPointeesEscaping ? Set_.GetPointsToSet(node) : newPointees; + for (const auto pointee : newEscapingPointees.Items()) { const auto pointeeRoot = Set_.GetUnificationRoot(pointee); @@ -1721,12 +1800,17 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) // The current it-edge should be kept as is, prepare "it" for the next iteration. 
++it; - bool modified = MakePointsToSetSuperset(supersetParent, node); + bool modified = false; + for (const auto pointee : newPointees.Items()) + modified |= AddToPointsToSet(supersetParent, pointee); + + if (newPointsToExternal) + modified |= Set_.MarkAsPointingToExternal(supersetParent); if (modified) worklist.PushWorkItem(supersetParent); - if (EnableLazyCycleDetection && !nodePointees.IsEmpty() && !modified) + if (EnableLazyCycleDetection && !newPointees.IsEmpty() && !modified) { // If nothing was propagated along this edge, check if there is a cycle // If a cycle is detected, this function eliminates it by unifying, and returns the root @@ -1744,11 +1828,11 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) for (const auto value : storeConstraints[node].Items()) { // This loop ensures *P(n) supseteq P(value) - for (const auto pointee : nodePointees.Items()) + for (const auto pointee : newPointees.Items()) QueueNewSupersetEdge(pointee, value); // If P(n) contains "external", the contents of the written value escapes - if (Set_.IsPointingToExternal(node)) + if (newPointsToExternal) MarkAsPointeesEscaping(value); } @@ -1756,11 +1840,11 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) for (const auto value : loadConstraints[node].Items()) { // This loop ensures P(value) supseteq *P(n) - for (const auto pointee : nodePointees.Items()) + for (const auto pointee : newPointees.Items()) QueueNewSupersetEdge(value, pointee); // If P(n) contains "external", the loaded value may also point to external - if (Set_.IsPointingToExternal(node)) + if (newPointsToExternal) MarkAsPointsToExternal(value); } @@ -1768,7 +1852,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) for (const auto callNode : callConstraints[node].Items()) { // Connect the inputs and outputs of the callNode to every possible function pointee - for (const auto pointee : nodePointees.Items()) + for (const auto 
pointee : newPointees.Items()) { const auto kind = Set_.GetPointerObjectKind(pointee); if (kind == PointerObjectKind::ImportMemoryObject) @@ -1783,7 +1867,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } // If P(n) contains "external", handle calling external functions - if (Set_.IsPointingToExternal(node)) + if (newPointsToExternal) HandleCallingExternalFunction( Set_, *callNode, @@ -1791,6 +1875,17 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) MarkAsPointsToExternal); } + // No pointees have been added to P(node) while visiting node thus far in the handler. + // All new flags have also been handled, or caused this node to be on the worklist again. + if constexpr (EnableDifferencePropagation) + { + differencePropagation.ClearNewPointees(node); + if (newPointsToExternal) + differencePropagation.MarkPointsToExternalAsHandled(node); + if (newPointeesEscaping) + differencePropagation.MarkPointeesEscapeAsHandled(node); + } + // Add all new superset edges, which also propagates points-to sets immediately // and possibly performs unifications to eliminate cycles. // Any unified nodes, or nodes with updated points-to sets, are added to the worklist. @@ -1819,7 +1914,8 @@ PointerObjectConstraintSet::SolveUsingWorklist( WorklistSolverPolicy policy, bool enableOnlineCycleDetection, bool enableHybridCycleDetection, - bool enableLazyCycleDetection) + bool enableLazyCycleDetection, + bool enableDifferencePropagation) { // Takes all parameters as compile time types. 
@@ -1828,12 +1924,14 @@ PointerObjectConstraintSet::SolveUsingWorklist( const auto Dispatch = [&](auto tWorklist, auto tOnlineCycleDetection, auto tHybridCycleDetection, - auto tLazyCycleDetection) -> WorklistStatistics + auto tLazyCycleDetection, + auto tDifferencePropagation) -> WorklistStatistics { using Worklist = std::remove_pointer_t; constexpr bool vOnlineCycleDetection = decltype(tOnlineCycleDetection)::value; constexpr bool vHybridCycleDetection = decltype(tHybridCycleDetection)::value; constexpr bool vLazyCycleDetection = decltype(tLazyCycleDetection)::value; + constexpr bool vDifferencePropagation = decltype(tDifferencePropagation)::value; if constexpr (vOnlineCycleDetection && (vHybridCycleDetection || vLazyCycleDetection)) { @@ -1846,7 +1944,8 @@ PointerObjectConstraintSet::SolveUsingWorklist( Worklist, vOnlineCycleDetection, vHybridCycleDetection, - vLazyCycleDetection>(statistics); + vLazyCycleDetection, + vDifferencePropagation>(statistics); return statistics; } }; @@ -1887,12 +1986,19 @@ PointerObjectConstraintSet::SolveUsingWorklist( else lazyCycleDetectionVariant = std::false_type{}; + std::variant differencePropagationVariant; + if (enableDifferencePropagation) + differencePropagationVariant = std::true_type{}; + else + differencePropagationVariant = std::false_type{}; + return std::visit( Dispatch, policyVariant, onlineCycleDetectionVariant, hybridCycleDetectionVariant, - lazyCycleDetectionVariant); + lazyCycleDetectionVariant, + differencePropagationVariant); } const char * diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index 316168904..63ebfbfa2 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -163,6 +163,17 @@ class PointerObjectSet final [[nodiscard]] PointerObjectIndex AddPointerObject(PointerObjectKind kind); + /** + * Internal helper function for making P(superset) a superset of P(subset), with a 
callback. + * @see MakePointsToSetSuperset + */ + template + bool + PropagateNewPointees( + PointerObjectIndex superset, + PointerObjectIndex subset, + NewPointeeFunctor & onNewPointee); + public: [[nodiscard]] size_t NumPointerObjects() const noexcept; @@ -407,6 +418,16 @@ class PointerObjectSet final bool MakePointsToSetSuperset(PointerObjectIndex superset, PointerObjectIndex subset); + /** + * A version of MakePointsToSetSuperset that adds any new pointees of \p superset, + * to the set \p newPointees. + */ + bool + MakePointsToSetSuperset( + PointerObjectIndex superset, + PointerObjectIndex subset, + util::HashSet & newPointees); + /** * @param pointer the PointerObject possibly pointing to \p pointee * @param pointee the PointerObject possibly being pointed at @@ -953,10 +974,12 @@ class PointerObjectConstraintSet final * - Online Cycle Detection (Pearce, 2003) * - Hybrid Cycle Detection (Hardekopf 2007) * - Lazy Cycle Detection (Hardekopf 2007) + * - Difference Propagation (Pearce, 2003) * @param policy the worklist iteration order policy to use * @param enableOnlineCycleDetection if true, online cycle detection will be performed. * @param enableHybridCycleDetection if true, hybrid cycle detection will be performed. * @param enableLazyCycleDetection if true, lazy cycle detection will be performed. + * @param enableDifferencePropagation if true, difference propagation will be enabled. * @return an instance of WorklistStatistics describing solver statistics */ WorklistStatistics @@ -964,7 +987,8 @@ class PointerObjectConstraintSet final WorklistSolverPolicy policy, bool enableOnlineCycleDetection, bool enableHybridCycleDetection, - bool enableLazyCycleDetection); + bool enableLazyCycleDetection, + bool enableDifferencePropagation); /** * Iterates over and applies constraints until all points-to-sets satisfy them. @@ -1009,13 +1033,15 @@ class PointerObjectConstraintSet final * @tparam EnableOnlineCycleDetection if true, online cycle detection is enabled. 
* @tparam EnableHybridCycleDetection if true, hybrid cycle detection is enabled. * @tparam EnableLazyCycleDetection if true, lazy cycle detection is enabled. + * @tparam EnableDifferencePropagation if true, difference propagation is enabled. * @see SolveUsingWorklist() for the public interface. */ template< typename Worklist, bool EnableOnlineCycleDetection, bool EnableHybridCycleDetection, - bool EnableLazyCycleDetection> + bool EnableLazyCycleDetection, + bool EnableDifferencePropagation> void RunWorklistSolver(WorklistStatistics & statistics); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp b/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp new file mode 100644 index 000000000..df00e4942 --- /dev/null +++ b/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp @@ -0,0 +1,114 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. + */ + +#include + +#include + +#include +#include + +#include + +static int +TestTracksDifferences() +{ + using namespace jlm; + using namespace jlm::llvm::aa; + + // Arrange + tests::NAllocaNodesTest rvsdg(4); + rvsdg.InitializeTest(); + + PointerObjectSet set; + auto r0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); + auto r1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); + auto a0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + auto a1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + auto a2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); + auto a3 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(3)); + + // Let r0 -> a0 and r0 -> a3 before difference tracking even begins + set.AddToPointsToSet(r0, a0); + set.AddToPointsToSet(r0, a3); + + // Act + DifferencePropagation differencePropagation(set); + differencePropagation.Initialize(); + + // Assert + assert(differencePropagation.GetNewPointees(r0) == (util::HashSet{ a0, a3 })); + + // Act 2 - add another pointer/pointee relation: r1 -> a1 + 
differencePropagation.AddToPointsToSet(r1, a1); + + // Assert that a1 is a new pointee of r1 + assert(differencePropagation.GetNewPointees(r1) == (util::HashSet{ a1 })); + + // Act 3 - clear difference tracking for r1 + differencePropagation.ClearNewPointees(r1); + + // Assert r1 no longer has any new pointees + assert(differencePropagation.GetNewPointees(r1).IsEmpty()); + // r0 still has new pointees + assert(!differencePropagation.GetNewPointees(r0).IsEmpty()); + + // Act 4 - add more pointees to r1, + bool new0 = differencePropagation.AddToPointsToSet(r1, a0); + bool new1 = differencePropagation.AddToPointsToSet(r1, a1); // not a new pointee + bool new2 = differencePropagation.AddToPointsToSet(r1, a2); + + // Assert that only a0 and a2 were new + assert(new0 && !new1 && new2); + assert(differencePropagation.GetNewPointees(r1) == (util::HashSet{ a0, a2 })); + + // Act 5 - make r0 point to a superset of r1, making r0 now point to a0, a1, a2, a3 + // First mark the existing pointees of r0 (a0 and a3) as seen + differencePropagation.ClearNewPointees(r0); + differencePropagation.MakePointsToSetSuperset(r0, r1); + + // Assert that only a1 and a2 are new to r0, as it has already marked a0 and a3 as seen + assert(differencePropagation.GetNewPointees(r0) == (util::HashSet{ a1, a2 })); + + // Act 6 - give nodes r0 and r1 flags + set.MarkAsPointeesEscaping(r0); + set.MarkAsPointingToExternal(r1); + + // Assert that the flags are new, but only if they actually have the flag + assert(differencePropagation.PointeesEscapeIsNew(r0)); + assert(!differencePropagation.PointsToExternalIsNew(r0)); + assert(!differencePropagation.PointeesEscapeIsNew(r1)); + assert(differencePropagation.PointsToExternalIsNew(r1)); + + // Act 7 - mark flags as seen + differencePropagation.MarkPointeesEscapeAsHandled(r0); + differencePropagation.MarkPointsToExternalAsHandled(r1); + + // Assert that the flags are no longer new + assert(!differencePropagation.PointeesEscapeIsNew(r0)); + 
assert(!differencePropagation.PointsToExternalIsNew(r1)); + + // Act 6 - unify 0 and 1 + // After unification, any pointee or flag that is new to either node becomes new to the union + auto root = set.UnifyPointerObjects(r0, r1); + auto nonRoot = r0 + r1 - root; + differencePropagation.OnPointerObjectsUnified(root, nonRoot); + + // Assert that all pointees that were new to either node, are also new to the root + // a0 and a2 were still marked as new to node r1 at the time of unification. + // a3 is not new to r0, but r1 has never seen it, so it must be regarded as new by the union. + util::HashSet subset{ a0, a2, a3 }; + assert(subset.IsSubsetOf(differencePropagation.GetNewPointees(root))); + + // Neither flag has been seen by both nodes, so they are both new to the unification + assert(differencePropagation.PointeesEscapeIsNew(root)); + assert(differencePropagation.PointsToExternalIsNew(root)); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestDifferencePropagation-TestTracksDifferences", + TestTracksDifferences) diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index c611a9140..20ac7d8d5 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -891,7 +891,8 @@ TestPointerObjectSet() config.GetWorklistSoliverPolicy(), config.IsOnlineCycleDetectionEnabled(), config.IsHybridCycleDetectionEnabled(), - config.IsLazyCycleDetectionEnabled()); + config.IsLazyCycleDetectionEnabled(), + config.IsDifferencePropagationEnabled()); } TestClonePointerObjectConstraintSet(); From 4d800fdad2142a75cb56b8617e6adeaf6196c391 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Wed, 21 Aug 2024 21:24:45 +0200 Subject: [PATCH 049/170] [AndersenAgnostic] Add prefer implicit pointees to the Andersen solver (#589) --- jlm/llvm/opt/alias-analyses/Andersen.cpp | 21 ++++- 
jlm/llvm/opt/alias-analyses/Andersen.hpp | 17 ++++ .../opt/alias-analyses/PointerObjectSet.cpp | 90 +++++++++++++++++-- .../opt/alias-analyses/PointerObjectSet.hpp | 21 ++++- .../alias-analyses/TestPointerObjectSet.cpp | 39 ++++---- 5 files changed, 159 insertions(+), 29 deletions(-) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 05f4b1ac5..40468e457 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -49,6 +49,8 @@ Andersen::Configuration::ToString() const str << "LazyCD_"; if (EnableDifferencePropagation_) str << "DP_"; + if (EnablePreferImplicitPointees_) + str << "PIP_"; } else { @@ -64,12 +66,19 @@ std::vector Andersen::Configuration::GetAllConfigurations() { std::vector configs; + auto PickPreferImplicitPointees = [&](Configuration config) + { + config.EnablePreferImplicitPointees(false); + configs.push_back(config); + config.EnablePreferImplicitPointees(true); + configs.push_back(config); + }; auto PickDifferencePropagation = [&](Configuration config) { config.EnableDifferencePropagation(false); - configs.push_back(config); + PickPreferImplicitPointees(config); config.EnableDifferencePropagation(true); - configs.push_back(config); + PickPreferImplicitPointees(config); }; auto PickLazyCycleDetection = [&](Configuration config) { @@ -182,6 +191,8 @@ class Andersen::Statistics final : public util::Statistics static constexpr const char * NumLazyCyclesDetected_ = "#LazyCyclesDetected"; static constexpr const char * NumLazyCycleUnifications_ = "#LazyCycleUnifications"; + static constexpr const char * NumPIPExplicitPointeesRemoved_ = "#PIPExplicitPointeesRemoved"; + // After solving statistics static constexpr const char * NumEscapedMemoryObjects_ = "#EscapedMemoryObjects"; static constexpr const char * NumUnificationRoots_ = "#UnificationRoots"; @@ -334,6 +345,9 @@ class Andersen::Statistics final : public util::Statistics if 
(statistics.NumLazyCycleUnifications) AddMeasurement(NumLazyCycleUnifications_, *statistics.NumLazyCycleUnifications); + + if (statistics.NumExplicitPointeesRemoved) + AddMeasurement(NumPIPExplicitPointeesRemoved_, *statistics.NumExplicitPointeesRemoved); } void @@ -1122,7 +1136,8 @@ Andersen::SolveConstraints( config.IsOnlineCycleDetectionEnabled(), config.IsHybridCycleDetectionEnabled(), config.IsLazyCycleDetectionEnabled(), - config.IsDifferencePropagationEnabled()); + config.IsDifferencePropagationEnabled(), + config.IsPreferImplicitPointeesEnabled()); statistics.StopConstraintSolvingWorklistStatistics(worklistStatistics); } else diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index 5f5a0d514..a83610917 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -195,6 +195,21 @@ class Andersen final : public AliasAnalysis return EnableDifferencePropagation_; } + /** + * Enables or disables preferring implicit pointees in the Worklist solver + */ + void + EnablePreferImplicitPointees(bool enable) noexcept + { + EnablePreferImplicitPointees_ = enable; + } + + [[nodiscard]] bool + IsPreferImplicitPointeesEnabled() const noexcept + { + return EnablePreferImplicitPointees_; + } + [[nodiscard]] std::string ToString() const; @@ -215,6 +230,7 @@ class Andersen final : public AliasAnalysis config.EnableHybridCycleDetection(true); config.EnableLazyCycleDetection(true); config.EnableDifferencePropagation(true); + config.EnablePreferImplicitPointees(true); return config; } @@ -251,6 +267,7 @@ class Andersen final : public AliasAnalysis bool EnableHybridCycleDetection_ = false; bool EnableLazyCycleDetection_ = false; bool EnableDifferencePropagation_ = false; + bool EnablePreferImplicitPointees_ = false; }; ~Andersen() noexcept override = default; diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index 
caa77b37e..405660be7 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -422,6 +422,13 @@ PointerObjectSet::MakePointsToSetSuperset( return PropagateNewPointees(superset, subset, NewPointee); } +void +PointerObjectSet::RemoveAllPointees(PointerObjectIndex index) +{ + auto root = GetUnificationRoot(index); + PointsToSets_[root].Clear(); +} + bool PointerObjectSet::IsPointingTo(PointerObjectIndex pointer, PointerObjectIndex pointee) const { @@ -1429,7 +1436,8 @@ template< bool EnableOnlineCycleDetection, bool EnableHybridCycleDetection, bool EnableLazyCycleDetection, - bool EnableDifferencePropagation> + bool EnableDifferencePropagation, + bool EnablePreferImplicitPointees> void PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { @@ -1569,6 +1577,17 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) return root; }; + // Removes all explicit pointees from the given PointerObject + const auto RemoveAllPointees = [&](PointerObjectIndex index) + { + JLM_ASSERT(Set_.IsUnificationRoot(index)); + Set_.RemoveAllPointees(index); + + // Prevent the difference propagation from keeping any pointees after the removal + if constexpr (EnableDifferencePropagation) + differencePropagation.OnRemoveAllPointees(index); + }; + // Lambda for getting all superset edge successors of a given pointer object in the subset graph. // If \p node is not a unification root, its set of successors will always be empty. 
const auto GetSupersetEdgeSuccessors = [&](PointerObjectIndex node) @@ -1590,6 +1609,9 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if constexpr (EnableHybridCycleDetection) statistics.NumHybridCycleUnifications = 0; + if constexpr (EnablePreferImplicitPointees) + statistics.NumExplicitPointeesRemoved = 0; + // The worklist, initialized with every unification root Worklist worklist; for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) @@ -1625,6 +1647,22 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) if (superset == subset) return; + if constexpr (EnablePreferImplicitPointees) + { + // No need to add edges when all pointees propagate implicitly either way + if (Set_.IsPointingToExternal(superset) && Set_.HasPointeesEscaping(subset)) + { + return; + } + + // Ignore adding simple edges to nodes that should only have implicit pointees + if (Set_.CanTrackPointeesImplicitly(superset)) + { + MarkAsPointeesEscaping(subset); + return; + } + } + // If the edge already exists, ignore if (!supersetEdges[subset].Insert(superset)) return; @@ -1741,6 +1779,21 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } } + // If propagating to any node with AllPointeesEscape, we should have AllPointeesEscape + if (EnablePreferImplicitPointees && !Set_.HasPointeesEscaping(node)) + { + for (auto superset : supersetEdges[node].Items()) + { + if (Set_.HasPointeesEscaping(superset)) + { + // Mark the current node. 
+ // This is the beginning of the work item visit, so node does not need to be added again + Set_.MarkAsPointeesEscaping(node); + break; + } + } + } + const auto pointeesEscaping = Set_.HasPointeesEscaping(node); // If difference propagation is enabled, this bool is true if this is the first time node // is being visited by the worklist with the PointeesEscaping flag set @@ -1784,6 +1837,14 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) } } + // If this node can track all pointees implicitly, remove its explicit nodes + if (EnablePreferImplicitPointees && Set_.CanTrackPointeesImplicitly(node)) + { + *(statistics.NumExplicitPointeesRemoved) += Set_.GetPointsToSet(node).Size(); + // This also causes newPointees to become empty + RemoveAllPointees(node); + } + // Propagate P(n) along all edges n -> superset auto supersets = supersetEdges[node].Items(); for (auto it = supersets.begin(); it != supersets.end();) @@ -1797,6 +1858,14 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) continue; } + // Remove edges from nodes with "all pointees escape" to nodes with "points to all escaped" + if (EnablePreferImplicitPointees && pointeesEscaping + && Set_.IsPointingToExternal(supersetParent)) + { + it = supersetEdges[node].Erase(it); + continue; + } + // The current it-edge should be kept as is, prepare "it" for the next iteration. ++it; @@ -1915,7 +1984,8 @@ PointerObjectConstraintSet::SolveUsingWorklist( bool enableOnlineCycleDetection, bool enableHybridCycleDetection, bool enableLazyCycleDetection, - bool enableDifferencePropagation) + bool enableDifferencePropagation, + bool enablePreferImplicitPointees) { // Takes all parameters as compile time types. 
@@ -1925,13 +1995,15 @@ PointerObjectConstraintSet::SolveUsingWorklist( auto tOnlineCycleDetection, auto tHybridCycleDetection, auto tLazyCycleDetection, - auto tDifferencePropagation) -> WorklistStatistics + auto tDifferencePropagation, + auto tPreferImplicitPointees) -> WorklistStatistics { using Worklist = std::remove_pointer_t; constexpr bool vOnlineCycleDetection = decltype(tOnlineCycleDetection)::value; constexpr bool vHybridCycleDetection = decltype(tHybridCycleDetection)::value; constexpr bool vLazyCycleDetection = decltype(tLazyCycleDetection)::value; constexpr bool vDifferencePropagation = decltype(tDifferencePropagation)::value; + constexpr bool vPreferImplicitPointees = decltype(tPreferImplicitPointees)::value; if constexpr (vOnlineCycleDetection && (vHybridCycleDetection || vLazyCycleDetection)) { @@ -1945,7 +2017,8 @@ PointerObjectConstraintSet::SolveUsingWorklist( vOnlineCycleDetection, vHybridCycleDetection, vLazyCycleDetection, - vDifferencePropagation>(statistics); + vDifferencePropagation, + vPreferImplicitPointees>(statistics); return statistics; } }; @@ -1992,13 +2065,20 @@ PointerObjectConstraintSet::SolveUsingWorklist( else differencePropagationVariant = std::false_type{}; + std::variant preferImplicitPropagationVariant; + if (enablePreferImplicitPointees) + preferImplicitPropagationVariant = std::true_type{}; + else + preferImplicitPropagationVariant = std::false_type{}; + return std::visit( Dispatch, policyVariant, onlineCycleDetectionVariant, hybridCycleDetectionVariant, lazyCycleDetectionVariant, - differencePropagationVariant); + differencePropagationVariant, + preferImplicitPropagationVariant); } const char * diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index 63ebfbfa2..509530d19 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -428,6 +428,13 @@ class PointerObjectSet final PointerObjectIndex subset, 
util::HashSet & newPointees); + /** + * Removes all pointees from the PointerObject with the given \p index. + * Can be used, e.g., when the PointerObject already points to all its pointees implicitly. + */ + void + RemoveAllPointees(PointerObjectIndex index); + /** * @param pointer the PointerObject possibly pointing to \p pointee * @param pointee the PointerObject possibly being pointed at @@ -844,6 +851,12 @@ class PointerObjectConstraintSet final std::optional NumLazyCyclesDetectionAttempts; std::optional NumLazyCyclesDetected; std::optional NumLazyCycleUnifications; + + /** + * When Prefer Implicit Pointees is enabled, and a node's pointees can be tracked fully + * implicitly, its set of explicit pointees is cleared. + */ + std::optional NumExplicitPointeesRemoved; }; explicit PointerObjectConstraintSet(PointerObjectSet & set) @@ -980,6 +993,7 @@ class PointerObjectConstraintSet final * @param enableHybridCycleDetection if true, hybrid cycle detection will be performed. * @param enableLazyCycleDetection if true, lazy cycle detection will be performed. * @param enableDifferencePropagation if true, difference propagation will be enabled. + * @param enablePreferImplicitPropation if true, enables PIP, which is novel to this codebase * @return an instance of WorklistStatistics describing solver statistics */ WorklistStatistics @@ -988,7 +1002,8 @@ class PointerObjectConstraintSet final bool enableOnlineCycleDetection, bool enableHybridCycleDetection, bool enableLazyCycleDetection, - bool enableDifferencePropagation); + bool enableDifferencePropagation, + bool enablePreferImplicitPropation); /** * Iterates over and applies constraints until all points-to-sets satisfy them. @@ -1034,6 +1049,7 @@ class PointerObjectConstraintSet final * @tparam EnableHybridCycleDetection if true, hybrid cycle detection is enabled. * @tparam EnableLazyCycleDetection if true, lazy cycle detection is enabled. 
* @tparam EnableDifferencePropagation if true, difference propagation is enabled. + * @tparam EnablePreferImplicitPointees if true, prefer implicit pointees is enabled * @see SolveUsingWorklist() for the public interface. */ template< @@ -1041,7 +1057,8 @@ class PointerObjectConstraintSet final bool EnableOnlineCycleDetection, bool EnableHybridCycleDetection, bool EnableLazyCycleDetection, - bool EnableDifferencePropagation> + bool EnableDifferencePropagation, + bool EnablePreferImplicitPointees> void RunWorklistSolver(WorklistStatistics & statistics); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 20ac7d8d5..6894e1d1f 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -770,38 +770,38 @@ TestPointerObjectConstraintSetSolve(Args... args) } // alloca1 should point to alloca2, etc - assert(set.GetPointsToSet(alloca1).Size() == 1); - assert(set.GetPointsToSet(alloca1).Contains(alloca2)); - assert(set.GetPointsToSet(alloca2).Size() == 1); - assert(set.GetPointsToSet(alloca2).Contains(alloca3)); - assert(set.GetPointsToSet(alloca3).Size() == 1); - assert(set.GetPointsToSet(alloca3).Contains(alloca4)); + assert(set.GetPointsToSet(alloca1).Size() <= 1); + assert(set.IsPointingTo(alloca1, alloca2)); + assert(set.GetPointsToSet(alloca2).Size() <= 1); + assert(set.IsPointingTo(alloca2, alloca3)); + assert(set.GetPointsToSet(alloca3).Size() <= 1); + assert(set.IsPointingTo(alloca3, alloca4)); // %5 is a load of alloca1, and should only be a pointer to alloca2 - assert(set.GetPointsToSet(reg[5]).Size() == 1); - assert(set.GetPointsToSet(reg[5]).Contains(alloca2)); + assert(set.GetPointsToSet(reg[5]).Size() <= 1); + assert(set.IsPointingTo(reg[5], alloca2)); // %6 is a load of alloca3, and should only be a pointer to alloca4 - assert(set.GetPointsToSet(reg[6]).Size() == 1); - 
assert(set.GetPointsToSet(reg[6]).Contains(alloca4)); + assert(set.GetPointsToSet(reg[6]).Size() <= 1); + assert(set.IsPointingTo(reg[6], alloca4)); // %7 can point to either alloca2 or alloca4 - assert(set.GetPointsToSet(reg[7]).Size() == 2); - assert(set.GetPointsToSet(reg[7]).Contains(alloca2)); - assert(set.GetPointsToSet(reg[7]).Contains(alloca4)); + assert(set.GetPointsToSet(reg[7]).Size() <= 2); + assert(set.IsPointingTo(reg[7], alloca2)); + assert(set.IsPointingTo(reg[7], alloca4)); // %8 should point to external, since it points to the superset of %0 and %1 assert(set.IsPointingToExternal(reg[8])); // %8 may also point to alloca4 - assert(set.GetPointsToSet(reg[8]).Size() == 1); - assert(set.GetPointsToSet(reg[8]).Contains(alloca4)); + assert(set.GetPointsToSet(reg[8]).Size() <= 1); + assert(set.IsPointingTo(reg[8], alloca4)); // %9 may point to v3 - assert(set.GetPointsToSet(reg[9]).Contains(alloca3)); + assert(set.IsPointingTo(reg[9], alloca3)); // Due to the store of %9 into [%8], alloca4 may now point back to alloca3 - assert(set.GetPointsToSet(alloca4).Size() == 1); - assert(set.GetPointsToSet(alloca4).Contains(alloca3)); + assert(set.GetPointsToSet(alloca4).Size() <= 1); + assert(set.IsPointingTo(alloca4, alloca3)); // Also due to the same store, alloca3 might have escaped assert(set.HasEscaped(alloca3)); @@ -892,7 +892,8 @@ TestPointerObjectSet() config.IsOnlineCycleDetectionEnabled(), config.IsHybridCycleDetectionEnabled(), config.IsLazyCycleDetectionEnabled(), - config.IsDifferencePropagationEnabled()); + config.IsDifferencePropagationEnabled(), + config.IsPreferImplicitPointeesEnabled()); } TestClonePointerObjectConstraintSet(); From e9613166781463a8821f57ca38ac7650fcd645e4 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 21 Aug 2024 21:49:46 +0200 Subject: [PATCH 050/170] Remove port class (#588) The title says it all... 
--- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 6 +- .../rvsdg2rhls/remove-redundant-buf.cpp | 2 +- jlm/hls/ir/hls.cpp | 4 +- jlm/hls/ir/hls.hpp | 84 ++++++++-------- jlm/llvm/backend/jlm2llvm/instruction.cpp | 10 +- jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 2 +- jlm/llvm/backend/rvsdg2jlm/context.hpp | 8 +- jlm/llvm/ir/operators/Load.hpp | 2 +- jlm/llvm/ir/operators/MemCpy.hpp | 2 +- jlm/llvm/ir/operators/Phi.hpp | 45 --------- jlm/llvm/ir/operators/Store.hpp | 2 +- jlm/llvm/ir/operators/alloca.hpp | 2 +- jlm/llvm/ir/operators/delta.hpp | 17 +--- jlm/llvm/ir/operators/lambda.hpp | 4 +- jlm/llvm/ir/operators/operators.cpp | 4 +- jlm/llvm/ir/operators/operators.hpp | 96 +++++++++---------- jlm/llvm/ir/operators/sext.hpp | 4 +- jlm/llvm/ir/tac.cpp | 4 +- jlm/llvm/ir/tac.hpp | 2 +- jlm/rvsdg/binary.hpp | 4 +- jlm/rvsdg/bitstring/bitoperation-classes.hpp | 6 +- jlm/rvsdg/bitstring/slice.hpp | 4 +- jlm/rvsdg/control.hpp | 4 +- jlm/rvsdg/node.cpp | 32 +------ jlm/rvsdg/node.hpp | 54 ++--------- jlm/rvsdg/operation.cpp | 29 +----- jlm/rvsdg/operation.hpp | 72 ++------------ jlm/rvsdg/region.cpp | 74 +------------- jlm/rvsdg/region.hpp | 21 ---- jlm/rvsdg/simple-node.cpp | 5 +- jlm/rvsdg/statemux.cpp | 4 +- .../llvm/ThreeAddressCodeConversionTests.cpp | 6 +- .../mlir/frontend/TestMlirToJlmConverter.cpp | 16 ++-- tests/jlm/rvsdg/ArgumentTests.cpp | 5 +- tests/jlm/rvsdg/RegionTests.cpp | 21 ++-- tests/jlm/rvsdg/ResultTests.cpp | 2 +- tests/test-operation.hpp | 46 --------- 37 files changed, 181 insertions(+), 524 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 9d41d03e4..82f54f722 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -1198,7 +1198,7 @@ RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node) auto oneBitValue = GetConstant(body, 1, 1); // memory - auto arraytype = 
dynamic_cast(&lmem_op->result(0).type()); + auto arraytype = std::dynamic_pointer_cast(lmem_op->result(0)); size_t depth = arraytype->nelements(); auto dataType = GetFirrtlType(&arraytype->element_type()); ::llvm::SmallVector memTypes; @@ -3973,8 +3973,8 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) if (auto op = dynamic_cast(&node->operation())) { append.append("_S"); - append.append( - std::to_string(dynamic_cast(&op->result(0).type())->nelements())); + append.append(std::to_string( + std::dynamic_pointer_cast(op->result(0))->nelements())); append.append("_L"); size_t loads = rvsdg::input::GetNode(**node->output(0)->begin())->noutputs(); append.append(std::to_string(loads)); diff --git a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index 69581a798..78a36ec41 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -48,7 +48,7 @@ remove_redundant_buf(jlm::rvsdg::region * region) { if (auto buf = dynamic_cast(&node->operation())) { - if (dynamic_cast(&buf->argument(0).type())) + if (std::dynamic_pointer_cast(buf->argument(0))) { if (!buf->pass_through && eliminate_buf(node->input(0)->origin())) { diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 2dee01f10..42e0171ea 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -96,7 +96,7 @@ loop_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap auto inp = jlm::rvsdg::structural_input::create(loop, in_origin, in_origin->Type()); smap.insert(input(i), loop->input(i)); auto oarg = input(i)->arguments.begin().ptr(); - auto narg = jlm::rvsdg::argument::create(loop->subregion(), inp, oarg->port()); + auto narg = jlm::rvsdg::argument::create(loop->subregion(), inp, oarg->Type()); smap.insert(oarg, narg); } for (size_t i = 0; i < noutputs(); ++i) @@ -132,7 +132,7 @@ loop_node::copy(jlm::rvsdg::region * region, 
jlm::rvsdg::substitution_map & smap auto outp = output(i); auto res = outp->results.begin().ptr(); auto origin = smap.lookup(res->origin()); - jlm::rvsdg::result::create(loop->subregion(), origin, loop->output(i), res->port()); + jlm::rvsdg::result::create(loop->subregion(), origin, loop->output(i), res->Type()); } nf->set_mutable(true); return loop; diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 6c5f8480d..c733ed81d 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -39,8 +39,8 @@ class branch_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->loop == loop && ot->argument(0).type() == argument(0).type() - && ot->result(0).type() == result(0).type(); + return ot && ot->loop == loop && *ot->argument(0) == *argument(0) + && *ot->result(0) == *result(0); } std::string @@ -118,8 +118,8 @@ class fork_op final : public jlm::rvsdg::simple_op { auto forkOp = dynamic_cast(&other); // check predicate and value - return forkOp && forkOp->argument(0).type() == argument(0).type() - && forkOp->nresults() == nresults() && forkOp->IsConstant() == IsConstant_; + return forkOp && *forkOp->argument(0) == *argument(0) && forkOp->nresults() == nresults() + && forkOp->IsConstant() == IsConstant_; } /** @@ -186,7 +186,7 @@ class merge_op final : public jlm::rvsdg::simple_op operator==(const jlm::rvsdg::operation & other) const noexcept override { auto ot = dynamic_cast(&other); - return ot && ot->narguments() == narguments() && ot->argument(0).type() == argument(0).type(); + return ot && ot->narguments() == narguments() && *ot->argument(0) == *argument(0); } std::string @@ -234,8 +234,8 @@ class mux_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(0).type() == argument(0).type() - && ot->result(0).type() == result(0).type() && ot->discarding == discarding; + return ot && *ot->argument(0) == *argument(0) && 
*ot->result(0) == *result(0) + && ot->discarding == discarding; } std::string @@ -300,7 +300,7 @@ class sink_op final : public jlm::rvsdg::simple_op operator==(const jlm::rvsdg::operation & other) const noexcept override { auto ot = dynamic_cast(&other); - return ot && ot->argument(0).type() == argument(0).type(); + return ot && *ot->argument(0) == *argument(0); } std::string @@ -338,7 +338,7 @@ class predicate_buffer_op final : public jlm::rvsdg::simple_op operator==(const jlm::rvsdg::operation & other) const noexcept override { auto ot = dynamic_cast(&other); - return ot && ot->result(0).type() == result(0).type(); + return ot && *ot->result(0) == *result(0); } std::string @@ -381,8 +381,7 @@ class loop_constant_buffer_op final : public jlm::rvsdg::simple_op operator==(const jlm::rvsdg::operation & other) const noexcept override { auto ot = dynamic_cast(&other); - return ot && ot->result(0).type() == result(0).type() - && ot->argument(0).type() == argument(0).type(); + return ot && *ot->result(0) == *result(0) && *ot->argument(0) == *argument(0); } std::string @@ -429,7 +428,7 @@ class buffer_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); return ot && ot->capacity == capacity && ot->pass_through == pass_through - && ot->result(0).type() == result(0).type(); + && *ot->result(0) == *result(0); } std::string @@ -503,8 +502,7 @@ class trigger_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() - && ot->result(0).type() == result(0).type(); + return ot && *ot->argument(1) == *argument(1) && *ot->result(0) == *result(0); } std::string @@ -664,7 +662,7 @@ class backedge_result : public jlm::rvsdg::result private: backedge_result(jlm::rvsdg::output * origin) - : jlm::rvsdg::result(origin->region(), origin, nullptr, origin->port()), + : jlm::rvsdg::result(origin->region(), origin, nullptr, origin->Type()), argument_(nullptr) {} @@ 
-824,7 +822,7 @@ class load_op final : public jlm::rvsdg::simple_op // TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -856,7 +854,7 @@ class load_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_LOAD_" + argument(narguments() - 1).type().debug_string(); + return "HLS_LOAD_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -885,13 +883,13 @@ class load_op final : public jlm::rvsdg::simple_op [[nodiscard]] const llvm::PointerType & GetPointerType() const noexcept { - return *util::AssertedCast(&argument(0).type()); + return *util::AssertedCast(argument(0).get()); } [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - return std::dynamic_pointer_cast(result(0).Type()); + return std::dynamic_pointer_cast(result(0)); } }; @@ -916,7 +914,7 @@ class addr_queue_op final : public jlm::rvsdg::simple_op // TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -939,9 +937,9 @@ class addr_queue_op final : public jlm::rvsdg::simple_op { if (combinatorial) { - return "HLS_ADDR_QUEUE_COMB_" + argument(narguments() - 1).type().debug_string(); + return "HLS_ADDR_QUEUE_COMB_" + argument(narguments() - 1)->debug_string(); } - return "HLS_ADDR_QUEUE_" + argument(narguments() - 1).type().debug_string(); + return "HLS_ADDR_QUEUE_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -983,7 +981,7 @@ class state_gate_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == 
argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1000,7 +998,7 @@ class state_gate_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_STATE_GATE_" + argument(narguments() - 1).type().debug_string(); + return "HLS_STATE_GATE_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -1036,7 +1034,7 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op { auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1058,7 +1056,7 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_DEC_LOAD_" + argument(narguments() - 1).type().debug_string(); + return "HLS_DEC_LOAD_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -1080,13 +1078,13 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op [[nodiscard]] const llvm::PointerType & GetPointerType() const noexcept { - return *util::AssertedCast(&argument(0).type()); + return *util::AssertedCast(argument(0).get()); } [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - return std::dynamic_pointer_cast(result(0).Type()); + return std::dynamic_pointer_cast(result(0)); } }; @@ -1106,7 +1104,7 @@ class mem_resp_op final : public jlm::rvsdg::simple_op // TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1192,7 +1190,7 @@ class mem_req_op final : public jlm::rvsdg::simple_op auto ot = dynamic_cast(&other); // check 
predicate and value return ot && ot->narguments() == narguments() - && (ot->narguments() == 0 || (ot->argument(1).type() == argument(1).type())) + && (ot->narguments() == 0 || (*ot->argument(1) == *argument(1))) && ot->narguments() == narguments(); } @@ -1309,7 +1307,7 @@ class store_op final : public jlm::rvsdg::simple_op // TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1338,7 +1336,7 @@ class store_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_STORE_" + argument(narguments() - 1).type().debug_string(); + return "HLS_STORE_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -1364,13 +1362,13 @@ class store_op final : public jlm::rvsdg::simple_op [[nodiscard]] const llvm::PointerType & GetPointerType() const noexcept { - return *util::AssertedCast(&argument(0).type()); + return *util::AssertedCast(argument(0).get()); } [[nodiscard]] const rvsdg::valuetype & GetStoredType() const noexcept { - return *util::AssertedCast(&argument(1).type()); + return *util::AssertedCast(argument(1).get()); } }; @@ -1403,7 +1401,7 @@ class local_mem_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_LOCAL_MEM_" + result(0).type().debug_string(); + return "HLS_LOCAL_MEM_" + result(0)->debug_string(); } std::unique_ptr @@ -1436,7 +1434,7 @@ class local_mem_resp_op final : public jlm::rvsdg::simple_op // TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1484,7 +1482,7 @@ class local_load_op final : public jlm::rvsdg::simple_op 
// TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1514,7 +1512,7 @@ class local_load_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_LOCAL_LOAD_" + argument(narguments() - 1).type().debug_string(); + return "HLS_LOCAL_LOAD_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -1542,7 +1540,7 @@ class local_load_op final : public jlm::rvsdg::simple_op [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - return std::dynamic_pointer_cast(result(0).Type()); + return std::dynamic_pointer_cast(result(0)); } }; @@ -1562,7 +1560,7 @@ class local_store_op final : public jlm::rvsdg::simple_op // TODO: auto ot = dynamic_cast(&other); // check predicate and value - return ot && ot->argument(1).type() == argument(1).type() && ot->narguments() == narguments(); + return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } static std::vector> @@ -1591,7 +1589,7 @@ class local_store_op final : public jlm::rvsdg::simple_op std::string debug_string() const override { - return "HLS_LOCAL_STORE_" + argument(narguments() - 1).type().debug_string(); + return "HLS_LOCAL_STORE_" + argument(narguments() - 1)->debug_string(); } std::unique_ptr @@ -1619,7 +1617,7 @@ class local_store_op final : public jlm::rvsdg::simple_op [[nodiscard]] const jlm::rvsdg::valuetype & GetStoredType() const noexcept { - return *util::AssertedCast(&argument(1).type()); + return *util::AssertedCast(argument(1).get()); } }; @@ -1643,7 +1641,7 @@ class local_mem_req_op final : public jlm::rvsdg::simple_op auto ot = dynamic_cast(&other); // check predicate and value return ot && ot->narguments() == narguments() - && (ot->narguments() == 0 || (ot->argument(1).type() == argument(1).type())) + && 
(ot->narguments() == 0 || (*ot->argument(1) == *argument(1))) && ot->narguments() == narguments(); } diff --git a/jlm/llvm/backend/jlm2llvm/instruction.cpp b/jlm/llvm/backend/jlm2llvm/instruction.cpp index d45ee628b..a834215e5 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.cpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.cpp @@ -161,7 +161,7 @@ convert_undef( context & ctx) { JLM_ASSERT(is(op)); - return ::llvm::UndefValue::get(convert_type(op.result(0).type(), ctx)); + return ::llvm::UndefValue::get(convert_type(*op.result(0), ctx)); } static ::llvm::Value * @@ -505,7 +505,7 @@ convert( data.push_back(c); } - auto at = dynamic_cast(&op.result(0).type()); + auto at = std::dynamic_pointer_cast(op.result(0)); auto type = convert_type(*at, ctx); return ::llvm::ConstantArray::get(type, data); } @@ -517,7 +517,7 @@ convert( ::llvm::IRBuilder<> & builder, context & ctx) { - auto type = convert_type(op.result(0).type(), ctx); + auto type = convert_type(*op.result(0), ctx); return ::llvm::ConstantAggregateZero::get(type); } @@ -840,7 +840,7 @@ convert_cast( context & ctx) { JLM_ASSERT(::llvm::Instruction::isCast(OPCODE)); - auto dsttype = std::dynamic_pointer_cast(op.result(0).Type()); + auto dsttype = std::dynamic_pointer_cast(op.result(0)); auto operand = operands[0]; if (auto vt = dynamic_cast(&operand->type())) @@ -895,7 +895,7 @@ convert( { auto & llvmmod = ctx.llvm_module(); - auto fcttype = convert_type(FunctionType({ op.argument(0).Type() }, {}), ctx); + auto fcttype = convert_type(FunctionType({ op.argument(0) }, {}), ctx); auto function = llvmmod.getOrInsertFunction("free", fcttype); auto operands = std::vector<::llvm::Value *>(1, ctx.value(args[0])); return builder.CreateCall(function, operands); diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 5f8d1b7ab..43b2ddabf 100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -119,7 +119,7 @@ create_switch(const cfg_node * 
node, context & ctx) auto sw = builder.CreateSwitch(condition, defbb); for (const auto & alt : *mop) { - auto & type = *static_cast(&mop->argument(0).type()); + auto & type = *std::static_pointer_cast(mop->argument(0)); auto value = ::llvm::ConstantInt::get(convert_type(type, ctx), alt.first); sw->addCase(value, ctx.basic_block(node->outedge(alt.second)->sink())); } diff --git a/jlm/llvm/backend/rvsdg2jlm/context.hpp b/jlm/llvm/backend/rvsdg2jlm/context.hpp index bc8729c5b..420934861 100644 --- a/jlm/llvm/backend/rvsdg2jlm/context.hpp +++ b/jlm/llvm/backend/rvsdg2jlm/context.hpp @@ -47,11 +47,11 @@ class context final } inline void - insert(const rvsdg::output * port, const llvm::variable * v) + insert(const rvsdg::output * output, const llvm::variable * v) { - JLM_ASSERT(ports_.find(port) == ports_.end()); - JLM_ASSERT(port->type() == v->type()); - ports_[port] = v; + JLM_ASSERT(ports_.find(output) == ports_.end()); + JLM_ASSERT(*output->Type() == *v->Type()); + ports_[output] = v; } inline const llvm::variable * diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index 8a84ae750..a4ff4e43c 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -161,7 +161,7 @@ class LoadOperation : public rvsdg::simple_op [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - auto type = std::dynamic_pointer_cast(result(0).Type()); + auto type = std::dynamic_pointer_cast(result(0)); JLM_ASSERT(type); return type; } diff --git a/jlm/llvm/ir/operators/MemCpy.hpp b/jlm/llvm/ir/operators/MemCpy.hpp index 0dc7d4b0a..f5d33849b 100644 --- a/jlm/llvm/ir/operators/MemCpy.hpp +++ b/jlm/llvm/ir/operators/MemCpy.hpp @@ -53,7 +53,7 @@ class MemCpyOperation : public rvsdg::simple_op [[nodiscard]] const rvsdg::bittype & LengthType() const noexcept { - auto type = dynamic_cast(&argument(2).type()); + auto type = std::dynamic_pointer_cast(argument(2)); JLM_ASSERT(type != nullptr); return *type; } diff --git 
a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index a201093fb..c3a672bab 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -660,11 +660,6 @@ class rvargument final : public jlm::rvsdg::argument ~rvargument() override; private: - rvargument(jlm::rvsdg::region * region, const jlm::rvsdg::port & port) - : argument(region, nullptr, port), - output_(nullptr) - {} - rvargument(jlm::rvsdg::region * region, const std::shared_ptr type) : argument(region, nullptr, std::move(type)), output_(nullptr) @@ -680,14 +675,6 @@ class rvargument final : public jlm::rvsdg::argument rvargument & operator=(rvargument &&) = delete; - static rvargument * - create(jlm::rvsdg::region * region, const jlm::rvsdg::port & port) - { - auto argument = new rvargument(region, port); - region->append_argument(argument); - return argument; - } - static rvargument * create(jlm::rvsdg::region * region, std::shared_ptr type) { @@ -729,10 +716,6 @@ class cvargument final : public jlm::rvsdg::argument public: ~cvargument() override; - cvargument(jlm::rvsdg::region * region, phi::cvinput * input, const jlm::rvsdg::port & port) - : jlm::rvsdg::argument(region, input, port) - {} - cvargument( jlm::rvsdg::region * region, phi::cvinput * input, @@ -754,14 +737,6 @@ class cvargument final : public jlm::rvsdg::argument cvargument & Copy(rvsdg::region & region, rvsdg::structural_input * input) override; - static cvargument * - create(jlm::rvsdg::region * region, phi::cvinput * input, const jlm::rvsdg::port & port) - { - auto argument = new cvargument(region, input, port); - region->append_argument(argument); - return argument; - } - static cvargument * create(jlm::rvsdg::region * region, phi::cvinput * input, std::shared_ptr type) { @@ -788,14 +763,6 @@ class rvresult final : public jlm::rvsdg::result ~rvresult() override; private: - rvresult( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - rvoutput * output, - const jlm::rvsdg::port & port) - : 
jlm::rvsdg::result(region, origin, output, port) - {} - rvresult( jlm::rvsdg::region * region, jlm::rvsdg::output * origin, @@ -817,18 +784,6 @@ class rvresult final : public jlm::rvsdg::result rvresult & Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; - static rvresult * - create( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - rvoutput * output, - const jlm::rvsdg::port & port) - { - auto result = new rvresult(region, origin, output, port); - region->append_result(result); - return result; - } - static rvresult * create( jlm::rvsdg::region * region, diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index f9bae89db..24dea6cac 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -123,7 +123,7 @@ class StoreOperation : public rvsdg::simple_op [[nodiscard]] const rvsdg::valuetype & GetStoredType() const noexcept { - return *util::AssertedCast(&argument(1).type()); + return *util::AssertedCast(argument(1).get()); } [[nodiscard]] virtual size_t diff --git a/jlm/llvm/ir/operators/alloca.hpp b/jlm/llvm/ir/operators/alloca.hpp index 6962ebbc9..1f6b7a0a2 100644 --- a/jlm/llvm/ir/operators/alloca.hpp +++ b/jlm/llvm/ir/operators/alloca.hpp @@ -48,7 +48,7 @@ class alloca_op final : public rvsdg::simple_op inline const rvsdg::bittype & size_type() const noexcept { - return *static_cast(&argument(0).type()); + return *std::static_pointer_cast(argument(0)); } inline const rvsdg::valuetype & diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 2fe6cf6c8..178a50d9f 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -384,22 +384,11 @@ class output final : public rvsdg::structural_output public: ~output() override; - output(delta::node * node, const rvsdg::port & port) - : structural_output(node, port.Type()) - {} - output(delta::node * node, std::shared_ptr type) : structural_output(node, std::move(type)) {} 
private: - static output * - create(delta::node * node, const rvsdg::port & port) - { - auto output = std::make_unique(node, port); - return static_cast(node->append_output(std::move(output))); - } - static output * create(delta::node * node, std::shared_ptr type) { @@ -429,7 +418,7 @@ class cvargument final : public rvsdg::argument private: cvargument(rvsdg::region * region, cvinput * input) - : rvsdg::argument(region, input, input->port()) + : rvsdg::argument(region, input, input->Type()) {} static cvargument * @@ -461,8 +450,8 @@ class result final : public rvsdg::result Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; private: - result(rvsdg::output * origin) - : rvsdg::result(origin->region(), origin, nullptr, origin->port()) + explicit result(rvsdg::output * origin) + : rvsdg::result(origin->region(), origin, nullptr, origin->Type()) {} static result * diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 2dc4d672c..de3a340eb 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -607,7 +607,7 @@ class cvargument final : public jlm::rvsdg::argument private: cvargument(jlm::rvsdg::region * region, cvinput * input) - : jlm::rvsdg::argument(region, input, input->port()) + : jlm::rvsdg::argument(region, input, input->Type()) {} static cvargument * @@ -640,7 +640,7 @@ class result final : public jlm::rvsdg::result private: explicit result(jlm::rvsdg::output * origin) - : jlm::rvsdg::result(origin->region(), origin, nullptr, origin->port()) + : rvsdg::result(origin->region(), origin, nullptr, origin->Type()) {} static result * diff --git a/jlm/llvm/ir/operators/operators.cpp b/jlm/llvm/ir/operators/operators.cpp index 22795184a..428f162cd 100644 --- a/jlm/llvm/ir/operators/operators.cpp +++ b/jlm/llvm/ir/operators/operators.cpp @@ -781,9 +781,9 @@ bitcast_op::debug_string() const { return util::strfmt( "BITCAST[", - argument(0).type().debug_string(), + 
argument(0)->debug_string(), " -> ", - result(0).type().debug_string(), + result(0)->debug_string(), "]"); } diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index a33f798fc..6b3f4aa89 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -58,13 +58,13 @@ class phi_op final : public jlm::rvsdg::simple_op inline const jlm::rvsdg::type & type() const noexcept { - return result(0).type(); + return *result(0); } inline const std::shared_ptr & Type() const noexcept { - return result(0).Type(); + return result(0); } inline cfg_node * @@ -149,16 +149,16 @@ class select_op final : public jlm::rvsdg::simple_op virtual std::unique_ptr copy() const override; - const jlm::rvsdg::type & + [[nodiscard]] const jlm::rvsdg::type & type() const noexcept { - return result(0).type(); + return *result(0); } - const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { - return result(0).Type(); + return result(0); } static std::unique_ptr @@ -193,16 +193,16 @@ class vectorselect_op final : public jlm::rvsdg::simple_op virtual std::unique_ptr copy() const override; - const jlm::rvsdg::type & + [[nodiscard]] const rvsdg::type & type() const noexcept { - return result(0).type(); + return *result(0); } - const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { - return result(0).Type(); + return result(0); } size_t @@ -423,7 +423,7 @@ class branch_op final : public jlm::rvsdg::simple_op inline size_t nalternatives() const noexcept { - return static_cast(&argument(0).type())->nalternatives(); + return std::static_pointer_cast(argument(0))->nalternatives(); } static std::unique_ptr @@ -459,7 +459,7 @@ class ConstantPointerNullOperation final : public jlm::rvsdg::simple_op [[nodiscard]] const PointerType & GetPointerType() const noexcept { - return *jlm::util::AssertedCast(&result(0).type()); + return *util::AssertedCast(result(0).get()); } static 
std::unique_ptr @@ -533,7 +533,7 @@ class bits2ptr_op final : public jlm::rvsdg::unary_op inline size_t nbits() const noexcept { - return static_cast(&argument(0).type())->nbits(); + return std::static_pointer_cast(argument(0))->nbits(); } static std::unique_ptr @@ -613,7 +613,7 @@ class ptr2bits_op final : public jlm::rvsdg::unary_op inline size_t nbits() const noexcept { - return static_cast(&result(0).type())->nbits(); + return std::static_pointer_cast(result(0))->nbits(); } static std::unique_ptr @@ -658,13 +658,13 @@ class ConstantDataArray final : public jlm::rvsdg::simple_op size_t size() const noexcept { - return static_cast(&result(0).type())->nelements(); + return std::static_pointer_cast(result(0))->nelements(); } const jlm::rvsdg::valuetype & type() const noexcept { - return static_cast(&result(0).type())->element_type(); + return std::static_pointer_cast(result(0))->element_type(); } static std::unique_ptr @@ -822,13 +822,13 @@ class zext_op final : public jlm::rvsdg::unary_op inline size_t nsrcbits() const noexcept { - return static_cast(&argument(0).type())->nbits(); + return std::static_pointer_cast(argument(0))->nbits(); } inline size_t ndstbits() const noexcept { - return static_cast(&result(0).type())->nbits(); + return std::static_pointer_cast(result(0))->nbits(); } static std::unique_ptr @@ -899,7 +899,7 @@ class ConstantFP final : public jlm::rvsdg::simple_op inline const fpsize & size() const noexcept { - return static_cast(&result(0).type())->size(); + return std::static_pointer_cast(result(0))->size(); } static std::unique_ptr @@ -984,7 +984,7 @@ class fpcmp_op final : public jlm::rvsdg::binary_op inline const fpsize & size() const noexcept { - return static_cast(&argument(0).type())->size(); + return std::static_pointer_cast(argument(0))->size(); } static std::unique_ptr @@ -1032,10 +1032,10 @@ class UndefValueOperation final : public jlm::rvsdg::simple_op [[nodiscard]] std::unique_ptr copy() const override; - [[nodiscard]] const 
jlm::rvsdg::type & + [[nodiscard]] const rvsdg::type & GetType() const noexcept { - return result(0).type(); + return *result(0); } static jlm::rvsdg::output * @@ -1107,9 +1107,7 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op const jlm::rvsdg::valuetype & GetType() const noexcept { - auto & type = result(0).type(); - JLM_ASSERT(dynamic_cast(&type)); - return *static_cast(&type); + return *util::AssertedCast(result(0).get()); } static std::unique_ptr @@ -1195,7 +1193,7 @@ class fpbin_op final : public jlm::rvsdg::binary_op inline const fpsize & size() const noexcept { - return static_cast(&result(0).type())->size(); + return std::static_pointer_cast(result(0))->size(); } static std::unique_ptr @@ -1272,13 +1270,13 @@ class fpext_op final : public jlm::rvsdg::unary_op inline const fpsize & srcsize() const noexcept { - return static_cast(&argument(0).type())->size(); + return std::static_pointer_cast(argument(0))->size(); } inline const fpsize & dstsize() const noexcept { - return static_cast(&result(0).type())->size(); + return std::static_pointer_cast(result(0))->size(); } static std::unique_ptr @@ -1331,7 +1329,7 @@ class fpneg_op final : public jlm::rvsdg::unary_op const fpsize & size() const noexcept { - return static_cast(&argument(0).type())->size(); + return std::static_pointer_cast(argument(0))->size(); } static std::unique_ptr @@ -1407,13 +1405,13 @@ class fptrunc_op final : public jlm::rvsdg::unary_op inline const fpsize & srcsize() const noexcept { - return static_cast(&argument(0).type())->size(); + return std::static_pointer_cast(argument(0))->size(); } inline const fpsize & dstsize() const noexcept { - return static_cast(&result(0).type())->size(); + return std::static_pointer_cast(result(0))->size(); } static std::unique_ptr @@ -1592,7 +1590,7 @@ class ConstantStruct final : public jlm::rvsdg::simple_op const StructType & type() const noexcept { - return *static_cast(&result(0).type()); + return *std::static_pointer_cast(result(0)); 
} static std::unique_ptr @@ -1693,13 +1691,13 @@ class trunc_op final : public jlm::rvsdg::unary_op inline size_t nsrcbits() const noexcept { - return static_cast(&argument(0).type())->nbits(); + return std::static_pointer_cast(argument(0))->nbits(); } inline size_t ndstbits() const noexcept { - return static_cast(&result(0).type())->nbits(); + return std::static_pointer_cast(result(0))->nbits(); } static std::unique_ptr @@ -1873,13 +1871,13 @@ class ConstantArray final : public jlm::rvsdg::simple_op size_t size() const noexcept { - return static_cast(&result(0).type())->nelements(); + return std::static_pointer_cast(result(0))->nelements(); } const jlm::rvsdg::valuetype & type() const noexcept { - return static_cast(&result(0).type())->element_type(); + return std::static_pointer_cast(result(0))->element_type(); } static std::unique_ptr @@ -2149,17 +2147,17 @@ class vectorunary_op final : public jlm::rvsdg::simple_op : simple_op({ operand }, { result }), op_(op.copy()) { - if (operand->type() != op.argument(0).type()) + if (operand->type() != *op.argument(0)) { auto received = operand->type().debug_string(); - auto expected = op.argument(0).type().debug_string(); + auto expected = op.argument(0)->debug_string(); throw jlm::util::error(jlm::util::strfmt("expected ", expected, ", got ", received)); } - if (result->type() != op.result(0).type()) + if (result->type() != *op.result(0)) { auto received = result->type().debug_string(); - auto expected = op.result(0).type().debug_string(); + auto expected = op.result(0)->debug_string(); throw jlm::util::error(jlm::util::strfmt("expected ", expected, ", got ", received)); } } @@ -2244,17 +2242,17 @@ class vectorbinary_op final : public jlm::rvsdg::simple_op if (*op1 != *op2) throw jlm::util::error("expected the same vector types."); - if (op1->type() != binop.argument(0).type()) + if (op1->type() != *binop.argument(0)) { auto received = op1->type().debug_string(); - auto expected = binop.argument(0).type().debug_string(); 
+ auto expected = binop.argument(0)->debug_string(); throw jlm::util::error(jlm::util::strfmt("expected ", expected, ", got ", received)); } - if (result->type() != binop.result(0).type()) + if (result->type() != *binop.result(0)) { auto received = result->type().debug_string(); - auto expected = binop.result(0).type().debug_string(); + auto expected = binop.result(0)->debug_string(); throw jlm::util::error(jlm::util::strfmt("expected ", expected, ", got ", received)); } } @@ -2348,13 +2346,13 @@ class constant_data_vector_op final : public jlm::rvsdg::simple_op size_t size() const noexcept { - return static_cast(&result(0).type())->size(); + return std::static_pointer_cast(result(0))->size(); } const jlm::rvsdg::valuetype & type() const noexcept { - return static_cast(&result(0).type())->type(); + return std::static_pointer_cast(result(0))->type(); } static std::unique_ptr @@ -2415,7 +2413,7 @@ class ExtractValue final : public jlm::rvsdg::simple_op const jlm::rvsdg::valuetype & type() const noexcept { - return *static_cast(&argument(0).type()); + return *std::static_pointer_cast(argument(0)); } static inline std::unique_ptr @@ -2481,14 +2479,14 @@ class malloc_op final : public jlm::rvsdg::simple_op const jlm::rvsdg::bittype & size_type() const noexcept { - return *static_cast(&argument(0).type()); + return *std::static_pointer_cast(argument(0)); } FunctionType fcttype() const { JLM_ASSERT(narguments() == 1 && nresults() == 2); - return FunctionType({ argument(0).Type() }, { result(0).Type(), result(1).Type() }); + return FunctionType({ argument(0) }, { result(0), result(1) }); } static std::unique_ptr diff --git a/jlm/llvm/ir/operators/sext.hpp b/jlm/llvm/ir/operators/sext.hpp index 003eed4ac..555ff49b1 100644 --- a/jlm/llvm/ir/operators/sext.hpp +++ b/jlm/llvm/ir/operators/sext.hpp @@ -64,13 +64,13 @@ class sext_op final : public rvsdg::unary_op inline size_t nsrcbits() const noexcept { - return static_cast(&argument(0).type())->nbits(); + return 
std::static_pointer_cast(argument(0))->nbits(); } inline size_t ndstbits() const noexcept { - return static_cast(&result(0).type())->nbits(); + return std::static_pointer_cast(result(0))->nbits(); } static std::unique_ptr diff --git a/jlm/llvm/ir/tac.cpp b/jlm/llvm/ir/tac.cpp index 2b474b93a..38f2545fc 100644 --- a/jlm/llvm/ir/tac.cpp +++ b/jlm/llvm/ir/tac.cpp @@ -35,7 +35,7 @@ check_operands( for (size_t n = 0; n < operands.size(); n++) { - if (operands[n]->type() != operation.argument(n).type()) + if (operands[n]->type() != *operation.argument(n)) throw util::error("invalid type."); } } @@ -50,7 +50,7 @@ check_results( for (size_t n = 0; n < results.size(); n++) { - if (results[n]->type() != operation.result(n).type()) + if (results[n]->type() != *operation.result(n)) throw util::error("invalid type."); } } diff --git a/jlm/llvm/ir/tac.hpp b/jlm/llvm/ir/tac.hpp index 474ac700d..29f7405b7 100644 --- a/jlm/llvm/ir/tac.hpp +++ b/jlm/llvm/ir/tac.hpp @@ -158,7 +158,7 @@ class tac final for (size_t n = 0; n < operation.nresults(); n++) { - auto & type = operation.result(n).Type(); + auto & type = operation.result(n); results_.push_back(tacvariable::create(this, type, names[n])); } } diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index 2831a3a8f..b09644259 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -179,14 +179,14 @@ class flattened_binary_op final : public simple_op virtual ~flattened_binary_op() noexcept; inline flattened_binary_op(std::unique_ptr op, size_t narguments) noexcept - : simple_op({ narguments, op->argument(0).Type() }, { op->result(0).Type() }), + : simple_op({ narguments, op->argument(0) }, { op->result(0) }), op_(std::move(op)) { JLM_ASSERT(op_->is_associative()); } inline flattened_binary_op(const binary_op & op, size_t narguments) - : simple_op({ narguments, op.argument(0).Type() }, { op.result(0).Type() }), + : simple_op({ narguments, op.argument(0) }, { op.result(0) }), 
op_(std::unique_ptr(static_cast(op.copy().release()))) { JLM_ASSERT(op_->is_associative()); diff --git a/jlm/rvsdg/bitstring/bitoperation-classes.hpp b/jlm/rvsdg/bitstring/bitoperation-classes.hpp index b776ac574..941f49635 100644 --- a/jlm/rvsdg/bitstring/bitoperation-classes.hpp +++ b/jlm/rvsdg/bitstring/bitoperation-classes.hpp @@ -29,7 +29,7 @@ class bitunary_op : public jlm::rvsdg::unary_op inline const bittype & type() const noexcept { - return *static_cast(&argument(0).type()); + return *std::static_pointer_cast(argument(0)); } /* reduction methods */ @@ -78,7 +78,7 @@ class bitbinary_op : public jlm::rvsdg::binary_op inline const bittype & type() const noexcept { - return *static_cast(&result(0).type()); + return *std::static_pointer_cast(result(0)); } }; @@ -117,7 +117,7 @@ class bitcompare_op : public jlm::rvsdg::binary_op inline const bittype & type() const noexcept { - return *static_cast(&argument(0).type()); + return *std::static_pointer_cast(argument(0)); } }; diff --git a/jlm/rvsdg/bitstring/slice.hpp b/jlm/rvsdg/bitstring/slice.hpp index 4dfc50a25..dcbace9ba 100644 --- a/jlm/rvsdg/bitstring/slice.hpp +++ b/jlm/rvsdg/bitstring/slice.hpp @@ -48,7 +48,7 @@ class bitslice_op : public jlm::rvsdg::unary_op inline size_t high() const noexcept { - return low_ + static_cast(&result(0).type())->nbits(); + return low_ + std::static_pointer_cast(result(0))->nbits(); } virtual std::unique_ptr @@ -57,7 +57,7 @@ class bitslice_op : public jlm::rvsdg::unary_op inline const type & argument_type() const noexcept { - return *static_cast(&argument(0).type()); + return *std::static_pointer_cast(argument(0)); } private: diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index 7cef57853..b0252380a 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -172,7 +172,7 @@ class match_op final : public jlm::rvsdg::unary_op inline uint64_t nalternatives() const noexcept { - return static_cast(&result(0).type())->nalternatives(); + return 
std::static_pointer_cast(result(0))->nalternatives(); } inline uint64_t @@ -194,7 +194,7 @@ class match_op final : public jlm::rvsdg::unary_op inline size_t nbits() const noexcept { - return static_cast(&argument(0).type())->nbits(); + return std::static_pointer_cast(argument(0))->nbits(); } inline const_iterator diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index e918fe153..61d2b25ef 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -21,24 +21,6 @@ input::~input() noexcept origin()->remove_user(this); } -input::input( - jlm::rvsdg::output * origin, - jlm::rvsdg::region * region, - const jlm::rvsdg::port & port) - : index_(0), - origin_(origin), - region_(region), - port_(port.copy()) -{ - if (region != origin->region()) - throw jlm::util::error("Invalid operand region."); - - if (port.type() != origin->type()) - throw jlm::util::type_error(port.type().debug_string(), origin->type().debug_string()); - - origin->add_user(this); -} - input::input( jlm::rvsdg::output * origin, jlm::rvsdg::region * region, @@ -46,13 +28,13 @@ input::input( : index_(0), origin_(origin), region_(region), - port_(std::make_unique(std::move(type))) + Type_(std::move(type)) { if (region != origin->region()) throw jlm::util::error("Invalid operand region."); - if (port_->type() != origin->type()) - throw jlm::util::type_error(port_->type().debug_string(), origin->type().debug_string()); + if (*Type() != origin->type()) + throw jlm::util::type_error(Type()->debug_string(), origin->type().debug_string()); origin->add_user(this); } @@ -101,16 +83,10 @@ output::~output() noexcept JLM_ASSERT(nusers() == 0); } -output::output(jlm::rvsdg::region * region, const jlm::rvsdg::port & port) - : index_(0), - region_(region), - port_(port.copy()) -{} - output::output(jlm::rvsdg::region * region, std::shared_ptr type) : index_(0), region_(region), - port_(std::make_unique(std::move(type))) + Type_(std::move(type)) {} std::string diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 
17d24780d..035f97a18 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -40,8 +40,6 @@ class input public: virtual ~input() noexcept; - input(jlm::rvsdg::output * origin, jlm::rvsdg::region * region, const jlm::rvsdg::port & port); - input( jlm::rvsdg::output * origin, jlm::rvsdg::region * region, @@ -72,16 +70,16 @@ class input void divert_to(jlm::rvsdg::output * new_origin); - inline const jlm::rvsdg::type & + [[nodiscard]] const rvsdg::type & type() const noexcept { - return port_->type(); + return *Type(); } - inline const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { - return port_->Type(); + return Type_; } inline jlm::rvsdg::region * @@ -90,24 +88,9 @@ class input return region_; } - inline const jlm::rvsdg::port & - port() const noexcept - { - return *port_; - } - virtual std::string debug_string() const; - inline void - replace(const jlm::rvsdg::port & port) - { - if (port_->type() != port.type()) - throw jlm::util::type_error(port_->type().debug_string(), port.type().debug_string()); - - port_ = port.copy(); - } - /** * Retrieve the associated node from \p input if \p input is derived from jlm::rvsdg::node_input. 
* @@ -282,7 +265,7 @@ class input size_t index_; jlm::rvsdg::output * origin_; jlm::rvsdg::region * region_; - std::unique_ptr port_; + std::shared_ptr Type_; }; template @@ -309,8 +292,6 @@ class output public: virtual ~output() noexcept; - output(jlm::rvsdg::region * region, const jlm::rvsdg::port & port); - output(jlm::rvsdg::region * region, std::shared_ptr type); output(const output &) = delete; @@ -372,16 +353,16 @@ class output return users_.end(); } - inline const jlm::rvsdg::type & + [[nodiscard]] const rvsdg::type & type() const noexcept { - return port_->type(); + return *Type(); } - inline const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { - return port_->Type(); + return Type_; } inline jlm::rvsdg::region * @@ -390,24 +371,9 @@ class output return region_; } - inline const jlm::rvsdg::port & - port() const noexcept - { - return *port_; - } - virtual std::string debug_string() const; - inline void - replace(const jlm::rvsdg::port & port) - { - if (port_->type() != port.type()) - throw jlm::util::type_error(port_->type().debug_string(), port.type().debug_string()); - - port_ = port.copy(); - } - template class iterator { @@ -577,7 +543,7 @@ class output size_t index_; jlm::rvsdg::region * region_; - std::unique_ptr port_; + std::shared_ptr Type_; std::unordered_set users_; }; diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index a1b6ae6db..778d20090 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -11,31 +11,6 @@ namespace jlm::rvsdg { -/* port */ - -port::~port() -{} - -port::port(std::shared_ptr type) - : type_(std::move(type)) -{} - -bool -port::operator==(const port & other) const noexcept -{ - // If both types are identical (same pointer), no need - // to semantically check for equality. 
- return type_ == other.type_ || *type_ == *other.type_; -} - -std::unique_ptr -port::copy() const -{ - return std::make_unique(*this); -} - -/* operation */ - operation::~operation() noexcept {} @@ -56,7 +31,7 @@ simple_op::narguments() const noexcept return operands_.size(); } -const jlm::rvsdg::port & +const std::shared_ptr & simple_op::argument(size_t index) const noexcept { JLM_ASSERT(index < narguments()); @@ -69,7 +44,7 @@ simple_op::nresults() const noexcept return results_.size(); } -const jlm::rvsdg::port & +const std::shared_ptr & simple_op::result(size_t index) const noexcept { JLM_ASSERT(index < nresults()); diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index 5923a9992..067e2ef2a 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -24,55 +24,6 @@ class region; class simple_normal_form; class structural_normal_form; -/* port */ - -class port -{ -public: - virtual ~port(); - - explicit port(std::shared_ptr type); - - port(const port & other) = default; - - port(port && other) = default; - - port & - operator=(const port & other) = default; - - port & - operator=(port && other) = default; - - virtual bool - operator==(const port &) const noexcept; - - inline bool - operator!=(const port & other) const noexcept - { - return !(*this == other); - } - - inline const jlm::rvsdg::type & - type() const noexcept - { - return *type_; - } - - inline const std::shared_ptr & - Type() const noexcept - { - return type_; - } - - virtual std::unique_ptr - copy() const; - -private: - std::shared_ptr type_; -}; - -/* operation */ - class operation { public: @@ -115,38 +66,31 @@ class simple_op : public operation public: virtual ~simple_op(); - inline simple_op( + simple_op( std::vector> operands, std::vector> results) - { - for (auto & op : operands) - { - operands_.push_back(port(std::move(op))); - } - for (auto & res : results) - { - results_.push_back(port(std::move(res))); - } - } + : operands_(std::move(operands)), + 
results_(std::move(results)) + {} size_t narguments() const noexcept; - const jlm::rvsdg::port & + [[nodiscard]] const std::shared_ptr & argument(size_t index) const noexcept; size_t nresults() const noexcept; - const jlm::rvsdg::port & + [[nodiscard]] const std::shared_ptr & result(size_t index) const noexcept; static jlm::rvsdg::simple_normal_form * normal_form(jlm::rvsdg::graph * graph) noexcept; private: - std::vector results_; - std::vector operands_; + std::vector> operands_; + std::vector> results_; }; /* structural operation */ diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 12b0abbe6..fe3b6ac90 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -14,8 +14,6 @@ namespace jlm::rvsdg { -/* argument */ - argument::~argument() noexcept { on_output_destroy(this); @@ -24,27 +22,6 @@ argument::~argument() noexcept input()->arguments.erase(this); } -argument::argument( - jlm::rvsdg::region * region, - jlm::rvsdg::structural_input * input, - const jlm::rvsdg::port & port) - : output(region, port), - input_(input) -{ - if (input) - { - if (input->node() != region->node()) - throw jlm::util::error("Argument cannot be added to input."); - - if (input->type() != *Type()) - { - throw util::type_error(Type()->debug_string(), input->type().debug_string()); - } - - input->arguments.push_back(this); - } -} - argument::argument( jlm::rvsdg::region * region, jlm::rvsdg::structural_input * input, @@ -69,18 +46,7 @@ argument::argument( argument & argument::Copy(rvsdg::region & region, structural_input * input) { - return *argument::create(®ion, input, port()); -} - -jlm::rvsdg::argument * -argument::create( - jlm::rvsdg::region * region, - structural_input * input, - const jlm::rvsdg::port & port) -{ - auto argument = new jlm::rvsdg::argument(region, input, port); - region->append_argument(argument); - return argument; + return *argument::create(®ion, input, Type()); } jlm::rvsdg::argument * @@ -94,8 +60,6 @@ argument::create( return argument; } -/* 
result */ - result::~result() noexcept { on_input_destroy(this); @@ -104,28 +68,6 @@ result::~result() noexcept output()->results.erase(this); } -result::result( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, - const jlm::rvsdg::port & port) - : input(origin, region, port), - output_(output) -{ - if (output) - { - if (output->node() != region->node()) - throw jlm::util::error("Result cannot be added to output."); - - if (*Type() != *output->Type()) - { - throw jlm::util::type_error(Type()->debug_string(), output->Type()->debug_string()); - } - - output->results.push_back(this); - } -} - result::result( jlm::rvsdg::region * region, jlm::rvsdg::output * origin, @@ -151,19 +93,7 @@ result::result( result & result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) { - return *result::create(origin.region(), &origin, output, port()); -} - -jlm::rvsdg::result * -result::create( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, - const jlm::rvsdg::port & port) -{ - auto result = new jlm::rvsdg::result(region, origin, output, port); - region->append_result(result); - return result; + return *result::create(origin.region(), &origin, output, Type()); } jlm::rvsdg::result * diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 74d7fa850..59627d176 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -43,11 +43,6 @@ class argument : public output virtual ~argument() noexcept; protected: - argument( - jlm::rvsdg::region * region, - jlm::rvsdg::structural_input * input, - const jlm::rvsdg::port & port); - argument( jlm::rvsdg::region * region, jlm::rvsdg::structural_input * input, @@ -84,9 +79,6 @@ class argument : public output virtual argument & Copy(rvsdg::region & region, structural_input * input); - static jlm::rvsdg::argument * - create(jlm::rvsdg::region * region, structural_input * input, const jlm::rvsdg::port & port); - 
static jlm::rvsdg::argument * create( jlm::rvsdg::region * region, @@ -109,12 +101,6 @@ class result : public input virtual ~result() noexcept; protected: - result( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, - const jlm::rvsdg::port & port); - result( jlm::rvsdg::region * region, jlm::rvsdg::output * origin, @@ -153,13 +139,6 @@ class result : public input virtual result & Copy(rvsdg::output & origin, structural_output * output); - static jlm::rvsdg::result * - create( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, - const jlm::rvsdg::port & port); - static jlm::rvsdg::result * create( jlm::rvsdg::region * region, diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index bc7e5fc58..58c80506c 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -61,12 +61,11 @@ simple_node::simple_node( for (size_t n = 0; n < operation().narguments(); n++) { - node::add_input( - std::make_unique(this, operands[n], operation().argument(n).Type())); + node::add_input(std::make_unique(this, operands[n], operation().argument(n))); } for (size_t n = 0; n < operation().nresults(); n++) - node::add_output(std::make_unique(this, operation().result(n).Type())); + node::add_output(std::make_unique(this, operation().result(n))); on_node_create(this); } diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 5ee0da7b9..8d3b7d471 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -75,7 +75,7 @@ perform_multiple_origin_reduction( const std::vector & operands) { std::unordered_set set(operands.begin(), operands.end()); - return create_state_mux(op.result(0).Type(), { set.begin(), set.end() }, op.nresults()); + return create_state_mux(op.result(0), { set.begin(), set.end() }, op.nresults()); } static std::vector @@ -102,7 +102,7 @@ perform_mux_mux_reduction( new_operands.push_back(operand); } - return 
create_state_mux(op.result(0).Type(), new_operands, op.nresults()); + return create_state_mux(op.result(0), new_operands, op.nresults()); } mux_normal_form::~mux_normal_form() noexcept diff --git a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp index 4264435ef..f797bfc87 100644 --- a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp @@ -29,7 +29,7 @@ SetupControlFlowGraph( std::vector operands; for (size_t n = 0; n < operation.narguments(); n++) { - auto & operandType = operation.argument(n).Type(); + auto & operandType = operation.argument(n); auto operand = cfg->entry()->append_argument(argument::create("", operandType)); operands.emplace_back(operand); } @@ -60,13 +60,13 @@ SetupFunctionWithThreeAddressCode(const jlm::rvsdg::simple_op & operation) std::vector> operandTypes; for (size_t n = 0; n < operation.narguments(); n++) { - operandTypes.emplace_back(operation.argument(n).Type()); + operandTypes.emplace_back(operation.argument(n)); } std::vector> resultTypes; for (size_t n = 0; n < operation.nresults(); n++) { - resultTypes.emplace_back(operation.result(n).Type()); + resultTypes.emplace_back(operation.result(n)); } auto functionType = FunctionType::Create(operandTypes, resultTypes); diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 971004582..947b73035 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -301,8 +301,8 @@ TestDivOperation() const jlm::rvsdg::bitconstant_op * DivInput1Constant = dynamic_cast(&DivInput1Node->operation()); assert(DivInput1Constant->value() == 5); - assert(dynamic_cast(&DivInput1Constant->result(0).type())); - assert(dynamic_cast(&DivInput1Constant->result(0).type())->nbits() == 32); + 
assert(is(DivInput1Constant->result(0))); + assert(std::dynamic_pointer_cast(DivInput1Constant->result(0))->nbits() == 32); } } return 0; @@ -498,8 +498,8 @@ TestCompZeroExt() const jlm::rvsdg::bitconstant_op * Const2Op = dynamic_cast(&Const2Node->operation()); assert(Const2Op->value() == 5); - assert(dynamic_cast(&Const2Op->result(0).type())); - assert(dynamic_cast(&Const2Op->result(0).type())->nbits() == 32); + assert(is(Const2Op->result(0))); + assert(std::dynamic_pointer_cast(Const2Op->result(0))->nbits() == 32); // Check add op const jlm::rvsdg::bitadd_op * AddOp = @@ -522,8 +522,8 @@ TestCompZeroExt() const jlm::rvsdg::bitconstant_op * Const1Op = dynamic_cast(&Const1Node->operation()); assert(Const1Op->value() == 20); - assert(dynamic_cast(&Const1Op->result(0).type())); - assert(dynamic_cast(&Const1Op->result(0).type())->nbits() == 32); + assert(is(Const1Op->result(0))); + assert(std::dynamic_pointer_cast(Const1Op->result(0))->nbits() == 32); } } return 0; @@ -672,8 +672,8 @@ TestMatchOp() auto matchOp = dynamic_cast(&matchNode->operation()); assert(matchOp->narguments() == 1); - assert(dynamic_cast(&matchOp->argument(0).type())); - assert(dynamic_cast(&matchOp->argument(0).type())->nbits() == 32); + assert(is(matchOp->argument(0))); + assert(std::dynamic_pointer_cast(matchOp->argument(0))->nbits() == 32); // 3 alternatives + default assert(matchOp->nalternatives() == 4); diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index 8be81deb4..73babebd6 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -80,10 +80,7 @@ ArgumentInputTypeMismatch() exceptionWasCaught = false; try { - jlm::rvsdg::argument::create( - structuralNode->subregion(0), - structuralInput, - jlm::rvsdg::port(stateType)); + jlm::rvsdg::argument::create(structuralNode->subregion(0), structuralInput, stateType); // The line below should not be executed as the line above is expected to throw an exception. 
assert(false); } diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index e15d8245a..59b2dd626 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -123,12 +123,9 @@ RemoveResultsWhere() auto valueType = jlm::tests::valuetype::Create(); auto node = jlm::tests::test_op::Create(®ion, {}, {}, { valueType }); - auto result0 = - jlm::rvsdg::result::create(®ion, node->output(0), nullptr, jlm::rvsdg::port(valueType)); - auto result1 = - jlm::rvsdg::result::create(®ion, node->output(0), nullptr, jlm::rvsdg::port(valueType)); - auto result2 = - jlm::rvsdg::result::create(®ion, node->output(0), nullptr, jlm::rvsdg::port(valueType)); + auto result0 = jlm::rvsdg::result::create(®ion, node->output(0), nullptr, valueType); + auto result1 = jlm::rvsdg::result::create(®ion, node->output(0), nullptr, valueType); + auto result2 = jlm::rvsdg::result::create(®ion, node->output(0), nullptr, valueType); // Act & Arrange assert(region.nresults() == 3); @@ -177,9 +174,9 @@ RemoveArgumentsWhere() jlm::rvsdg::region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); - auto argument0 = jlm::rvsdg::argument::create(®ion, nullptr, jlm::rvsdg::port(valueType)); - auto argument1 = jlm::rvsdg::argument::create(®ion, nullptr, jlm::rvsdg::port(valueType)); - auto argument2 = jlm::rvsdg::argument::create(®ion, nullptr, jlm::rvsdg::port(valueType)); + auto argument0 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); + auto argument1 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); + auto argument2 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); auto node = jlm::tests::test_op::Create(®ion, { valueType }, { argument1 }, { valueType }); @@ -229,9 +226,9 @@ PruneArguments() jlm::rvsdg::region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); - auto argument0 = jlm::rvsdg::argument::create(®ion, nullptr, jlm::rvsdg::port(valueType)); - 
jlm::rvsdg::argument::create(®ion, nullptr, jlm::rvsdg::port(valueType)); - auto argument2 = jlm::rvsdg::argument::create(®ion, nullptr, jlm::rvsdg::port(valueType)); + auto argument0 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); + jlm::rvsdg::argument::create(®ion, nullptr, valueType); + auto argument2 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); auto node = jlm::tests::test_op::Create( ®ion, diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index 2b0536c00..c89a04332 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -92,7 +92,7 @@ ResultInputTypeMismatch() structuralNode->subregion(0), simpleNode->output(0), structuralOutput, - jlm::rvsdg::port(stateType)); + stateType); // The line below should not be executed as the line above is expected to throw an exception. assert(false); } diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index a052c02e3..0563098cc 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -73,10 +73,6 @@ class unary_op final : public rvsdg::unary_op public: virtual ~unary_op() noexcept; - inline unary_op(const rvsdg::port & srcport, const rvsdg::port & dstport) noexcept - : rvsdg::unary_op(srcport.Type(), dstport.Type()) - {} - inline unary_op( std::shared_ptr srctype, std::shared_ptr dsttype) noexcept @@ -98,26 +94,6 @@ class unary_op final : public rvsdg::unary_op virtual std::unique_ptr copy() const override; - static inline rvsdg::node * - create( - rvsdg::region * region, - const rvsdg::port & srcport, - rvsdg::output * operand, - const rvsdg::port & dstport) - { - return rvsdg::simple_node::create(region, std::move(unary_op(srcport, dstport)), { operand }); - } - - static inline rvsdg::output * - create_normalized( - const rvsdg::port & srcport, - rvsdg::output * operand, - const rvsdg::port & dstport) - { - unary_op op(srcport, dstport); - return rvsdg::simple_node::create_normalized(operand->region(), op, { operand 
})[0]; - } - static inline rvsdg::node * create( rvsdg::region * region, @@ -189,17 +165,6 @@ class binary_op final : public rvsdg::binary_op virtual std::unique_ptr copy() const override; - static inline rvsdg::node * - create( - const rvsdg::port & srcport, - const rvsdg::port & dstport, - rvsdg::output * op1, - rvsdg::output * op2) - { - binary_op op(srcport.Type(), dstport.Type(), rvsdg::binary_op::flags::none); - return rvsdg::simple_node::create(op1->region(), op, { op1, op2 }); - } - static inline rvsdg::node * create( const std::shared_ptr & srctype, @@ -211,17 +176,6 @@ class binary_op final : public rvsdg::binary_op return rvsdg::simple_node::create(op1->region(), op, { op1, op2 }); } - static inline rvsdg::output * - create_normalized( - const rvsdg::port & srcport, - const rvsdg::port & dstport, - rvsdg::output * op1, - rvsdg::output * op2) - { - binary_op op(srcport.Type(), dstport.Type(), rvsdg::binary_op::flags::none); - return rvsdg::simple_node::create_normalized(op1->region(), op, { op1, op2 })[0]; - } - static inline rvsdg::output * create_normalized( const std::shared_ptr srctype, From 84d201a0a98ba60fa9d3ac5ed2d510f3b298cf17 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Thu, 22 Aug 2024 07:17:58 +0200 Subject: [PATCH 051/170] Porting to llvm-18 (#537) Clang format seems to have caused some files to be formatted differently, which makes the diff larger than necessary. 
--- .github/actions/InstallPackages/action.yml | 6 +-- configure.sh | 10 +++- .../FirrtlToVerilogConverter.cpp | 50 +++++++++---------- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 33 ++++++------ .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 5 +- jlm/hls/ir/hls.hpp | 4 +- jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 4 ++ .../frontend/LlvmInstructionConversion.cpp | 2 +- jlm/llvm/frontend/LlvmModuleConversion.cpp | 4 ++ jlm/llvm/ir/attribute.hpp | 4 ++ jlm/llvm/ir/operators/Load.cpp | 3 +- jlm/llvm/ir/operators/Load.hpp | 12 ++--- jlm/llvm/ir/operators/MemCpy.hpp | 8 +-- .../ir/operators/MemoryStateOperations.hpp | 4 +- jlm/llvm/ir/operators/Store.cpp | 3 +- jlm/llvm/ir/operators/Store.hpp | 12 ++--- .../RegionAwareMemoryNodeProvider.cpp | 4 +- jlm/rvsdg/binary.cpp | 3 +- jlm/rvsdg/bitstring/concat.cpp | 3 +- jlm/rvsdg/gamma.cpp | 3 +- jlm/rvsdg/node.cpp | 3 +- jlm/rvsdg/nullary.cpp | 3 +- jlm/rvsdg/simple-normal-form.cpp | 3 +- jlm/rvsdg/statemux.cpp | 3 +- jlm/rvsdg/structural-normal-form.cpp | 3 +- jlm/rvsdg/unary.cpp | 3 +- scripts/build-circt.sh | 4 +- scripts/build-mlir.sh | 4 +- scripts/run-llvm-test-suite.sh | 2 +- tests/jlm/llvm/frontend/llvm/LoadTests.cpp | 2 +- tests/jlm/llvm/frontend/llvm/MemCpyTests.cpp | 2 +- tests/jlm/llvm/frontend/llvm/StoreTests.cpp | 2 +- 32 files changed, 121 insertions(+), 90 deletions(-) diff --git a/.github/actions/InstallPackages/action.yml b/.github/actions/InstallPackages/action.yml index 0faf3409a..abc62af62 100644 --- a/.github/actions/InstallPackages/action.yml +++ b/.github/actions/InstallPackages/action.yml @@ -4,7 +4,7 @@ description: "Installs packages that the jlm framework depends on." inputs: llvm-version: description: "LLVM/MLIR version that is installed" - default: 17 + default: 18 required: false install-llvm: @@ -85,8 +85,8 @@ runs: run: | sudo apt-get install libmlir-${{inputs.llvm-version}}-dev mlir-${{inputs.llvm-version}}-tools if ! 
[ -f /usr/lib/x86_64-linux-gnu/libMLIR.so ]; then - sudo ln -s /usr/lib/llvm-${{inputs.llvm-version}}/lib/libMLIR.so.${{inputs.llvm-version}} /usr/lib/x86_64-linux-gnu/ - sudo ln -s /usr/lib/llvm-${{inputs.llvm-version}}/lib/libMLIR.so.${{inputs.llvm-version}} /usr/lib/x86_64-linux-gnu/libMLIR.so + sudo ln -s /usr/lib/llvm-${{inputs.llvm-version}}/lib/libMLIR.so.${{inputs.llvm-version}}* /usr/lib/x86_64-linux-gnu/ + sudo ln -s /usr/lib/llvm-${{inputs.llvm-version}}/lib/libMLIR.so.${{inputs.llvm-version}}* /usr/lib/x86_64-linux-gnu/libMLIR.so fi shell: bash diff --git a/configure.sh b/configure.sh index d9c9736d2..a60e70547 100755 --- a/configure.sh +++ b/configure.sh @@ -1,7 +1,7 @@ #!/bin/bash set -eu -LLVM_VERSION=17 +LLVM_VERSION=18 # Default values for all tunables. TARGET="release" @@ -126,20 +126,26 @@ if [ "${ENABLE_HLS}" == "yes" ] ; then "-lCIRCTImportFIRFile" "-lCIRCTFIRRTLTransforms" "-lCIRCTHWTransforms" + "-lCIRCTOMTransforms" + "-lCIRCTSim" "-lCIRCTSVTransforms" "-lCIRCTTransforms" + "-lCIRCTTargetDebugInfo" "-lCIRCTSV" "-lCIRCTComb" + "-lCIRCTSupport" + "-lCIRCTDebug" "-lCIRCTLTL" "-lCIRCTVerif" "-lCIRCTFIRRTL" "-lCIRCTSeq" + "-lCIRCTSeqToSV" "-lCIRCTSeqTransforms" "-lCIRCTHW" "-lCIRCTVerifToSV" + "-lCIRCTSimToSV" "-lCIRCTExportChiselInterface" "-lCIRCTOM" - "-lCIRCTSupport" "-lMLIR" ) fi diff --git a/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp b/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp index badc6af0e..92429b123 100644 --- a/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp +++ b/jlm/hls/backend/firrtl2verilog/FirrtlToVerilogConverter.cpp @@ -43,6 +43,7 @@ FirrtlToVerilogConverter::Convert( firrtl::FIRParserOptions options; options.infoLocatorHandling = firrtl::FIRParserOptions::InfoLocHandling::IgnoreInfo; options.numAnnotationFiles = 0; + options.scalarizePublicModules = true; options.scalarizeExtModules = true; auto module = importFIRFile(sourceMgr, &context, ts, options); if (!module) @@ -51,20 +52,30 
@@ FirrtlToVerilogConverter::Convert( return false; } - // Manually set up the options for the firtool - cl::OptionCategory mainCategory("firtool Options"); - firtool::FirtoolOptions firtoolOptions(mainCategory); - firtoolOptions.preserveAggregate = firrtl::PreserveAggregate::PreserveMode::None; - firtoolOptions.preserveMode = firrtl::PreserveValues::PreserveMode::None; - firtoolOptions.buildMode = firtool::FirtoolOptions::BuildModeRelease; - firtoolOptions.exportChiselInterface = false; + // Manually set the options for the firtool + firtool::FirtoolOptions firtoolOptions; + firtoolOptions.setOutputFilename(outputVerilogFile.to_str()); + firtoolOptions.setPreserveAggregate(firrtl::PreserveAggregate::PreserveMode::None); + firtoolOptions.setPreserveValues(firrtl::PreserveValues::PreserveMode::None); + firtoolOptions.setBuildMode(firtool::FirtoolOptions::BuildModeDefault); + firtoolOptions.setChiselInterfaceOutDirectory(""); + firtoolOptions.setDisableHoistingHWPassthrough(true); + firtoolOptions.setOmirOutFile(""); + firtoolOptions.setBlackBoxRootPath(""); + firtoolOptions.setReplSeqMemFile(""); + firtoolOptions.setOutputAnnotationFilename(""); // Populate the pass manager and apply them to the module mlir::PassManager pm(&context); + if (failed(firtool::populatePreprocessTransforms(pm, firtoolOptions))) + { + std::cerr << "Failed to populate preprocess transforms" << std::endl; + return false; + } // Firtool sets a blackBoxRoot based on the inputFilename path, but this functionality is not used // so we set it to an empty string (the final argument) - if (failed(firtool::populateCHIRRTLToLowFIRRTL(pm, firtoolOptions, *module, ""))) + if (failed(firtool::populateCHIRRTLToLowFIRRTL(pm, firtoolOptions, ""))) { std::cerr << "Failed to populate CHIRRTL to LowFIRRTL" << std::endl; return false; @@ -79,28 +90,17 @@ FirrtlToVerilogConverter::Convert( std::cerr << "Failed to populate HW to SV" << std::endl; return false; } - - if (failed(pm.run(module.get()))) + 
std::error_code errorCode; + llvm::raw_fd_ostream os(outputVerilogFile.to_str(), errorCode); + if (failed(firtool::populateExportVerilog(pm, firtoolOptions, os))) { - std::cerr << "Failed to run pass manager" << std::endl; + std::cerr << "Failed to populate Export Verilog" << std::endl; return false; } - mlir::PassManager exportPm(&context); - - // Legalize unsupported operations within the modules. - exportPm.nest().addPass(sv::createHWLegalizeModulesPass()); - - // Tidy up the IR to improve verilog emission quality. - exportPm.nest().addPass(sv::createPrettifyVerilogPass()); - - std::error_code errorCode; - llvm::raw_fd_ostream os(outputVerilogFile.to_str(), errorCode); - exportPm.addPass(createExportVerilogPass(os)); - - if (failed(exportPm.run(module.get()))) + if (failed(pm.run(module.get()))) { - std::cerr << "Failed to run export pass manager" << std::endl; + std::cerr << "Failed to run pass manager" << std::endl; return false; } diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 82f54f722..1848c4166 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -8,7 +8,6 @@ #include #include -#include #include namespace jlm::hls @@ -1135,7 +1134,9 @@ RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node) auto module = Builder_->create( Builder_->getUnknownLoc(), name, - circt::firrtl::ConventionAttr::get(Builder_->getContext(), Convention::Internal), + circt::firrtl::ConventionAttr::get( + Builder_->getContext(), + circt::firrtl::Convention::Internal), ports); auto body = module.getBodyBlock(); @@ -1218,7 +1219,7 @@ RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node) 2, 1, depth, - RUWAttr::New, + circt::firrtl::RUWAttr::New, memNames, "mem"); body->push_back(memory); @@ -2526,7 +2527,9 @@ RhlsToFirrtlConverter::MlirGen(jlm::rvsdg::region * subRegion, mlir::Block 
* cir auto module = Builder_->create( Builder_->getUnknownLoc(), moduleName, - circt::firrtl::ConventionAttr::get(Builder_->getContext(), Convention::Internal), + circt::firrtl::ConventionAttr::get( + Builder_->getContext(), + circt::firrtl::Convention::Internal), ports); // Get the body of the module such that we can add contents to the module auto body = module.getBodyBlock(); @@ -2884,7 +2887,9 @@ RhlsToFirrtlConverter::MlirGen(const llvm::lambda::node * lambdaNode) auto module = Builder_->create( Builder_->getUnknownLoc(), moduleName, - circt::firrtl::ConventionAttr::get(Builder_->getContext(), Convention::Internal), + circt::firrtl::ConventionAttr::get( + Builder_->getContext(), + circt::firrtl::Convention::Internal), ports); // Get the body of the module such that we can add contents to the module auto body = module.getBodyBlock(); @@ -3567,7 +3572,7 @@ RhlsToFirrtlConverter::check_module(circt::firrtl::FModuleOp & module) { auto portName = module.getPortName(i); auto port = module.getArgument(i); - if (portName.startswith("o")) + if (portName.starts_with("o")) { // out port for (auto & use : port.getUses()) @@ -3861,7 +3866,9 @@ RhlsToFirrtlConverter::nodeToModule(const jlm::rvsdg::simple_node * node, bool m return Builder_->create( Builder_->getUnknownLoc(), name, - circt::firrtl::ConventionAttr::get(Builder_->getContext(), Convention::Internal), + circt::firrtl::ConventionAttr::get( + Builder_->getContext(), + circt::firrtl::Convention::Internal), ports); } @@ -4043,11 +4050,7 @@ RhlsToFirrtlConverter::WriteCircuitToFile(const circt::firrtl::CircuitOp circuit std::error_code EC; ::llvm::raw_fd_ostream output(fileName, EC); size_t targetLineLength = 100; - auto status = circt::firrtl::exportFIRFile( - module, - output, - targetLineLength, - circt::firrtl::FIRVersion::defaultFIRVersion()); + auto status = circt::firrtl::exportFIRFile(module, output, targetLineLength, DefaultFIRVersion_); if (status.failed()) { @@ -4078,11 +4081,7 @@ 
RhlsToFirrtlConverter::toString(const circt::firrtl::CircuitOp circuit) ::llvm::raw_string_ostream output(outputString); size_t targetLineLength = 100; - auto status = circt::firrtl::exportFIRFile( - module, - output, - targetLineLength, - circt::firrtl::FIRVersion::defaultFIRVersion()); + auto status = circt::firrtl::exportFIRFile(module, output, targetLineLength, DefaultFIRVersion_); if (status.failed()) throw std::logic_error("Exporting of firrtl failed"); diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 6cb46845c..4127e799a 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -23,6 +23,7 @@ #include #include +#include #include #include #include @@ -48,7 +49,8 @@ class RhlsToFirrtlConverter : public BaseHLS } RhlsToFirrtlConverter() - : Context_(std::make_unique<::mlir::MLIRContext>()) + : Context_(std::make_unique<::mlir::MLIRContext>()), + DefaultFIRVersion_{ 4, 0, 0 } { Context_->getOrLoadDialect(); Builder_ = std::make_unique<::mlir::OpBuilder>(Context_.get()); @@ -279,6 +281,7 @@ class RhlsToFirrtlConverter : public BaseHLS std::unique_ptr<::mlir::OpBuilder> Builder_; std::unique_ptr<::mlir::MLIRContext> Context_; + const circt::firrtl::FIRVersion DefaultFIRVersion_; }; } // namespace jlm::hls diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index c733ed81d..230a1c55b 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -25,8 +25,8 @@ class branch_op final : public jlm::rvsdg::simple_op private: branch_op(size_t nalternatives, const std::shared_ptr & type, bool loop) : jlm::rvsdg::simple_op( - { jlm::rvsdg::ctltype::Create(nalternatives), type }, - { nalternatives, type }), + { jlm::rvsdg::ctltype::Create(nalternatives), type }, + { nalternatives, type }), loop(loop) {} diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 43b2ddabf..630cfbdc0 
100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -179,6 +179,8 @@ convert_attribute_kind(const attribute::kind & kind) { attribute::kind::Builtin, ak::Builtin }, { attribute::kind::Cold, ak::Cold }, { attribute::kind::Convergent, ak::Convergent }, + { attribute::kind::CoroDestroyOnlyWhenComplete, ak::CoroDestroyOnlyWhenComplete }, + { attribute::kind::DeadOnUnwind, ak::DeadOnUnwind }, { attribute::kind::DisableSanitizerInstrumentation, ak::DisableSanitizerInstrumentation }, { attribute::kind::FnRetThunkExtern, ak::FnRetThunkExtern }, { attribute::kind::Hot, ak::Hot }, @@ -214,6 +216,7 @@ convert_attribute_kind(const attribute::kind & kind) { attribute::kind::NonNull, ak::NonNull }, { attribute::kind::NullPointerIsValid, ak::NullPointerIsValid }, { attribute::kind::OptForFuzzing, ak::OptForFuzzing }, + { attribute::kind::OptimizeForDebugging, ak::OptimizeForDebugging }, { attribute::kind::OptimizeForSize, ak::OptimizeForSize }, { attribute::kind::OptimizeNone, ak::OptimizeNone }, { attribute::kind::PresplitCoroutine, ak::PresplitCoroutine }, @@ -240,6 +243,7 @@ convert_attribute_kind(const attribute::kind & kind) { attribute::kind::SwiftError, ak::SwiftError }, { attribute::kind::SwiftSelf, ak::SwiftSelf }, { attribute::kind::WillReturn, ak::WillReturn }, + { attribute::kind::Writable, ak::Writable }, { attribute::kind::WriteOnly, ak::WriteOnly }, { attribute::kind::ZExt, ak::ZExt }, { attribute::kind::LastEnumAttr, ak::LastEnumAttr }, diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index 899668d07..e1f390d08 100644 --- a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -752,7 +752,7 @@ static bool IsVolatile(const ::llvm::Value & value) { auto constant = ::llvm::dyn_cast(&value); - JLM_ASSERT(constant != nullptr && constant->getType()->getBitWidth() == 1); + JLM_ASSERT(constant != nullptr && 
constant->getType()->getIntegerBitWidth() == 1); auto apInt = constant->getValue(); JLM_ASSERT(apInt.isZero() || apInt.isOne()); diff --git a/jlm/llvm/frontend/LlvmModuleConversion.cpp b/jlm/llvm/frontend/LlvmModuleConversion.cpp index d137cfdf2..d9a693ba8 100644 --- a/jlm/llvm/frontend/LlvmModuleConversion.cpp +++ b/jlm/llvm/frontend/LlvmModuleConversion.cpp @@ -92,6 +92,8 @@ ConvertAttributeKind(const ::llvm::Attribute::AttrKind & kind) { ak::Builtin, attribute::kind::Builtin }, { ak::Cold, attribute::kind::Cold }, { ak::Convergent, attribute::kind::Convergent }, + { ak::CoroDestroyOnlyWhenComplete, attribute::kind::CoroDestroyOnlyWhenComplete }, + { ak::DeadOnUnwind, attribute::kind::DeadOnUnwind }, { ak::DisableSanitizerInstrumentation, attribute::kind::DisableSanitizerInstrumentation }, { ak::FnRetThunkExtern, attribute::kind::FnRetThunkExtern }, { ak::Hot, attribute::kind::Hot }, @@ -127,6 +129,7 @@ ConvertAttributeKind(const ::llvm::Attribute::AttrKind & kind) { ak::NonNull, attribute::kind::NonNull }, { ak::NullPointerIsValid, attribute::kind::NullPointerIsValid }, { ak::OptForFuzzing, attribute::kind::OptForFuzzing }, + { ak::OptimizeForDebugging, attribute::kind::OptimizeForDebugging }, { ak::OptimizeForSize, attribute::kind::OptimizeForSize }, { ak::OptimizeNone, attribute::kind::OptimizeNone }, { ak::PresplitCoroutine, attribute::kind::PresplitCoroutine }, @@ -153,6 +156,7 @@ ConvertAttributeKind(const ::llvm::Attribute::AttrKind & kind) { ak::SwiftError, attribute::kind::SwiftError }, { ak::SwiftSelf, attribute::kind::SwiftSelf }, { ak::WillReturn, attribute::kind::WillReturn }, + { ak::Writable, attribute::kind::Writable }, { ak::WriteOnly, attribute::kind::WriteOnly }, { ak::ZExt, attribute::kind::ZExt }, { ak::LastEnumAttr, attribute::kind::LastEnumAttr }, diff --git a/jlm/llvm/ir/attribute.hpp b/jlm/llvm/ir/attribute.hpp index 6dab4a8c1..1fdb6ca3f 100644 --- a/jlm/llvm/ir/attribute.hpp +++ b/jlm/llvm/ir/attribute.hpp @@ -33,6 +33,8 @@ class 
attribute Builtin, Cold, Convergent, + CoroDestroyOnlyWhenComplete, + DeadOnUnwind, DisableSanitizerInstrumentation, FnRetThunkExtern, Hot, @@ -68,6 +70,7 @@ class attribute NonNull, NullPointerIsValid, OptForFuzzing, + OptimizeForDebugging, OptimizeForSize, OptimizeNone, PresplitCoroutine, @@ -94,6 +97,7 @@ class attribute SwiftError, SwiftSelf, WillReturn, + Writable, WriteOnly, ZExt, LastEnumAttr, diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 0e1419ff4..74132a3b9 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -676,7 +676,8 @@ create_load_normal_form( return new jlm::llvm::load_normal_form(opclass, parent, graph); } -static void __attribute__((constructor)) register_normal_form() +static void __attribute__((constructor)) +register_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::llvm::LoadNonVolatileOperation), diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index a4ff4e43c..d472269a3 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -194,9 +194,9 @@ class LoadVolatileOperation final : public LoadOperation size_t numMemoryStates, size_t alignment) : LoadOperation( - CreateOperandTypes(numMemoryStates), - CreateResultTypes(std::move(loadedType), numMemoryStates), - alignment) + CreateOperandTypes(numMemoryStates), + CreateResultTypes(std::move(loadedType), numMemoryStates), + alignment) {} bool @@ -444,9 +444,9 @@ class LoadNonVolatileOperation final : public LoadOperation size_t numMemoryStates, size_t alignment) : LoadOperation( - CreateOperandTypes(numMemoryStates), - CreateResultTypes(std::move(loadedType), numMemoryStates), - alignment) + CreateOperandTypes(numMemoryStates), + CreateResultTypes(std::move(loadedType), numMemoryStates), + alignment) {} bool diff --git a/jlm/llvm/ir/operators/MemCpy.hpp b/jlm/llvm/ir/operators/MemCpy.hpp index f5d33849b..28b49af37 100644 --- 
a/jlm/llvm/ir/operators/MemCpy.hpp +++ b/jlm/llvm/ir/operators/MemCpy.hpp @@ -74,8 +74,8 @@ class MemCpyNonVolatileOperation final : public MemCpyOperation MemCpyNonVolatileOperation(std::shared_ptr lengthType, size_t numMemoryStates) : MemCpyOperation( - CreateOperandTypes(std::move(lengthType), numMemoryStates), - CreateResultTypes(numMemoryStates)) + CreateOperandTypes(std::move(lengthType), numMemoryStates), + CreateResultTypes(numMemoryStates)) {} bool @@ -153,8 +153,8 @@ class MemCpyVolatileOperation final : public MemCpyOperation MemCpyVolatileOperation(std::shared_ptr lengthType, size_t numMemoryStates) : MemCpyOperation( - CreateOperandTypes(std::move(lengthType), numMemoryStates), - CreateResultTypes(numMemoryStates)) + CreateOperandTypes(std::move(lengthType), numMemoryStates), + CreateResultTypes(numMemoryStates)) {} bool diff --git a/jlm/llvm/ir/operators/MemoryStateOperations.hpp b/jlm/llvm/ir/operators/MemoryStateOperations.hpp index 184b82f5b..4a9777664 100644 --- a/jlm/llvm/ir/operators/MemoryStateOperations.hpp +++ b/jlm/llvm/ir/operators/MemoryStateOperations.hpp @@ -21,8 +21,8 @@ class MemoryStateOperation : public rvsdg::simple_op protected: MemoryStateOperation(size_t numOperands, size_t numResults) : simple_op( - { numOperands, MemoryStateType::Create() }, - { numResults, MemoryStateType::Create() }) + { numOperands, MemoryStateType::Create() }, + { numResults, MemoryStateType::Create() }) {} }; diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 10a1698bf..04eb919c6 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -472,7 +472,8 @@ create_store_normal_form( return new jlm::llvm::store_normal_form(opclass, parent, graph); } -static void __attribute__((constructor)) register_normal_form() +static void __attribute__((constructor)) +register_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::llvm::StoreNonVolatileOperation), diff --git 
a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 24dea6cac..9ec691d20 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -148,9 +148,9 @@ class StoreNonVolatileOperation final : public StoreOperation size_t numMemoryStates, size_t alignment) : StoreOperation( - CreateOperandTypes(std::move(storedType), numMemoryStates), - { numMemoryStates, MemoryStateType::Create() }, - alignment) + CreateOperandTypes(std::move(storedType), numMemoryStates), + { numMemoryStates, MemoryStateType::Create() }, + alignment) {} bool @@ -414,9 +414,9 @@ class StoreVolatileOperation final : public StoreOperation size_t numMemoryStates, size_t alignment) : StoreOperation( - CreateOperandTypes(std::move(storedType), numMemoryStates), - CreateResultTypes(numMemoryStates), - alignment) + CreateOperandTypes(std::move(storedType), numMemoryStates), + CreateResultTypes(numMemoryStates), + alignment) {} bool diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index 2f564e96e..b5b7ebcc8 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -37,8 +37,8 @@ class RegionAwareMemoryNodeProvider::Statistics final : public util::Statistics const RvsdgModule & rvsdgModule, const PointsToGraph & pointsToGraph) : util::Statistics( - Statistics::Id::RegionAwareMemoryNodeProvisioning, - rvsdgModule.SourceFileName()), + Statistics::Id::RegionAwareMemoryNodeProvisioning, + rvsdgModule.SourceFileName()), StatisticsCollector_(statisticsCollector) { if (!IsDemanded()) diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index ba392283c..99f3fbbe4 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -463,7 +463,8 @@ flattened_binary_operation_get_default_normal_form_( return new jlm::rvsdg::flattened_binary_normal_form(operator_class, parent, graph); } -static void 
__attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::binary_op), diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 7a5dc0193..1b34156ad 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -238,7 +238,8 @@ get_default_normal_form( return new concat_normal_form(parent, graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::bitconcat_op), diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 6c28a9992..d73eb15f9 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -404,7 +404,8 @@ gamma_node_get_default_normal_form_( return new jlm::rvsdg::gamma_normal_form(operator_class, parent, graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::gamma_op), diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 61d2b25ef..78d9922dd 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -133,7 +133,8 @@ node_get_default_normal_form_( return new jlm::rvsdg::node_normal_form(operator_class, parent, graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::operation), diff --git a/jlm/rvsdg/nullary.cpp b/jlm/rvsdg/nullary.cpp index a3e44f1ca..52d11d30a 100644 --- a/jlm/rvsdg/nullary.cpp +++ b/jlm/rvsdg/nullary.cpp @@ -43,7 +43,8 @@ nullary_operation_get_default_normal_form_( return new jlm::rvsdg::nullary_normal_form(operator_class, parent, 
graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::nullary_op), diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index da73c254f..ce181b579 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -118,7 +118,8 @@ get_default_normal_form( return new jlm::rvsdg::simple_normal_form(operator_class, parent, graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::simple_op), diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 8d3b7d471..d50f57e2c 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -209,7 +209,8 @@ create_mux_normal_form( return new jlm::rvsdg::mux_normal_form(opclass, parent, graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::mux_op), diff --git a/jlm/rvsdg/structural-normal-form.cpp b/jlm/rvsdg/structural-normal-form.cpp index 8a2177fc8..843b17fb6 100644 --- a/jlm/rvsdg/structural-normal-form.cpp +++ b/jlm/rvsdg/structural-normal-form.cpp @@ -30,7 +30,8 @@ get_default_normal_form( return new jlm::rvsdg::structural_normal_form(operator_class, parent, graph); } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::structural_op), diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 6dd7c79e6..51e2379bf 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -109,7 +109,8 
@@ unary_operation_get_default_normal_form_( return nf; } -static void __attribute__((constructor)) register_node_normal_form(void) +static void __attribute__((constructor)) +register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::unary_op), diff --git a/scripts/build-circt.sh b/scripts/build-circt.sh index ccefed813..e6b8d5150 100755 --- a/scripts/build-circt.sh +++ b/scripts/build-circt.sh @@ -1,7 +1,7 @@ #!/bin/bash set -eu -GIT_COMMIT=debf1ed774c2bbdbfc8e7bc987a21f72e8f08f65 +GIT_COMMIT=2dc8240d91a0f993d616b152aa4d7520156862fe # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" @@ -10,7 +10,7 @@ CIRCT_BUILD=${JLM_ROOT_DIR}/build-circt CIRCT_INSTALL=${JLM_ROOT_DIR}/usr LLVM_LIT_PATH=/usr/local/bin/lit -LLVM_VERSION=17 +LLVM_VERSION=18 LLVM_CONFIG_BIN=llvm-config-${LLVM_VERSION} function commit() diff --git a/scripts/build-mlir.sh b/scripts/build-mlir.sh index 38cbab79b..7a82b11da 100755 --- a/scripts/build-mlir.sh +++ b/scripts/build-mlir.sh @@ -1,7 +1,7 @@ #!/bin/bash set -eu -GIT_COMMIT=ab630d5a881a0e8fc5bdfa63a5984186fa9096c0 +GIT_COMMIT=90f30f1112906f2868fb42a6fa1a20fb8a20e03b # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" @@ -9,7 +9,7 @@ JLM_ROOT_DIR="$(realpath "${SCRIPT_DIR}/..")" MLIR_BUILD=${JLM_ROOT_DIR}/build-mlir MLIR_INSTALL=${JLM_ROOT_DIR}/usr -LLVM_VERSION=17 +LLVM_VERSION=18 LLVM_CONFIG_BIN=llvm-config-${LLVM_VERSION} function commit() diff --git a/scripts/run-llvm-test-suite.sh b/scripts/run-llvm-test-suite.sh index 3bf70b1f8..082277e02 100755 --- a/scripts/run-llvm-test-suite.sh +++ b/scripts/run-llvm-test-suite.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/llvm-test-suite.git -GIT_COMMIT=dadc9d4760fe36c734cc7bd9d460930b4f020513 
+GIT_COMMIT=ebdef97621d4e024dca3ec0095de958e6ccb3ad8 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" diff --git a/tests/jlm/llvm/frontend/llvm/LoadTests.cpp b/tests/jlm/llvm/frontend/llvm/LoadTests.cpp index 6d946a371..bbb7f2b72 100644 --- a/tests/jlm/llvm/frontend/llvm/LoadTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/LoadTests.cpp @@ -25,7 +25,7 @@ LoadConversion() std::unique_ptr llvmModule(new Module("module", context)); auto int64Type = Type::getInt64Ty(context); - auto pointerType = Type::getInt64PtrTy(context); + auto pointerType = llvm::PointerType::getUnqual(context); auto functionType = FunctionType::get(int64Type, ArrayRef({ pointerType }), false); auto function = diff --git a/tests/jlm/llvm/frontend/llvm/MemCpyTests.cpp b/tests/jlm/llvm/frontend/llvm/MemCpyTests.cpp index 6231bad17..3cd9d30d5 100644 --- a/tests/jlm/llvm/frontend/llvm/MemCpyTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/MemCpyTests.cpp @@ -25,7 +25,7 @@ MemCpyConversion() std::unique_ptr llvmModule(new Module("module", context)); auto int64Type = Type::getInt64Ty(context); - auto pointerType = Type::getInt64PtrTy(context); + auto pointerType = llvm::PointerType::getUnqual(context); auto voidType = Type::getVoidTy(context); auto functionType = diff --git a/tests/jlm/llvm/frontend/llvm/StoreTests.cpp b/tests/jlm/llvm/frontend/llvm/StoreTests.cpp index 60461f872..42eb08295 100644 --- a/tests/jlm/llvm/frontend/llvm/StoreTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/StoreTests.cpp @@ -25,7 +25,7 @@ StoreConversion() std::unique_ptr llvmModule(new Module("module", context)); auto int64Type = Type::getInt64Ty(context); - auto pointerType = Type::getInt64PtrTy(context); + auto pointerType = llvm::PointerType::getUnqual(context); auto voidType = Type::getVoidTy(context); auto functionType = From 4b66762a0648c03f69de99759cff6847bd1e8efb Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 23 Aug 2024 08:56:02 +0200 Subject: [PATCH 
052/170] Change LLVM version in README.md (#592) We forgot to bump the LLVM version to 18. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c84e4b46e..35c089911 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Jlm is an experimental compiler/optimizer that consumes and produces LLVM IR. It Regionalized Value State Dependence Graph (RVSDG) as intermediate representation for optimizations. ## Dependencies -* Clang/LLVM 17 +* Clang/LLVM 18 * Doxygen 1.9.1 ### HLS dependencies From 07206bcb7676c5e5c767ee357189a5e009e51ac2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 23 Aug 2024 11:58:48 +0200 Subject: [PATCH 053/170] Add support for dynamically configurable optimizations (#593) --- jlm/llvm/opt/OptimizationSequence.hpp | 6 +- jlm/tooling/CommandLine.cpp | 75 +++++++++---------- jlm/tooling/CommandLine.hpp | 4 +- .../tooling/TestJlmOptCommandLineParser.cpp | 2 +- 4 files changed, 43 insertions(+), 44 deletions(-) diff --git a/jlm/llvm/opt/OptimizationSequence.hpp b/jlm/llvm/opt/OptimizationSequence.hpp index 428ee1629..4dc957d50 100644 --- a/jlm/llvm/opt/OptimizationSequence.hpp +++ b/jlm/llvm/opt/OptimizationSequence.hpp @@ -21,7 +21,7 @@ class OptimizationSequence final : public optimization ~OptimizationSequence() noexcept override; - explicit OptimizationSequence(std::vector optimizations) + explicit OptimizationSequence(std::vector> optimizations) : Optimizations_(std::move(optimizations)) {} @@ -32,14 +32,14 @@ class OptimizationSequence final : public optimization CreateAndRun( RvsdgModule & rvsdgModule, util::StatisticsCollector & statisticsCollector, - std::vector optimizations) + std::vector> optimizations) { OptimizationSequence sequentialApplication(std::move(optimizations)); sequentialApplication.run(rvsdgModule, statisticsCollector); } private: - std::vector Optimizations_; + std::vector> Optimizations_; }; } diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp 
index b85389066..fb908ba86 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -98,10 +98,10 @@ JlmOptCommandLineOptions::Reset() noexcept OptimizationIds_.clear(); } -std::vector +std::vector> JlmOptCommandLineOptions::GetOptimizations() const noexcept { - std::vector optimizations; + std::vector> optimizations; optimizations.reserve(OptimizationIds_.size()); for (auto & optimizationId : OptimizationIds_) @@ -223,46 +223,45 @@ JlmOptCommandLineOptions::ToCommandLineArgument(OutputFormat outputFormat) return mapping.at(outputFormat).data(); } -llvm::optimization * -JlmOptCommandLineOptions::GetOptimization(enum OptimizationId id) +std::unique_ptr +JlmOptCommandLineOptions::GetOptimization(enum OptimizationId optimizationId) { using Andersen = llvm::aa::Andersen; using Steensgaard = llvm::aa::Steensgaard; - using AgnosticMNP = llvm::aa::AgnosticMemoryNodeProvider; - using RegionAwareMNP = llvm::aa::RegionAwareMemoryNodeProvider; - static llvm::aa::AliasAnalysisStateEncoder andersenAgnostic; - static llvm::aa::AliasAnalysisStateEncoder andersenRegionAware; - static llvm::aa::AliasAnalysisStateEncoder steensgaardAgnostic; - static llvm::aa::AliasAnalysisStateEncoder steensgaardRegionAware; - static llvm::cne commonNodeElimination; - static llvm::DeadNodeElimination deadNodeElimination; - static llvm::fctinline functionInlining; - static llvm::InvariantValueRedirection invariantValueRedirection; - static llvm::pullin nodePullIn; - static llvm::pushout nodePushOut; - static llvm::tginversion thetaGammaInversion; - static llvm::loopunroll loopUnrolling(4); - static llvm::nodereduction nodeReduction; - - static std::unordered_map map( - { { OptimizationId::AAAndersenAgnostic, &andersenAgnostic }, - { OptimizationId::AAAndersenRegionAware, &andersenRegionAware }, - { OptimizationId::AASteensgaardAgnostic, &steensgaardAgnostic }, - { OptimizationId::AASteensgaardRegionAware, &steensgaardRegionAware }, - { OptimizationId::CommonNodeElimination, 
&commonNodeElimination }, - { OptimizationId::DeadNodeElimination, &deadNodeElimination }, - { OptimizationId::FunctionInlining, &functionInlining }, - { OptimizationId::InvariantValueRedirection, &invariantValueRedirection }, - { OptimizationId::LoopUnrolling, &loopUnrolling }, - { OptimizationId::NodePullIn, &nodePullIn }, - { OptimizationId::NodePushOut, &nodePushOut }, - { OptimizationId::NodeReduction, &nodeReduction }, - { OptimizationId::ThetaGammaInversion, &thetaGammaInversion } }); - - if (map.find(id) != map.end()) - return map[id]; + using AgnosticMnp = llvm::aa::AgnosticMemoryNodeProvider; + using RegionAwareMnp = llvm::aa::RegionAwareMemoryNodeProvider; - throw util::error("Unknown optimization identifier"); + switch (optimizationId) + { + case OptimizationId::AAAndersenAgnostic: + return std::make_unique>(); + case OptimizationId::AAAndersenRegionAware: + return std::make_unique>(); + case OptimizationId::AASteensgaardAgnostic: + return std::make_unique>(); + case OptimizationId::AASteensgaardRegionAware: + return std::make_unique>(); + case OptimizationId::CommonNodeElimination: + return std::make_unique(); + case OptimizationId::DeadNodeElimination: + return std::make_unique(); + case OptimizationId::FunctionInlining: + return std::make_unique(); + case OptimizationId::InvariantValueRedirection: + return std::make_unique(); + case OptimizationId::LoopUnrolling: + return std::make_unique(4); + case OptimizationId::NodePullIn: + return std::make_unique(); + case OptimizationId::NodePushOut: + return std::make_unique(); + case OptimizationId::NodeReduction: + return std::make_unique(); + case OptimizationId::ThetaGammaInversion: + return std::make_unique(); + default: + JLM_UNREACHABLE("Unhandled optimization id."); + } } const util::BijectiveMap & diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index 189d430e6..2a6b8ce63 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -135,7 +135,7 @@ class 
JlmOptCommandLineOptions final : public CommandLineOptions return OptimizationIds_; } - [[nodiscard]] std::vector + [[nodiscard]] std::vector> GetOptimizations() const noexcept; static OptimizationId @@ -156,7 +156,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions static const char * ToCommandLineArgument(OutputFormat outputFormat); - static llvm::optimization * + static std::unique_ptr GetOptimization(enum OptimizationId optimizationId); static std::unique_ptr diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index e82262691..c38ca1d0a 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -74,7 +74,7 @@ TestOptimizationIdToOptimizationTranslation() { auto optimizationId = static_cast(n); - // throws exception on failure + // terminates on unhandled optimization id JlmOptCommandLineOptions::GetOptimization(optimizationId); } } From d48bdd8e9ca2ad7388198245d8b12c85b6195595 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 23 Aug 2024 13:19:30 +0200 Subject: [PATCH 054/170] make ClangFormat check HLS, and make it optional to specify CIRCT and MLIR install paths in configure.sh (#591) This PR started as a fix of formatting that changed with clang-format 18. I also noticed that the constructor had an unnecessary `const`, so I fixed that. Looking into why this was not caught by the ClangFormat workflow, I saw that the `configure.sh` script's `--enable-mlir` "consumed" the `--enable-hls`, causing hls to not be enabled, which affects the set of files found by `make format-dry-run`. In an attempt to make this not happen in the future, I made `configure.sh` complain and fail if the path you pass to `--enable-mlir` or `--enable-hls` is not a folder that exists. This forces the user to set up the folder first, but it should be helpful in preventing this type of mistake. 
Formatting is a bit of a special case, as we want to "enable" hls and mlir without needing to install either, so I passed in `/usr` as the path, since that is a folder we know exists. --- .github/actions/BuildJlm/action.yml | 4 ++-- README.md | 2 +- configure.sh | 24 ++++++++++++------- .../rhls2firrtl/verilator-harness-hls.hpp | 5 ++-- 4 files changed, 21 insertions(+), 14 deletions(-) diff --git a/.github/actions/BuildJlm/action.yml b/.github/actions/BuildJlm/action.yml index 533706a8b..9ffb78c20 100644 --- a/.github/actions/BuildJlm/action.yml +++ b/.github/actions/BuildJlm/action.yml @@ -62,10 +62,10 @@ runs: run: | export JLM_CONFIGURE_ARGUMENTS="--target release --enable-asserts" if [[ "${{inputs.enable-hls}}" == "true" ]]; then - JLM_CONFIGURE_ARGUMENTS="$JLM_CONFIGURE_ARGUMENTS --enable-hls ${{ github.workspace }}/build-circt/circt" + JLM_CONFIGURE_ARGUMENTS="$JLM_CONFIGURE_ARGUMENTS --enable-hls=${{ github.workspace }}/build-circt/circt" fi if [[ "${{inputs.enable-mlir}}" == "true" ]]; then - JLM_CONFIGURE_ARGUMENTS="$JLM_CONFIGURE_ARGUMENTS --enable-mlir ${{ github.workspace }}/lib/mlir-rvsdg" + JLM_CONFIGURE_ARGUMENTS="$JLM_CONFIGURE_ARGUMENTS --enable-mlir=${{ github.workspace }}/lib/mlir-rvsdg" fi if [[ "${{inputs.enable-coverage}}" == "true" ]]; then JLM_CONFIGURE_ARGUMENTS="$JLM_CONFIGURE_ARGUMENTS --enable-coverage" diff --git a/README.md b/README.md index 35c089911..4d8e64244 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ CIRCT and the HLS backend can be setup with the following commands: ``` ./scripts/build-circt.sh --build-path --install-path -./configure --enable-hls +./configure --enable-hls= ``` ## Publications diff --git a/configure.sh b/configure.sh index a60e70547..e7f2ef544 100755 --- a/configure.sh +++ b/configure.sh @@ -9,10 +9,10 @@ ENABLE_ASSERTS="no" LLVM_CONFIG_BIN="llvm-config-"${LLVM_VERSION} ENABLE_COVERAGE="no" ENABLE_HLS= -CIRCT_PATH= +CIRCT_PATH="./usr" CIRCT_LDFLAGS= ENABLE_MLIR= -MLIR_PATH= +MLIR_PATH="./usr" 
MLIR_LDFLAGS= function usage() @@ -23,12 +23,12 @@ function usage() echo " --target MODE Sets the build mode. Supported build modes are" echo " 'debug' and 'release'. [${TARGET}]" echo " --enable-asserts Enables asserts." - echo " --enable-hls PATH Enable the HLS backend, and sets the path to" - echo " CIRCT, which the backend depends on." + echo " --enable-hls[=PATH] Enable the HLS backend, and optionally sets a custom" + echo " path to the CIRCT installation. [${CIRCT_PATH}]" echo " --llvm-config PATH The llvm-config script used to determine up llvm" echo " build dependencies. [${LLVM_CONFIG_BIN}]" - echo " --enable-mlir PATH Sets the path to the MLIR RVSDG Dialect and enables" - echo " building the MLIR backend and frontend. [${MLIR_PATH}]" + echo " --enable-mlir[=PATH] Enables the MLIR Dialect, and optionally sets a custom" + echo " path to the MLIR dialect installation. [${MLIR_PATH}]" echo " --enable-coverage Enable test coverage computation target." echo " --help Prints this message and stops." echo @@ -47,7 +47,10 @@ while [[ "$#" -ge 1 ]] ; do --enable-hls) ENABLE_HLS="yes" shift - CIRCT_PATH="$1" + ;; + --enable-hls=*) + ENABLE_HLS="yes" + CIRCT_PATH="${1#*=}" shift ;; --enable-asserts) @@ -60,16 +63,19 @@ while [[ "$#" -ge 1 ]] ; do shift ;; --enable-mlir) + ENABLE_MLIR="yes" shift - MLIR_PATH="$1" + ;; + --enable-mlir=*) ENABLE_MLIR="yes" + MLIR_PATH="${1#*=}" shift ;; --enable-coverage) ENABLE_COVERAGE="yes" shift ;; - --help) + -*) # Any unknown option triggers the usage text. 
This includes --help and -h usage >&2 exit 1 ;; diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp index db2a306ae..b93041ed9 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp @@ -30,8 +30,9 @@ class VerilatorHarnessHLS : public BaseHLS * /param verilogFile The filename to the Verilog file that is to be used together with the * generated harness as input to Verilator. */ - VerilatorHarnessHLS(const util::filepath verilogFile) - : VerilogFile_(std::move(verilogFile)){}; + explicit VerilatorHarnessHLS(util::filepath verilogFile) + : VerilogFile_(std::move(verilogFile)) + {} private: const util::filepath VerilogFile_; From bbd5d966875e864bf5047b6b24407b450838da09 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Fri, 23 Aug 2024 14:45:35 +0200 Subject: [PATCH 055/170] [AndersenAgnostic] Add the topological worklist solver to Andersen (#590) Adds the dummy worklist, which only remembers if /anything/ has been pushed. When given this "worklist", the solver instead performs topological sweeps across all work items. 
--- jlm/llvm/opt/alias-analyses/Andersen.cpp | 6 ++ .../opt/alias-analyses/PointerObjectSet.cpp | 69 ++++++++++++++++- .../opt/alias-analyses/PointerObjectSet.hpp | 16 ++++ jlm/util/Worklist.hpp | 74 ++++++++++++++++--- tests/jlm/util/TestWorklist.cpp | 21 ++++++ 5 files changed, 175 insertions(+), 11 deletions(-) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 40468e457..12a6f08a3 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -117,6 +117,8 @@ Andersen::Configuration::GetAllConfigurations() PickOnlineCycleDetection(config); config.SetWorklistSolverPolicy(Policy::FirstInFirstOut); PickOnlineCycleDetection(config); + config.SetWorklistSolverPolicy(Policy::TopologicalSort); + PickDifferencePropagation(config); // With topo, skip all cycle detection }; auto PickOfflineNormalization = [&](Configuration config) { @@ -180,6 +182,7 @@ class Andersen::Statistics final : public util::Statistics "#WorklistSolverWorkItemsPopped"; static constexpr const char * NumWorklistSolverWorkItemsNewPointees_ = "#WorklistSolverWorkItemsNewPointees"; + static constexpr const char * NumTopologicalWorklistSweeps_ = "#TopologicalWorklistSweeps"; // Online technique statistics static constexpr const char * NumOnlineCyclesDetected_ = "#OnlineCyclesDetected"; @@ -328,6 +331,9 @@ class Andersen::Statistics final : public util::Statistics AddMeasurement(NumWorklistSolverWorkItemsPopped_, statistics.NumWorkItemsPopped); AddMeasurement(NumWorklistSolverWorkItemsNewPointees_, statistics.NumWorkItemNewPointees); + if (statistics.NumTopologicalWorklistSweeps) + AddMeasurement(NumTopologicalWorklistSweeps_, *statistics.NumTopologicalWorklistSweeps); + if (statistics.NumOnlineCyclesDetected) AddMeasurement(NumOnlineCyclesDetected_, *statistics.NumOnlineCyclesDetected); diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index 
405660be7..0400416e7 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -1961,8 +1961,62 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) FlushNewSupersetEdges(); }; - while (worklist.HasMoreWorkItems()) - HandleWorkItem(worklist.PopWorkItem()); + // The observer worklist only contains one bit of state: + // "has anything been pushed since last reset?" + // It does not provide an iteration order, so if any work item need to be revisited, + // we do a topological traversal over all work items instead, called a "sweep". + // Performing topological sorting also detects all cycles, which are unified away. + constexpr bool useTopologicalTraversal = + std::is_same_v>; + + if constexpr (useTopologicalTraversal) + { + std::vector sccIndex; + std::vector topologicalOrder; + + statistics.NumTopologicalWorklistSweeps = 0; + + while (worklist.HasPushBeenMade()) + { + (*statistics.NumTopologicalWorklistSweeps)++; + worklist.ResetPush(); + + // First perform a topological sort of the entire subset graph, with respect to simple edges + util::FindStronglyConnectedComponents( + Set_.NumPointerObjects(), + GetSupersetEdgeSuccessors, + sccIndex, + topologicalOrder); + + // Visit all nodes in topological order + // cycles will result in neighbouring nodes in the topologicalOrder sharing sccIndex + for (size_t i = 0; i < topologicalOrder.size(); i++) + { + const auto node = topologicalOrder[i]; + const auto nextNodeIndex = i + 1; + if (nextNodeIndex < topologicalOrder.size()) + { + auto & nextNode = topologicalOrder[nextNodeIndex]; + if (sccIndex[node] == sccIndex[nextNode]) + { + // This node is in a cycle with the next node, unify them + nextNode = UnifyPointerObjects(node, nextNode); + continue; + } + } + + // Otherwise handle the work item (only unification roots) + if (Set_.IsUnificationRoot(node)) + HandleWorkItem(node); + } + } + } + else + { + // The worklist is a normal 
worklist + while (worklist.HasMoreWorkItems()) + HandleWorkItem(worklist.PopWorkItem()); + } if constexpr (EnableOnlineCycleDetection) { @@ -2005,6 +2059,12 @@ PointerObjectConstraintSet::SolveUsingWorklist( constexpr bool vDifferencePropagation = decltype(tDifferencePropagation)::value; constexpr bool vPreferImplicitPointees = decltype(tPreferImplicitPointees)::value; + if constexpr ( + std::is_same_v> + && (vOnlineCycleDetection || vHybridCycleDetection || vLazyCycleDetection)) + { + JLM_UNREACHABLE("Can not enable online, hybrid or lazy cycle detection with the topo policy"); + } if constexpr (vOnlineCycleDetection && (vHybridCycleDetection || vLazyCycleDetection)) { JLM_UNREACHABLE("Can not enable hybrid or lazy cycle detection with online cycle detection"); @@ -2026,6 +2086,7 @@ PointerObjectConstraintSet::SolveUsingWorklist( std::variant< typename util::LrfWorklist *, typename util::TwoPhaseLrfWorklist *, + typename util::ObserverWorklist *, typename util::LifoWorklist *, typename util::FifoWorklist *> policyVariant; @@ -2034,6 +2095,8 @@ PointerObjectConstraintSet::SolveUsingWorklist( policyVariant = (util::LrfWorklist *)nullptr; else if (policy == WorklistSolverPolicy::TwoPhaseLeastRecentlyFired) policyVariant = (util::TwoPhaseLrfWorklist *)nullptr; + else if (policy == WorklistSolverPolicy::TopologicalSort) + policyVariant = (util::ObserverWorklist *)nullptr; else if (policy == WorklistSolverPolicy::LastInFirstOut) policyVariant = (util::LifoWorklist *)nullptr; else if (policy == WorklistSolverPolicy::FirstInFirstOut) @@ -2090,6 +2153,8 @@ PointerObjectConstraintSet::WorklistSolverPolicyToString(WorklistSolverPolicy po return "LeastRecentlyFired"; case WorklistSolverPolicy::TwoPhaseLeastRecentlyFired: return "TwoPhaseLeastRecentlyFired"; + case WorklistSolverPolicy::TopologicalSort: + return "TopologicalSort"; case WorklistSolverPolicy::FirstInFirstOut: return "FirstInFirstOut"; case WorklistSolverPolicy::LastInFirstOut: diff --git 
a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index 509530d19..ff303dfbd 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -784,6 +784,16 @@ class PointerObjectConstraintSet final */ TwoPhaseLeastRecentlyFired, + /** + * Not a real worklist policy. + * For each "sweep", all nodes are visited in topological order. + * Any cycles found during topological sorting are eliminated. + * This continues until a full sweep has been done with no attempts at pushing to the worklist. + * Described by: + * Pearce 2007: "Efficient field-sensitive pointer analysis of C" + */ + TopologicalSort, + /** * A worklist policy based on a queue. * @see jlm::util::FifoWorklist @@ -825,6 +835,12 @@ class PointerObjectConstraintSet final */ size_t NumWorkItemNewPointees{}; + /** + * The number of times the topological worklist orders the whole set of work items + * and visits them all in topological order. + */ + std::optional NumTopologicalWorklistSweeps; + /** * The number of cycles detected by online cycle detection, * and number of unifications made to eliminate the cycles, diff --git a/jlm/util/Worklist.hpp b/jlm/util/Worklist.hpp index c6e48af39..a3ed2c159 100644 --- a/jlm/util/Worklist.hpp +++ b/jlm/util/Worklist.hpp @@ -45,7 +45,7 @@ class Worklist * @return true if there are work items left to be visited */ [[nodiscard]] virtual bool - HasMoreWorkItems() = 0; + HasMoreWorkItems() const noexcept = 0; /** * Removes one work item from the worklist. 
@@ -78,8 +78,8 @@ class LifoWorklist final : public Worklist LifoWorklist() = default; - bool - HasMoreWorkItems() override + [[nodiscard]] bool + HasMoreWorkItems() const noexcept override { return !WorkItems_.empty(); } @@ -123,8 +123,8 @@ class FifoWorklist final : public Worklist FifoWorklist() = default; - bool - HasMoreWorkItems() override + [[nodiscard]] bool + HasMoreWorkItems() const noexcept override { return !WorkItems_.empty(); } @@ -173,8 +173,8 @@ class LrfWorklist final : public Worklist LrfWorklist() = default; - bool - HasMoreWorkItems() override + [[nodiscard]] bool + HasMoreWorkItems() const noexcept override { return !WorkItems_.empty(); } @@ -240,8 +240,8 @@ class TwoPhaseLrfWorklist final : public Worklist TwoPhaseLrfWorklist() = default; - bool - HasMoreWorkItems() override + [[nodiscard]] bool + HasMoreWorkItems() const noexcept override { return !Current_.empty() || !Next_.empty(); } @@ -294,6 +294,62 @@ class TwoPhaseLrfWorklist final : public Worklist std::unordered_map LastFire_; }; +/** + * A fake worklist that only holds a single bit of information: + * "Has any item been pushed since the last reset?" + * Used to implement the Topological worklist policy, which is not technically a worklist policy + * @tparam T the type of the work items. + * @see Worklist + */ +template +class ObserverWorklist final : public Worklist +{ +public: + ~ObserverWorklist() override = default; + + ObserverWorklist() = default; + + [[nodiscard]] bool + HasMoreWorkItems() const noexcept override + { + JLM_UNREACHABLE("Dummy worklist"); + } + + T + PopWorkItem() override + { + JLM_UNREACHABLE("Dummy worklist"); + } + + void + PushWorkItem(T item [[maybe_unused]]) override + { + PushMade_ = true; + } + + /** + * @return true if the PushWorkItem method has been called since the last time + * ResetPush() was called. 
+ */ + [[nodiscard]] bool + HasPushBeenMade() const noexcept + { + return PushMade_; + } + + /** + * Makes the dummy worklist forget about being pushed to. + */ + void + ResetPush() + { + PushMade_ = false; + } + +private: + bool PushMade_ = false; +}; + } #endif // JLM_UTIL_WORKLIST_HPP diff --git a/tests/jlm/util/TestWorklist.cpp b/tests/jlm/util/TestWorklist.cpp index a888f60fa..7286a4d58 100644 --- a/tests/jlm/util/TestWorklist.cpp +++ b/tests/jlm/util/TestWorklist.cpp @@ -133,3 +133,24 @@ TestTwoPhaseLrfWorklist() JLM_UNIT_TEST_REGISTER( "jlm/llvm/opt/alias-analyses/TestWorklist-TestTwoPhaseLrfWorklist", TestTwoPhaseLrfWorklist) + +static int +TestObserverWorklist() +{ + jlm::util::ObserverWorklist wl; + assert(!wl.HasPushBeenMade()); + wl.PushWorkItem(7); + assert(wl.HasPushBeenMade()); + wl.ResetPush(); + assert(!wl.HasPushBeenMade()); + wl.ResetPush(); + assert(!wl.HasPushBeenMade()); + wl.PushWorkItem(7); + assert(wl.HasPushBeenMade()); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/alias-analyses/TestWorklist-TestObserverWorklist", + TestObserverWorklist) From 44395a742eeee6f78d55558065d810cb65424d44 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 23 Aug 2024 15:56:41 +0200 Subject: [PATCH 056/170] Add EntryArgument for HLS loop (#594) This PR adds an argument subclass for the entry arguments of the HLS loop. This is necessary in order to make the `rvsdg::argument` class abstract. 
Co-authored-by: HKrogstie --- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 8 +++--- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 +-- jlm/hls/ir/hls.cpp | 20 +++++++++----- jlm/hls/ir/hls.hpp | 36 +++++++++++++++++++++++++ 4 files changed, 56 insertions(+), 12 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index c2e2c4204..429a8b4e4 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -248,7 +248,7 @@ decouple_load( auto new_in = jlm::rvsdg::structural_input::create(new_loop, arg->input()->origin(), arg->Type()); smap.insert(arg->input(), new_in); - new_arg = jlm::rvsdg::argument::create(new_loop->subregion(), new_in, arg->Type()); + new_arg = &EntryArgument::Create(*new_loop->subregion(), *new_in, arg->Type()); } smap.insert(arg, new_arg); continue; @@ -364,9 +364,9 @@ decouple_load( auto buf = buffer_op::create(*dload_out[0], 2, true)[0]; // replace data output of loadNode auto old_data_in = jlm::rvsdg::structural_input::create(loopNode, buf, dload_out[0]->Type()); - auto old_data_arg = - jlm::rvsdg::argument::create(loopNode->subregion(), old_data_in, dload_out[0]->Type()); - loadNode->output(0)->divert_users(old_data_arg); + auto & old_data_arg = + EntryArgument::Create(*loopNode->subregion(), *old_data_in, dload_out[0]->Type()); + loadNode->output(0)->divert_users(&old_data_arg); remove(loadNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index a9a3519ba..b86c42f00 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -31,8 +31,8 @@ jlm::hls::route_response(jlm::rvsdg::region * target, jlm::rvsdg::output * respo auto ln = dynamic_cast(target->node()); JLM_ASSERT(ln); auto input = jlm::rvsdg::structural_input::create(ln, parent_response, parent_response->Type()); - auto argument = jlm::rvsdg::argument::create(target, input, response->Type()); - return 
argument; + auto & argument = EntryArgument::Create(*target, *input, response->Type()); + return &argument; } } diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 42e0171ea..db4f19055 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -35,6 +35,14 @@ bundletype::ComputeHash() const noexcept return seed; } +EntryArgument::~EntryArgument() noexcept = default; + +EntryArgument & +EntryArgument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +{ + return EntryArgument::Create(region, *input, Type()); +} + backedge_argument & backedge_argument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) { @@ -55,11 +63,11 @@ loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer auto input = jlm::rvsdg::structural_input::create(this, origin, origin->Type()); auto output = jlm::rvsdg::structural_output::create(this, origin->Type()); - auto argument_in = jlm::rvsdg::argument::create(subregion(), input, origin->Type()); + auto & argument_in = EntryArgument::Create(*subregion(), *input, origin->Type()); auto argument_loop = add_backedge(origin->Type()); auto mux = - hls::mux_op::create(*predicate_buffer(), { argument_in, argument_loop }, false, true)[0]; + hls::mux_op::create(*predicate_buffer(), { &argument_in, argument_loop }, false, true)[0]; auto branch = hls::branch_op::create(*predicate()->origin(), *mux, true); if (buffer != nullptr) { @@ -77,8 +85,8 @@ loop_node::add_loopconst(jlm::rvsdg::output * origin) { auto input = jlm::rvsdg::structural_input::create(this, origin, origin->Type()); - auto argument_in = jlm::rvsdg::argument::create(subregion(), input, origin->Type()); - auto buffer = hls::loop_constant_buffer_op::create(*predicate_buffer(), *argument_in)[0]; + auto & argument_in = EntryArgument::Create(*subregion(), *input, origin->Type()); + auto buffer = hls::loop_constant_buffer_op::create(*predicate_buffer(), argument_in)[0]; return buffer; } @@ -96,8 +104,8 @@ loop_node::copy(jlm::rvsdg::region 
* region, jlm::rvsdg::substitution_map & smap auto inp = jlm::rvsdg::structural_input::create(loop, in_origin, in_origin->Type()); smap.insert(input(i), loop->input(i)); auto oarg = input(i)->arguments.begin().ptr(); - auto narg = jlm::rvsdg::argument::create(loop->subregion(), inp, oarg->Type()); - smap.insert(oarg, narg); + auto & narg = EntryArgument::Create(*loop->subregion(), *inp, oarg->Type()); + smap.insert(oarg, &narg); } for (size_t i = 0; i < noutputs(); ++i) { diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 230a1c55b..6d2eb10c0 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -607,6 +607,42 @@ class backedge_argument; class backedge_result; class loop_node; +/** + * Represents the entry argument for the HLS loop. + */ +class EntryArgument : public rvsdg::argument +{ + friend loop_node; + +public: + ~EntryArgument() noexcept override; + +private: + EntryArgument( + rvsdg::region & region, + rvsdg::structural_input & input, + const std::shared_ptr type) + : rvsdg::argument(®ion, &input, std::move(type)) + {} + +public: + EntryArgument & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + + // FIXME: This should not be public, but we currently still have some transformations that use + // this one. Make it eventually private. 
+ static EntryArgument & + Create( + rvsdg::region & region, + rvsdg::structural_input & input, + const std::shared_ptr type) + { + auto argument = new EntryArgument(region, input, std::move(type)); + region.append_argument(argument); + return *argument; + } +}; + class backedge_argument : public jlm::rvsdg::argument { friend loop_node; From e4550dd1f5763e1b615d52198b993f8da03ff834 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sat, 24 Aug 2024 10:20:37 +0200 Subject: [PATCH 057/170] Run hls::cne and llvm::dne as part of the passes (#573) --- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 8 +++++--- jlm/hls/ir/hls.hpp | 2 +- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 952473bd0..c402e18cb 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -416,16 +416,18 @@ rvsdg2rhls(llvm::RvsdgModule & rhls) { pre_opt(rhls); merge_gamma(rhls); + util::StatisticsCollector statisticsCollector; + llvm::DeadNodeElimination llvmDne; + llvmDne.run(rhls, statisticsCollector); - // mem_sep(rhls); mem_sep_argument(rhls); - // run conversion on copy remove_unused_state(rhls); // main conversion steps - // add_triggers(rhls); // TODO: is this needed? distribute_constants(rhls); ConvertGammaNodes(rhls); ConvertThetaNodes(rhls); + hls::cne hlsCne; + hlsCne.run(rhls, statisticsCollector); // rhls optimization dne(rhls); alloca_conv(rhls); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 6d2eb10c0..7cb777e4b 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -157,7 +157,7 @@ class fork_op final : public jlm::rvsdg::simple_op } /** - * Cechk if a fork is a constant fork (CFORK). + * Check if a fork is a constant fork (CFORK). * * /return True if the fork is a constant fork, i.e., the input of the fork is a constant, else * false. 
From bac32fa19635229b114aca0eaa9eb5c776bb7ef9 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Mon, 26 Aug 2024 12:25:44 +0200 Subject: [PATCH 058/170] Use latest commit hash in run-hls-test.sh (#599) --- scripts/run-hls-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index c102a12ef..0d84012a5 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=3e92b3d37b654b0f14b8b13d9ff8c07cad5f3796 +GIT_COMMIT=51d327e20c42eebe3578d4b1ad0950e4ab389c2d # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" From 39ee1e9dcb943abe11f2ead3d99c121107d231ad Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 26 Aug 2024 20:43:03 +0200 Subject: [PATCH 059/170] Adds MLIR backend information to README (#596) --- README.md | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/README.md b/README.md index 4d8e64244..4910cd0c4 100644 --- a/README.md +++ b/README.md @@ -8,9 +8,13 @@ Regionalized Value State Dependence Graph (RVSDG) as intermediate representation * Doxygen 1.9.1 ### HLS dependencies -* CIRCT that is built with LLVM/MLIR 17 +* MLIR 18 +* CIRCT that is built with LLVM/MLIR 18 * Verilator 4.038 +### MLIR backend and frontend dependencies +* MLIR 18 + ### Optional dependencies * gcovr, for computing code coverage summary @@ -20,8 +24,8 @@ Regionalized Value State Dependence Graph (RVSDG) as intermediate representation make all ``` -This presumes that llvm-config-17 can be found in $PATH. If that is not the case, -you may need to explicitly configure it: +This presumes that the right version of llvm-config can be found in $PATH. 
+If that is not the case, you may need to explicitly configure it: ``` ./configure.sh --llvm-config /path/to/llvm-config @@ -76,9 +80,22 @@ and the build has to be configured accordingly. A change of build configuration stale intermediate files first, i.e., run 'make clean'. CIRCT and the HLS backend can be setup with the following commands: ``` -./scripts/build-circt.sh --build-path --install-path +./scripts/build-circt.sh + +./configure --enable-hls +``` + +## MLIR backend +The MLIR backend uses the MLIR RVSDG dialect. + +A compatible installation of the MLIR RVSDG dialect is needed to compile jlm with the MLIR backend +enabled, and the build has to be configured accordingly. A change of build configuration may require +cleaning stale intermediate files first, i.e., run 'make clean'. +The MLIR RVSDG dialect and the MLIR backend can be setup with the following commands: +``` +./scripts/build-mlir.sh -./configure --enable-hls= +./configure --enable-mlir ``` ## Publications From d1a91a2925c41df1e09064929b22f46ead495f44 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 27 Aug 2024 21:14:39 +0200 Subject: [PATCH 060/170] Make argument class abstract (#600) 1. Replaces all usages of `argument::create()` with `TestGraphArgument::Create()` in the unit tests. 2. Makes the argument class abstract 3. Minor clean ups in the argument class 4. 
Add documentation to argument class --- jlm/rvsdg/region.cpp | 17 ------------ jlm/rvsdg/region.hpp | 38 ++++++++++++------------- tests/jlm/rvsdg/ArgumentTests.cpp | 7 +++-- tests/jlm/rvsdg/RegionTests.cpp | 46 +++++++++++++++++-------------- tests/jlm/rvsdg/ResultTests.cpp | 6 ++-- tests/jlm/rvsdg/test-graph.cpp | 9 +++--- tests/jlm/rvsdg/test-nodes.cpp | 17 ++++++------ tests/test-operation.hpp | 17 ++++++++---- 8 files changed, 76 insertions(+), 81 deletions(-) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index fe3b6ac90..ac9f2d548 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -43,23 +43,6 @@ argument::argument( } } -argument & -argument::Copy(rvsdg::region & region, structural_input * input) -{ - return *argument::create(®ion, input, Type()); -} - -jlm::rvsdg::argument * -argument::create( - jlm::rvsdg::region * region, - structural_input * input, - std::shared_ptr type) -{ - auto argument = new jlm::rvsdg::argument(region, input, std::move(type)); - region->append_argument(argument); - return argument; -} - result::~result() noexcept { on_input_destroy(this); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 59627d176..5d5d9d7fd 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -31,23 +31,31 @@ class structural_op; class structural_output; class substitution_map; +/** + * \brief Represents the argument of a region. + * + * Region arguments represent the initial values of the region's acyclic graph. These values + * are mapped to the arguments throughout the execution, and the concrete semantics of this mapping + * depends on the structural node the region is part of. A region argument is either linked + * with a \ref structural_input or is a standalone argument. 
+ */ class argument : public output { - jlm::util::intrusive_list_anchor structural_input_anchor_; + util::intrusive_list_anchor structural_input_anchor_; public: - typedef jlm::util:: - intrusive_list_accessor - structural_input_accessor; + typedef util::intrusive_list_accessor + structural_input_accessor; - virtual ~argument() noexcept; + ~argument() noexcept override; protected: argument( - jlm::rvsdg::region * region, - jlm::rvsdg::structural_input * input, + rvsdg::region * region, + structural_input * input, std::shared_ptr type); +public: argument(const argument &) = delete; argument(argument &&) = delete; @@ -58,8 +66,7 @@ class argument : public output argument & operator=(argument &&) = delete; -public: - inline jlm::rvsdg::structural_input * + [[nodiscard]] structural_input * input() const noexcept { return input_; @@ -72,21 +79,12 @@ class argument : public output * @param input The structural_input to the argument, if any. * * @return A reference to the copied argument. - * - * FIXME: This method should be made abstract once we enforced that no instances of argument - * itself can be created any longer. 
*/ virtual argument & - Copy(rvsdg::region & region, structural_input * input); - - static jlm::rvsdg::argument * - create( - jlm::rvsdg::region * region, - structural_input * input, - std::shared_ptr type); + Copy(rvsdg::region & region, structural_input * input) = 0; private: - jlm::rvsdg::structural_input * input_; + structural_input * input_; }; class result : public input diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index 73babebd6..f7251e6a3 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -16,6 +16,7 @@ static int ArgumentNodeMismatch() { using namespace jlm::rvsdg; + using namespace jlm::tests; // Arrange auto valueType = jlm::tests::valuetype::Create(); @@ -32,7 +33,7 @@ ArgumentNodeMismatch() bool inputErrorHandlerCalled = false; try { - argument::create(structuralNode2->subregion(0), structuralInput, valueType); + TestGraphArgument::Create(*structuralNode2->subregion(0), structuralInput, valueType); } catch (jlm::util::error & e) { @@ -67,7 +68,7 @@ ArgumentInputTypeMismatch() bool exceptionWasCaught = false; try { - jlm::rvsdg::argument::create(structuralNode->subregion(0), structuralInput, stateType); + TestGraphArgument::Create(*structuralNode->subregion(0), structuralInput, stateType); // The line below should not be executed as the line above is expected to throw an exception. assert(false); } @@ -80,7 +81,7 @@ ArgumentInputTypeMismatch() exceptionWasCaught = false; try { - jlm::rvsdg::argument::create(structuralNode->subregion(0), structuralInput, stateType); + TestGraphArgument::Create(*structuralNode->subregion(0), structuralInput, stateType); // The line below should not be executed as the line above is expected to throw an exception. 
assert(false); } diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 59b2dd626..e39b98b56 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -28,15 +28,15 @@ Contains() auto structuralNode1 = structural_node::create(graph.root(), 1); auto structuralInput1 = jlm::rvsdg::structural_input::create(structuralNode1, import, valueType); - auto regionArgument1 = - jlm::rvsdg::argument::create(structuralNode1->subregion(0), structuralInput1, valueType); - unary_op::create(structuralNode1->subregion(0), valueType, regionArgument1, valueType); + auto & regionArgument1 = + TestGraphArgument::Create(*structuralNode1->subregion(0), structuralInput1, valueType); + unary_op::create(structuralNode1->subregion(0), valueType, ®ionArgument1, valueType); auto structuralNode2 = structural_node::create(graph.root(), 1); auto structuralInput2 = jlm::rvsdg::structural_input::create(structuralNode2, import, valueType); - auto regionArgument2 = - jlm::rvsdg::argument::create(structuralNode2->subregion(0), structuralInput2, valueType); - binary_op::create(valueType, valueType, regionArgument2, regionArgument2); + auto & regionArgument2 = + TestGraphArgument::Create(*structuralNode2->subregion(0), structuralInput2, valueType); + binary_op::create(valueType, valueType, ®ionArgument2, ®ionArgument2); // Act & Assert assert(jlm::rvsdg::region::Contains(*graph.root(), false)); @@ -169,22 +169,24 @@ JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-RemoveResultsWhere", RemoveResults static int RemoveArgumentsWhere() { + using namespace jlm::tests; + // Arrange jlm::rvsdg::graph rvsdg; jlm::rvsdg::region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); - auto argument0 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); - auto argument1 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); - auto argument2 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); + auto & argument0 = 
TestGraphArgument::Create(region, nullptr, valueType); + auto & argument1 = TestGraphArgument::Create(region, nullptr, valueType); + auto & argument2 = TestGraphArgument::Create(region, nullptr, valueType); - auto node = jlm::tests::test_op::Create(®ion, { valueType }, { argument1 }, { valueType }); + auto node = jlm::tests::test_op::Create(®ion, { valueType }, { &argument1 }, { valueType }); // Act & Arrange assert(region.narguments() == 3); - assert(argument0->index() == 0); - assert(argument1->index() == 1); - assert(argument2->index() == 2); + assert(argument0.index() == 0); + assert(argument1.index() == 1); + assert(argument2.index() == 2); region.RemoveArgumentsWhere( [](const jlm::rvsdg::argument & argument) @@ -192,7 +194,7 @@ RemoveArgumentsWhere() return true; }); assert(region.narguments() == 1); - assert(argument1->index() == 0); + assert(argument1.index() == 0); region.remove_node(node); region.RemoveArgumentsWhere( @@ -201,7 +203,7 @@ RemoveArgumentsWhere() return false; }); assert(region.narguments() == 1); - assert(argument1->index() == 0); + assert(argument1.index() == 0); region.RemoveArgumentsWhere( [](const jlm::rvsdg::argument & argument) @@ -221,19 +223,21 @@ JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-RemoveArgumentsWhere", RemoveArgum static int PruneArguments() { + using namespace jlm::tests; + // Arrange jlm::rvsdg::graph rvsdg; jlm::rvsdg::region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); - auto argument0 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); - jlm::rvsdg::argument::create(®ion, nullptr, valueType); - auto argument2 = jlm::rvsdg::argument::create(®ion, nullptr, valueType); + auto & argument0 = TestGraphArgument::Create(region, nullptr, valueType); + TestGraphArgument::Create(region, nullptr, valueType); + auto & argument2 = TestGraphArgument::Create(region, nullptr, valueType); auto node = jlm::tests::test_op::Create( ®ion, { valueType, valueType }, - { argument0, argument2 }, + { 
&argument0, &argument2 }, { valueType }); // Act & Arrange @@ -241,8 +245,8 @@ PruneArguments() region.PruneArguments(); assert(region.narguments() == 2); - assert(argument0->index() == 0); - assert(argument2->index() == 1); + assert(argument0.index() == 0); + assert(argument2.index() == 1); region.remove_node(node); region.PruneArguments(); diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index c89a04332..511cd832d 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -16,6 +16,7 @@ static int ResultNodeMismatch() { using namespace jlm::rvsdg; + using namespace jlm::tests; // Arrange auto valueType = jlm::tests::valuetype::Create(); @@ -28,14 +29,15 @@ ResultNodeMismatch() auto structuralInput = structural_input::create(structuralNode1, import, valueType); - auto argument = argument::create(structuralNode1->subregion(0), structuralInput, valueType); + auto & argument = + TestGraphArgument::Create(*structuralNode1->subregion(0), structuralInput, valueType); auto structuralOutput = structural_output::create(structuralNode1, valueType); // Act bool outputErrorHandlerCalled = false; try { - result::create(structuralNode2->subregion(0), argument, structuralOutput, valueType); + result::create(structuralNode2->subregion(0), &argument, structuralOutput, valueType); } catch (jlm::util::error & e) { diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index ff24405e4..65980b5f4 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -29,6 +29,7 @@ static int test_recursive_prune() { using namespace jlm::rvsdg; + using namespace jlm::tests; auto t = jlm::tests::valuetype::Create(); @@ -40,9 +41,9 @@ test_recursive_prune() auto n3 = jlm::tests::structural_node::create(graph.root(), 1); structural_input::create(n3, imp, t); - auto a1 = argument::create(n3->subregion(0), nullptr, t); - auto n4 = jlm::tests::test_op::create(n3->subregion(0), { a1 }, { t }); - auto 
n5 = jlm::tests::test_op::create(n3->subregion(0), { a1 }, { t }); + auto & a1 = TestGraphArgument::Create(*n3->subregion(0), nullptr, t); + auto n4 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); + auto n5 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); result::create(n3->subregion(0), n4->output(0), nullptr, t); auto o1 = structural_output::create(n3, t); @@ -146,7 +147,7 @@ Copy() auto valueType = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - auto & argument = TestGraphArgument::Create(*graph.root(), valueType); + auto & argument = TestGraphArgument::Create(*graph.root(), nullptr, valueType); auto node = test_op::create(graph.root(), { &argument }, { valueType }); TestGraphResult::Create(*node->output(0)); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index b08446d19..8cab5f049 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -13,6 +13,7 @@ static void test_node_copy(void) { using namespace jlm::rvsdg; + using namespace jlm::tests; auto stype = jlm::tests::statetype::Create(); auto vtype = jlm::tests::valuetype::Create(); @@ -27,11 +28,11 @@ test_node_copy(void) auto o1 = structural_output::create(n1, stype); auto o2 = structural_output::create(n1, vtype); - auto a1 = argument::create(n1->subregion(0), i1, stype); - auto a2 = argument::create(n1->subregion(0), i2, vtype); + auto & a1 = TestGraphArgument::Create(*n1->subregion(0), i1, stype); + auto & a2 = TestGraphArgument::Create(*n1->subregion(0), i2, vtype); - auto n2 = jlm::tests::test_op::create(n1->subregion(0), { a1 }, { stype }); - auto n3 = jlm::tests::test_op::create(n1->subregion(0), { a2 }, { vtype }); + auto n2 = jlm::tests::test_op::create(n1->subregion(0), { &a1 }, { stype }); + auto n3 = jlm::tests::test_op::create(n1->subregion(0), { &a2 }, { vtype }); result::create(n1->subregion(0), n2->output(0), o1, stype); result::create(n1->subregion(0), n3->output(0), o2, vtype); @@ -61,10 
+62,10 @@ test_node_copy(void) /* copy second into third region only with arguments */ jlm::rvsdg::substitution_map smap2; - auto a3 = argument::create(n1->subregion(2), i1, stype); - auto a4 = argument::create(n1->subregion(2), i2, vtype); - smap2.insert(r2->argument(0), a3); - smap2.insert(r2->argument(1), a4); + auto & a3 = TestGraphArgument::Create(*n1->subregion(2), i1, stype); + auto & a4 = TestGraphArgument::Create(*n1->subregion(2), i2, vtype); + smap2.insert(r2->argument(0), &a3); + smap2.insert(r2->argument(1), &a4); smap2.insert(o1, o1); smap2.insert(o2, o2); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 0563098cc..14bd133b8 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -347,22 +347,27 @@ create_testop( class TestGraphArgument final : public jlm::rvsdg::argument { private: - TestGraphArgument(jlm::rvsdg::region & region, std::shared_ptr type) - : jlm::rvsdg::argument(®ion, nullptr, type) + TestGraphArgument( + jlm::rvsdg::region & region, + jlm::rvsdg::structural_input * input, + std::shared_ptr type) + : jlm::rvsdg::argument(®ion, input, type) {} public: TestGraphArgument & Copy(jlm::rvsdg::region & region, jlm::rvsdg::structural_input * input) override { - JLM_ASSERT(input == nullptr); - return Create(region, Type()); + return Create(region, input, Type()); } static TestGraphArgument & - Create(jlm::rvsdg::region & region, std::shared_ptr type) + Create( + jlm::rvsdg::region & region, + jlm::rvsdg::structural_input * input, + std::shared_ptr type) { - auto graphArgument = new TestGraphArgument(region, std::move(type)); + auto graphArgument = new TestGraphArgument(region, input, std::move(type)); region.append_argument(graphArgument); return *graphArgument; } From 7489075cd6741bd1fae461b1b27c0f3e20d8c611 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 28 Aug 2024 20:35:30 +0200 Subject: [PATCH 061/170] Add ThetaPredicateResult class (#601) Add class for representing the predicate result of a 
theta node. --- jlm/rvsdg/theta.cpp | 16 ++++++++++++++++ jlm/rvsdg/theta.hpp | 38 +++++++++++++++++++++++++++++++------- 2 files changed, 47 insertions(+), 7 deletions(-) diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 8c216e850..fd46c6268 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -27,6 +27,13 @@ theta_op::copy() const return std::unique_ptr(new theta_op(*this)); } +theta_node::theta_node(rvsdg::region & parent) + : structural_node(rvsdg::theta_op(), &parent, 1) +{ + auto predicate = control_false(subregion()); + ThetaPredicateResult::Create(*predicate); +} + /* theta input */ theta_input::~theta_input() noexcept @@ -61,6 +68,15 @@ ThetaResult::Copy(rvsdg::output & origin, structural_output * output) return ThetaResult::Create(origin, *thetaOutput); } +ThetaPredicateResult::~ThetaPredicateResult() noexcept = default; + +ThetaPredicateResult & +ThetaPredicateResult::Copy(rvsdg::output & origin, structural_output * output) +{ + JLM_ASSERT(output == nullptr); + return ThetaPredicateResult::Create(origin); +} + /* theta node */ theta_node::~theta_node() diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index ba418d6a2..defe8fa10 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -91,18 +91,13 @@ class theta_node final : public structural_node virtual ~theta_node(); private: - inline theta_node(jlm::rvsdg::region * parent) - : structural_node(jlm::rvsdg::theta_op(), parent, 1) - { - auto predicate = jlm::rvsdg::control_false(subregion()); - result::create(subregion(), predicate, nullptr, ctltype::Create(2)); - } + explicit theta_node(rvsdg::region & parent); public: static jlm::rvsdg::theta_node * create(jlm::rvsdg::region * parent) { - return new jlm::rvsdg::theta_node(parent); + return new theta_node(*parent); } inline jlm::rvsdg::region * @@ -407,6 +402,35 @@ class ThetaResult final : public result } }; +/** + * Represents the predicate result of a theta subregion. 
+ */ +class ThetaPredicateResult final : public result +{ + friend theta_node; + +public: + ~ThetaPredicateResult() noexcept override; + + ThetaPredicateResult & + Copy(rvsdg::output & origin, structural_output * output) override; + +private: + explicit ThetaPredicateResult(rvsdg::output & origin) + : result(origin.region(), &origin, nullptr, ctltype::Create(2)) + { + JLM_ASSERT(is(origin.region()->node())); + } + + static ThetaPredicateResult & + Create(rvsdg::output & origin) + { + auto thetaResult = new ThetaPredicateResult(origin); + origin.region()->append_result(thetaResult); + return *thetaResult; + } +}; + static inline bool is_invariant(const jlm::rvsdg::theta_output * output) noexcept { From 68a5dffe3b861b94554552558d6ec21583a438c5 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 28 Aug 2024 21:11:59 +0200 Subject: [PATCH 062/170] Add RVSDG tree printer pass (#595) This pass enables the printing of the RVSDG tree between optimizations. Intended usage: `jlm-opt --RvsdgTreePrinter --DeadNodeElimination --RvsdgTreePrinter -s /tmp input.ll` This will write the files `input-rvsdgTree-0` and `input-rvsdgTree-1` to `/tmp`, containing the RVSDG tree from before applying dead node elimination and from after dead node elimination, respectively. The plan is to extend this pass with support for annotations to the RVSDG tree. Moreover, the pass can also be generalized to print not just RVSDG tree, but also the ASCII version of the RVSDG or other debug output. 
--- jlm/llvm/Makefile.sub | 3 + jlm/llvm/opt/RvsdgTreePrinter.cpp | 94 +++++++++++++++++++ jlm/llvm/opt/RvsdgTreePrinter.hpp | 94 +++++++++++++++++++ jlm/tooling/CommandGraphGenerator.cpp | 5 +- jlm/tooling/CommandLine.cpp | 23 ++++- jlm/tooling/CommandLine.hpp | 14 ++- jlm/util/Statistics.cpp | 1 + jlm/util/Statistics.hpp | 1 + tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp | 55 +++++++++++ tests/jlm/tooling/TestJlmOptCommand.cpp | 4 + .../tooling/TestJlmOptCommandLineParser.cpp | 14 ++- 11 files changed, 301 insertions(+), 7 deletions(-) create mode 100644 jlm/llvm/opt/RvsdgTreePrinter.cpp create mode 100644 jlm/llvm/opt/RvsdgTreePrinter.hpp create mode 100644 tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 06a636c4a..6bea7a6b9 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -61,6 +61,7 @@ libllvm_SOURCES = \ jlm/llvm/opt/pull.cpp \ jlm/llvm/opt/push.cpp \ jlm/llvm/opt/reduction.cpp \ + jlm/llvm/opt/RvsdgTreePrinter.cpp \ jlm/llvm/opt/unroll.cpp \ libllvm_HEADERS = \ @@ -85,6 +86,7 @@ libllvm_HEADERS = \ jlm/llvm/opt/reduction.hpp \ jlm/llvm/opt/InvariantValueRedirection.hpp \ jlm/llvm/opt/inversion.hpp \ + jlm/llvm/opt/RvsdgTreePrinter.hpp \ jlm/llvm/frontend/LlvmModuleConversion.hpp \ jlm/llvm/frontend/LlvmTypeConversion.hpp \ jlm/llvm/frontend/ControlFlowRestructuring.hpp \ @@ -188,6 +190,7 @@ libllvm_TESTS += \ tests/jlm/llvm/opt/alias-analyses/TestSteensgaard \ tests/jlm/llvm/opt/alias-analyses/TestTopDownMemoryNodeEliminator \ tests/jlm/llvm/opt/InvariantValueRedirectionTests \ + tests/jlm/llvm/opt/RvsdgTreePrinterTests \ tests/jlm/llvm/opt/test-cne \ tests/jlm/llvm/opt/TestDeadNodeElimination \ tests/jlm/llvm/opt/test-inlining \ diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp new file mode 100644 index 000000000..748999aa7 --- /dev/null +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -0,0 +1,94 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING 
for terms of redistribution. + */ + +#include +#include +#include + +#include + +namespace jlm::llvm +{ + +class RvsdgTreePrinter::Statistics final : public util::Statistics +{ +public: + ~Statistics() override = default; + + explicit Statistics(const util::filepath & sourceFile) + : util::Statistics(util::Statistics::Id::RvsdgTreePrinter, sourceFile) + {} + + void + Start() noexcept + { + AddTimer(Label::Timer).start(); + } + + void + Stop() noexcept + { + GetTimer(Label::Timer).stop(); + } + + static std::unique_ptr + Create(const util::filepath & sourceFile) + { + return std::make_unique(sourceFile); + } +}; + +RvsdgTreePrinter::~RvsdgTreePrinter() noexcept = default; + +void +RvsdgTreePrinter::run(RvsdgModule & rvsdgModule, util::StatisticsCollector & statisticsCollector) +{ + auto statistics = Statistics::Create(rvsdgModule.SourceFileName()); + statistics->Start(); + + auto tree = rvsdg::region::ToTree(*rvsdgModule.Rvsdg().root()); + WriteTreeToFile(rvsdgModule, tree); + + statistics->Stop(); +} + +void +RvsdgTreePrinter::run(RvsdgModule & rvsdgModule) +{ + util::StatisticsCollector collector; + run(rvsdgModule, collector); +} + +void +RvsdgTreePrinter::WriteTreeToFile(const RvsdgModule & rvsdgModule, const std::string & tree) const +{ + auto outputFile = CreateOutputFile(rvsdgModule); + + outputFile.open("w"); + fprintf(outputFile.fd(), "%s\n", tree.c_str()); + outputFile.close(); +} + +util::file +RvsdgTreePrinter::CreateOutputFile(const RvsdgModule & rvsdgModule) const +{ + auto fileName = util::strfmt( + Configuration_.OutputDirectory().to_str(), + "/", + rvsdgModule.SourceFileName().base().c_str(), + "-rvsdgTree-", + GetOutputFileNameCounter(rvsdgModule)); + return util::filepath(fileName); +} + +uint64_t +RvsdgTreePrinter::GetOutputFileNameCounter(const RvsdgModule & rvsdgModule) +{ + static std::unordered_map RvsdgModuleCounterMap_; + + return RvsdgModuleCounterMap_[&rvsdgModule]++; +} + +} diff --git a/jlm/llvm/opt/RvsdgTreePrinter.hpp 
b/jlm/llvm/opt/RvsdgTreePrinter.hpp new file mode 100644 index 000000000..3e8388ec2 --- /dev/null +++ b/jlm/llvm/opt/RvsdgTreePrinter.hpp @@ -0,0 +1,94 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#ifndef JLM_LLVM_OPT_RVSDGTREEPRINTER_HPP +#define JLM_LLVM_OPT_RVSDGTREEPRINTER_HPP + +#include +#include + +namespace jlm::util +{ +class StatisticsCollector; +} + +namespace jlm::llvm +{ + +class RvsdgModule; + +/** \brief RVSDG tree printer debug pass + * + * Prints an RVSDG tree to a file. + */ +class RvsdgTreePrinter final : public optimization +{ + class Statistics; + +public: + /** + * Configuration for the \ref RvsdgTreePrinter. + */ + class Configuration final + { + public: + explicit Configuration(const util::filepath & outputDirectory) + : OutputDirectory_(std::move(outputDirectory)) + { + JLM_ASSERT(outputDirectory.IsDirectory()); + JLM_ASSERT(outputDirectory.Exists()); + } + + /** + * The output directory for the RVSDG tree files. + */ + [[nodiscard]] const util::filepath & + OutputDirectory() const noexcept + { + return OutputDirectory_; + } + + private: + util::filepath OutputDirectory_; + }; + + ~RvsdgTreePrinter() noexcept override; + + explicit RvsdgTreePrinter(Configuration configuration) + : Configuration_(std::move(configuration)) + {} + + RvsdgTreePrinter(const RvsdgTreePrinter &) = delete; + + RvsdgTreePrinter(RvsdgTreePrinter &&) = delete; + + RvsdgTreePrinter & + operator=(const RvsdgTreePrinter &) = delete; + + RvsdgTreePrinter & + operator=(RvsdgTreePrinter &&) = delete; + + void + run(RvsdgModule & rvsdgModule, jlm::util::StatisticsCollector & statisticsCollector) override; + + void + run(RvsdgModule & rvsdgModule); + +private: + void + WriteTreeToFile(const RvsdgModule & rvsdgModule, const std::string & tree) const; + + [[nodiscard]] util::file + CreateOutputFile(const RvsdgModule & rvsdgModule) const; + + static uint64_t + GetOutputFileNameCounter(const RvsdgModule & rvsdgModule); + + 
Configuration Configuration_; +}; + +} + +#endif diff --git a/jlm/tooling/CommandGraphGenerator.cpp b/jlm/tooling/CommandGraphGenerator.cpp index 0753c67e7..5196888ea 100644 --- a/jlm/tooling/CommandGraphGenerator.cpp +++ b/jlm/tooling/CommandGraphGenerator.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. */ +#include #include #include #include @@ -126,9 +127,10 @@ JlcCommandGraphGenerator::GenerateCommandGraph(const JlcCommandLineOptions & com if (compilation.RequiresOptimization()) { + util::filepath tempDirectory(std::filesystem::temp_directory_path()); auto clangCommand = util::AssertedCast(&lastNode->GetCommand()); auto statisticsFilePath = util::StatisticsCollectorSettings::CreateUniqueStatisticsFile( - util::filepath(std::filesystem::temp_directory_path()), + tempDirectory, compilation.InputFile()); util::StatisticsCollectorSettings statisticsCollectorSettings( statisticsFilePath, @@ -140,6 +142,7 @@ JlcCommandGraphGenerator::GenerateCommandGraph(const JlcCommandLineOptions & com CreateJlmOptCommandOutputFile(compilation.InputFile()), JlmOptCommandLineOptions::OutputFormat::Llvm, statisticsCollectorSettings, + jlm::llvm::RvsdgTreePrinter::Configuration(tempDirectory), commandLineOptions.JlmOptOptimizations_); auto & jlmOptCommandNode = diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index fb908ba86..26a9accd9 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -135,6 +136,7 @@ JlmOptCommandLineOptions::FromCommandLineArgumentToOptimizationId( { OptimizationCommandLineArgument::NodePushOut_, OptimizationId::NodePushOut }, { OptimizationCommandLineArgument::NodePullIn_, OptimizationId::NodePullIn }, { OptimizationCommandLineArgument::NodeReduction_, OptimizationId::NodeReduction }, + { OptimizationCommandLineArgument::RvsdgTreePrinter_, OptimizationId::RvsdgTreePrinter }, { 
OptimizationCommandLineArgument::ThetaGammaInversion_, OptimizationId::ThetaGammaInversion }, { OptimizationCommandLineArgument::LoopUnrolling_, OptimizationId::LoopUnrolling } }); @@ -168,6 +170,7 @@ JlmOptCommandLineOptions::ToCommandLineArgument(OptimizationId optimizationId) { OptimizationId::NodePullIn, OptimizationCommandLineArgument::NodePullIn_ }, { OptimizationId::NodePushOut, OptimizationCommandLineArgument::NodePushOut_ }, { OptimizationId::NodeReduction, OptimizationCommandLineArgument::NodeReduction_ }, + { OptimizationId::RvsdgTreePrinter, OptimizationCommandLineArgument::RvsdgTreePrinter_ }, { OptimizationId::ThetaGammaInversion, OptimizationCommandLineArgument::ThetaGammaInversion_ } }); @@ -224,7 +227,7 @@ JlmOptCommandLineOptions::ToCommandLineArgument(OutputFormat outputFormat) } std::unique_ptr -JlmOptCommandLineOptions::GetOptimization(enum OptimizationId optimizationId) +JlmOptCommandLineOptions::GetOptimization(enum OptimizationId optimizationId) const { using Andersen = llvm::aa::Andersen; using Steensgaard = llvm::aa::Steensgaard; @@ -257,6 +260,8 @@ JlmOptCommandLineOptions::GetOptimization(enum OptimizationId optimizationId) return std::make_unique(); case OptimizationId::NodeReduction: return std::make_unique(); + case OptimizationId::RvsdgTreePrinter: + return std::make_unique(RvsdgTreePrinterConfiguration_); case OptimizationId::ThetaGammaInversion: return std::make_unique(); default: @@ -289,6 +294,7 @@ JlmOptCommandLineOptions::GetStatisticsIdCommandLineArguments() { util::Statistics::Id::RvsdgConstruction, "print-rvsdg-construction" }, { util::Statistics::Id::RvsdgDestruction, "print-rvsdg-destruction" }, { util::Statistics::Id::RvsdgOptimization, "print-rvsdg-optimization" }, + { util::Statistics::Id::RvsdgTreePrinter, "print-rvsdg-tree" }, { util::Statistics::Id::SteensgaardAnalysis, "print-steensgaard-analysis" }, { util::Statistics::Id::ThetaGammaInversion, "print-ivt-stat" }, { util::Statistics::Id::TopDownMemoryNodeEliminator, 
"TopDownMemoryNodeEliminator" } @@ -586,6 +592,9 @@ JlcCommandLineParser::ParseCommandLineArguments(int argc, const char * const * a CreateStatisticsOption( util::Statistics::Id::RvsdgOptimization, "Collect RVSDG optimization pass statistics."), + CreateStatisticsOption( + util::Statistics::Id::RvsdgTreePrinter, + "Collect RVSDG tree printer pass statistics."), CreateStatisticsOption( util::Statistics::Id::SteensgaardAnalysis, "Collect Steensgaard alias analysis pass statistics."), @@ -734,7 +743,8 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const std::string statisticsDirectoryDefault = std::filesystem::temp_directory_path(); const auto statisticDirectoryDescription = - "Write statistics to file in . Default is " + statisticsDirectoryDefault + "."; + "Write statistics and debug output to files in . Default is " + + statisticsDirectoryDefault + "."; cl::opt statisticDirectory( "s", cl::init(statisticsDirectoryDefault), @@ -803,6 +813,9 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const CreateStatisticsOption( util::Statistics::Id::RvsdgOptimization, "Write RVSDG optimization statistics to file."), + CreateStatisticsOption( + util::Statistics::Id::RvsdgTreePrinter, + "Write RVSDG tree printer pass statistics."), CreateStatisticsOption( util::Statistics::Id::SteensgaardAnalysis, "Write Steensgaard analysis statistics to file."), @@ -862,6 +875,7 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const auto nodePushOut = JlmOptCommandLineOptions::OptimizationId::NodePushOut; auto nodePullIn = JlmOptCommandLineOptions::OptimizationId::NodePullIn; auto nodeReduction = JlmOptCommandLineOptions::OptimizationId::NodeReduction; + auto rvsdgTreePrinter = JlmOptCommandLineOptions::OptimizationId::RvsdgTreePrinter; auto thetaGammaInversion = JlmOptCommandLineOptions::OptimizationId::ThetaGammaInversion; auto loopUnrolling = JlmOptCommandLineOptions::OptimizationId::LoopUnrolling; @@ 
-911,6 +925,10 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const nodeReduction, JlmOptCommandLineOptions::ToCommandLineArgument(nodeReduction), "Node Reduction"), + ::clEnumValN( + rvsdgTreePrinter, + JlmOptCommandLineOptions::ToCommandLineArgument(rvsdgTreePrinter), + "Rvsdg Tree Printer"), ::clEnumValN( thetaGammaInversion, JlmOptCommandLineOptions::ToCommandLineArgument(thetaGammaInversion), @@ -949,6 +967,7 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const outputFile, outputFormat, std::move(statisticsCollectorSettings), + llvm::RvsdgTreePrinter::Configuration(statisticsDirectoryFilePath), std::move(optimizationIds)); return *CommandLineOptions_; diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index 2a6b8ce63..5e37c470e 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -7,6 +7,7 @@ #define JLM_TOOLING_COMMANDLINE_HPP #include +#include #include #include #include @@ -76,6 +77,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions NodePullIn, NodePushOut, NodeReduction, + RvsdgTreePrinter, ThetaGammaInversion, LastEnumValue // must always be the last enum value, used for iteration @@ -87,13 +89,15 @@ class JlmOptCommandLineOptions final : public CommandLineOptions util::filepath outputFile, OutputFormat outputFormat, util::StatisticsCollectorSettings statisticsCollectorSettings, + llvm::RvsdgTreePrinter::Configuration rvsdgTreePrinterConfiguration, std::vector optimizations) : InputFile_(std::move(inputFile)), InputFormat_(inputFormat), OutputFile_(std::move(outputFile)), OutputFormat_(outputFormat), StatisticsCollectorSettings_(std::move(statisticsCollectorSettings)), - OptimizationIds_(std::move(optimizations)) + OptimizationIds_(std::move(optimizations)), + RvsdgTreePrinterConfiguration_(std::move(rvsdgTreePrinterConfiguration)) {} void @@ -156,8 +160,8 @@ class JlmOptCommandLineOptions final : public CommandLineOptions static 
const char * ToCommandLineArgument(OutputFormat outputFormat); - static std::unique_ptr - GetOptimization(enum OptimizationId optimizationId); + [[nodiscard]] std::unique_ptr + GetOptimization(enum OptimizationId optimizationId) const; static std::unique_ptr Create( @@ -166,6 +170,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions util::filepath outputFile, OutputFormat outputFormat, util::StatisticsCollectorSettings statisticsCollectorSettings, + llvm::RvsdgTreePrinter::Configuration rvsdgTreePrinterConfiguration, std::vector optimizations) { return std::make_unique( @@ -174,6 +179,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions std::move(outputFile), outputFormat, std::move(statisticsCollectorSettings), + std::move(rvsdgTreePrinterConfiguration), std::move(optimizations)); } @@ -184,6 +190,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions OutputFormat OutputFormat_; util::StatisticsCollectorSettings StatisticsCollectorSettings_; std::vector OptimizationIds_; + llvm::RvsdgTreePrinter::Configuration RvsdgTreePrinterConfiguration_; struct OptimizationCommandLineArgument { @@ -200,6 +207,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions inline static const char * ThetaGammaInversion_ = "ThetaGammaInversion"; inline static const char * LoopUnrolling_ = "LoopUnrolling"; inline static const char * NodeReduction_ = "NodeReduction"; + inline static const char * RvsdgTreePrinter_ = "RvsdgTreePrinter"; }; static const util::BijectiveMap & diff --git a/jlm/util/Statistics.cpp b/jlm/util/Statistics.cpp index 9347c1fb9..78806f911 100644 --- a/jlm/util/Statistics.cpp +++ b/jlm/util/Statistics.cpp @@ -39,6 +39,7 @@ GetStatisticsIdNames() { Statistics::Id::RvsdgConstruction, "InterProceduralGraphToRvsdg" }, { Statistics::Id::RvsdgDestruction, "RVSDGDESTRUCTION" }, { Statistics::Id::RvsdgOptimization, "RVSDGOPTIMIZATION" }, + { Statistics::Id::RvsdgTreePrinter, "RvsdgTreePrinter" }, { 
Statistics::Id::SteensgaardAnalysis, "SteensgaardAnalysis" }, { Statistics::Id::ThetaGammaInversion, "IVT" }, { Statistics::Id::TopDownMemoryNodeEliminator, "TopDownMemoryNodeEliminator" } diff --git a/jlm/util/Statistics.hpp b/jlm/util/Statistics.hpp index 02328228d..4f808ea0f 100644 --- a/jlm/util/Statistics.hpp +++ b/jlm/util/Statistics.hpp @@ -50,6 +50,7 @@ class Statistics RvsdgConstruction, RvsdgDestruction, RvsdgOptimization, + RvsdgTreePrinter, SteensgaardAnalysis, ThetaGammaInversion, TopDownMemoryNodeEliminator, diff --git a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp new file mode 100644 index 000000000..612ad58e3 --- /dev/null +++ b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp @@ -0,0 +1,55 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include + +#include +#include +#include + +#include + +static int +PrintRvsdgTree() +{ + using namespace jlm::llvm; + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + std::string fileName = "PrintTreeTest"; + auto rvsdgModule = RvsdgModule::Create({ fileName }, "", ""); + + auto functionType = + FunctionType::Create({ MemoryStateType::Create() }, { MemoryStateType::Create() }); + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "f", + linkage::external_linkage); + auto lambdaOutput = lambda->finalize({ lambda->fctargument(0) }); + GraphExport::Create(*lambdaOutput, "f"); + + auto tempDirectory = std::filesystem::temp_directory_path(); + RvsdgTreePrinter::Configuration configuration({ tempDirectory }); + RvsdgTreePrinter printer(configuration); + + // Act + printer.run(*rvsdgModule); + + // Assert + auto outputFilePath = tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"; + + std::ifstream file(outputFilePath); + std::stringstream buffer; + buffer << file.rdbuf(); + + assert(buffer.str() == "RootRegion\n-LAMBDA[f]\n--Region[0]\n\n"); + + return 0; +} + 
+JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/RvsdgTreePrinterTests-PrintRvsdgTree", PrintRvsdgTree) diff --git a/tests/jlm/tooling/TestJlmOptCommand.cpp b/tests/jlm/tooling/TestJlmOptCommand.cpp index 613fa8131..3eecc3da8 100644 --- a/tests/jlm/tooling/TestJlmOptCommand.cpp +++ b/tests/jlm/tooling/TestJlmOptCommand.cpp @@ -6,6 +6,7 @@ #include #include +#include #include #include @@ -14,7 +15,9 @@ static void TestStatistics() { + using namespace jlm::llvm; using namespace jlm::tooling; + using namespace jlm::util; // Arrange std::string expectedStatisticsDir = "/myStatisticsDir/"; @@ -29,6 +32,7 @@ TestStatistics() jlm::util::filepath("outputFile.ll"), JlmOptCommandLineOptions::OutputFormat::Llvm, statisticsCollectorSettings, + RvsdgTreePrinter::Configuration(filepath(std::filesystem::temp_directory_path())), { JlmOptCommandLineOptions::OptimizationId::DeadNodeElimination, JlmOptCommandLineOptions::OptimizationId::LoopUnrolling }); diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index c38ca1d0a..db54d1948 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -66,7 +66,19 @@ static void TestOptimizationIdToOptimizationTranslation() { using namespace jlm::tooling; + using namespace jlm::util; + // Arrange + JlmOptCommandLineOptions options( + filepath(""), + JlmOptCommandLineOptions::InputFormat::Llvm, + filepath(""), + JlmOptCommandLineOptions::OutputFormat::Llvm, + StatisticsCollectorSettings(), + jlm::llvm::RvsdgTreePrinter::Configuration(filepath(std::filesystem::temp_directory_path())), + std::vector()); + + // Act & Assert for (size_t n = static_cast(JlmOptCommandLineOptions::OptimizationId::FirstEnumValue) + 1; n != static_cast(JlmOptCommandLineOptions::OptimizationId::LastEnumValue); @@ -75,7 +87,7 @@ TestOptimizationIdToOptimizationTranslation() auto optimizationId = static_cast(n); // terminates on unhandled optimization id - 
JlmOptCommandLineOptions::GetOptimization(optimizationId); + static_cast(options.GetOptimization(optimizationId)); } } From 461b83affad7df4ae817e2070cd30b06c6b6201f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 29 Aug 2024 20:12:09 +0200 Subject: [PATCH 063/170] Add ExitResult for HLS loop (#602) 1. Add the ExitResult class for representing result values before the HLS loop terminates 2. Replaces respective `result::create()` with `ExitResult::Create()` usages --- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 8 ++----- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 2 +- jlm/hls/ir/hls.cpp | 12 ++++++++-- jlm/hls/ir/hls.hpp | 32 +++++++++++++++++++++++++ 4 files changed, 45 insertions(+), 9 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 429a8b4e4..eaa3b1795 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -297,11 +297,7 @@ decouple_load( auto new_res_origin = smap.lookup(res->origin()); auto new_state_output = jlm::rvsdg::structural_output::create(new_loop, new_res_origin->Type()); - jlm::rvsdg::result::create( - new_loop->subregion(), - new_res_origin, - new_state_output, - new_res_origin->Type()); + ExitResult::Create(*new_res_origin, *new_state_output); res->output()->divert_users(new_state_output); } } @@ -333,7 +329,7 @@ decouple_load( // create output for address auto load_addr = gate_out[0]; auto addr_output = jlm::rvsdg::structural_output::create(new_loop, load_addr->Type()); - jlm::rvsdg::result::create(new_loop->subregion(), load_addr, addr_output, load_addr->Type()); + ExitResult::Create(*load_addr, *addr_output); // trace and remove loop input for mem data reponse auto mem_data_loop_out = new_load->input(new_load->ninputs() - 1)->origin(); auto mem_data_loop_arg = dynamic_cast(mem_data_loop_out); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index b86c42f00..aedcb594e 100644 --- 
a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -48,7 +48,7 @@ jlm::hls::route_request(jlm::rvsdg::region * target, jlm::rvsdg::output * reques auto ln = dynamic_cast(request->region()->node()); JLM_ASSERT(ln); auto output = jlm::rvsdg::structural_output::create(ln, request->Type()); - jlm::rvsdg::result::create(request->region(), request, output, request->Type()); + ExitResult::Create(*request, *output); return route_request(target, output); } } diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index db4f19055..28dd71155 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -57,6 +57,14 @@ backedge_result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * ou return *backedge_result::create(&origin); } +ExitResult::~ExitResult() noexcept = default; + +ExitResult & +ExitResult::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +{ + return Create(origin, *output); +} + jlm::rvsdg::structural_output * loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { @@ -73,7 +81,7 @@ loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer { *buffer = mux; } - jlm::rvsdg::result::create(subregion(), branch[0], output, origin->Type()); + ExitResult::Create(*branch[0], *output); auto result_loop = argument_loop->result(); auto buf = hls::buffer_op::create(*branch[1], 2)[0]; result_loop->divert_to(buf); @@ -140,7 +148,7 @@ loop_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap auto outp = output(i); auto res = outp->results.begin().ptr(); auto origin = smap.lookup(res->origin()); - jlm::rvsdg::result::create(loop->subregion(), origin, loop->output(i), res->Type()); + ExitResult::Create(*origin, *loop->output(i)); } nf->set_mutable(true); return loop; diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 7cb777e4b..d33100b70 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -713,6 +713,38 @@ class backedge_result : 
public jlm::rvsdg::result backedge_argument * argument_; }; +/** + * Represents the exit result of the HLS loop. + */ +class ExitResult final : public rvsdg::result +{ + friend loop_node; + +public: + ~ExitResult() noexcept override; + +private: + ExitResult(rvsdg::output & origin, rvsdg::structural_output & output) + : rvsdg::result(origin.region(), &origin, &output, origin.Type()) + { + JLM_ASSERT(rvsdg::is(origin.region()->node())); + } + +public: + ExitResult & + Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + + // FIXME: This should not be public, but we currently still have some transformations that use + // this one. Make it eventually private. + static ExitResult & + Create(rvsdg::output & origin, rvsdg::structural_output & output) + { + auto result = new ExitResult(origin, output); + origin.region()->append_result(result); + return *result; + } +}; + class loop_node final : public jlm::rvsdg::structural_node { public: From 63ddb402dba04c24d84c872580a9859df5460e87 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 1 Sep 2024 11:58:27 +0200 Subject: [PATCH 064/170] Make result class abstract (#604) 1. Replaces all usages of `result::create()` in the unit tests with the appropriate subclasses. 2. Makes the result class abstract 3. Minor clean ups in the result class 4. 
Add documentation to result class --- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 7 +--- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 3 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 3 +- .../rvsdg2rhls/remove-unused-state.cpp | 3 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 7 +--- jlm/llvm/ir/operators/lambda.cpp | 7 ++-- jlm/llvm/ir/operators/lambda.hpp | 9 ++-- jlm/rvsdg/region.cpp | 20 --------- jlm/rvsdg/region.hpp | 42 +++++++++---------- tests/jlm/rvsdg/RegionTests.cpp | 22 +++++----- tests/jlm/rvsdg/ResultTests.cpp | 27 ++---------- tests/jlm/rvsdg/test-graph.cpp | 4 +- tests/jlm/rvsdg/test-nodes.cpp | 4 +- tests/test-operation.hpp | 27 +++++++++--- 14 files changed, 79 insertions(+), 106 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index ee051c5a2..023e05a40 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -109,12 +109,9 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) JLM_ASSERT(lambdaNode.output()->nusers() == 1); lambdaNode.region()->RemoveResult((*lambdaNode.output()->begin())->index()); + auto oldExport = lambdaNode.ComputeCallSummary()->GetRvsdgExport(); + jlm::llvm::GraphExport::Create(*newLambdaOutput, oldExport ? 
oldExport->Name() : ""); remove(&lambdaNode); - jlm::rvsdg::result::create( - newLambda->region(), - newLambdaOutput, - nullptr, - newLambdaOutput->Type()); } static void diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 0a12fbad5..2884e410e 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -77,8 +77,9 @@ add_lambda_argument(llvm::lambda::node * ln, std::shared_ptroutput()->divert_users(new_out); ln->region()->RemoveResult((*ln->output()->begin())->index()); + auto oldExport = ln->ComputeCallSummary()->GetRvsdgExport(); + jlm::llvm::GraphExport::Create(*new_out, oldExport ? oldExport->Name() : ""); remove(ln); - jlm::rvsdg::result::create(new_lambda->region(), new_out, nullptr, new_out->Type()); return new_lambda; } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index aedcb594e..0b3f0d61f 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -693,7 +693,8 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) } originalResults.insert(originalResults.end(), newResults.begin(), newResults.end()); auto newOut = newLambda->finalize(originalResults); - jlm::rvsdg::result::create(newLambda->region(), newOut, nullptr, newOut->Type()); + auto oldExport = lambda->ComputeCallSummary()->GetRvsdgExport(); + llvm::GraphExport::Create(*newOut, oldExport ? 
oldExport->Name() : ""); JLM_ASSERT(lambda->output()->nusers() == 1); lambda->region()->RemoveResult((*lambda->output()->begin())->index()); diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index 30be74166..8f47c4a03 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -241,8 +241,9 @@ remove_lambda_passthrough(llvm::lambda::node * ln) // ln->output()->divert_users(new_out); // can't divert since the type changed JLM_ASSERT(ln->output()->nusers() == 1); ln->region()->RemoveResult((*ln->output()->begin())->index()); + auto oldExport = ln->ComputeCallSummary()->GetRvsdgExport(); + jlm::llvm::GraphExport::Create(*new_out, oldExport ? oldExport->Name() : ""); remove(ln); - jlm::rvsdg::result::create(new_lambda->region(), new_out, nullptr, new_out->Type()); return new_lambda; } diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index c402e18cb..aa81009b5 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -385,11 +385,8 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) // copy function into rhls auto new_ln = ln->copy(rhls->Rvsdg().root(), smap); new_ln = change_linkage(new_ln, llvm::linkage::external_linkage); - jlm::rvsdg::result::create( - rhls->Rvsdg().root(), - new_ln->output(), - nullptr, - new_ln->output()->Type()); + auto oldExport = ln->ComputeCallSummary()->GetRvsdgExport(); + jlm::llvm::GraphExport::Create(*new_ln->output(), oldExport ? 
oldExport->Name() : ""); // add function as input to rm and remove it auto & graphImport = llvm::GraphImport::Create( rm.Rvsdg(), diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index ac6f17c8f..ddf902092 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -287,7 +287,7 @@ node::ComputeCallSummary() const worklist.insert(worklist.end(), output()->begin(), output()->end()); std::vector directCalls; - rvsdg::result * rvsdgExport = nullptr; + GraphExport * rvsdgExport = nullptr; std::vector otherUsers; while (!worklist.empty()) @@ -373,10 +373,9 @@ node::ComputeCallSummary() const continue; } - auto result = dynamic_cast(input); - if (result != nullptr && input->region() == graph()->root()) + if (auto graphExport = dynamic_cast(input)) { - rvsdgExport = result; + rvsdgExport = graphExport; continue; } diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index de3a340eb..b1e334179 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -8,6 +8,7 @@ #include #include +#include #include #include #include @@ -714,7 +715,7 @@ class node::CallSummary final public: CallSummary( - rvsdg::result * rvsdgExport, + GraphExport * rvsdgExport, std::vector directCalls, std::vector otherUsers) : RvsdgExport_(rvsdgExport), @@ -816,7 +817,7 @@ class node::CallSummary final * * @return The export of the lambda from the RVSDG root region. 
*/ - [[nodiscard]] rvsdg::result * + [[nodiscard]] GraphExport * GetRvsdgExport() const noexcept { return RvsdgExport_; @@ -857,7 +858,7 @@ class node::CallSummary final */ static std::unique_ptr Create( - rvsdg::result * rvsdgExport, + GraphExport * rvsdgExport, std::vector directCalls, std::vector otherUsers) { @@ -868,7 +869,7 @@ class node::CallSummary final } private: - rvsdg::result * RvsdgExport_; + GraphExport * RvsdgExport_; std::vector DirectCalls_; std::vector OtherUsers_; }; diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index ac9f2d548..632780e4d 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -73,26 +73,6 @@ result::result( } } -result & -result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) -{ - return *result::create(origin.region(), &origin, output, Type()); -} - -jlm::rvsdg::result * -result::create( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, - std::shared_ptr type) -{ - auto result = new jlm::rvsdg::result(region, origin, output, std::move(type)); - region->append_result(result); - return result; -} - -/* region */ - region::~region() { on_region_destroy(this); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 5d5d9d7fd..8bdd7f2d3 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -87,24 +87,33 @@ class argument : public output structural_input * input_; }; +/** + * \brief Represents the result of a region. + * + * Region results represent the final values of the region's acyclic graph. The result values + * can be mapped back to the region arguments or the corresponding structural outputs + * throughout the execution, but the concrete semantics of this mapping + * depends on the structural node the region is part of. A region result is either linked + * with a \ref structural_output or is a standalone result. 
+ */ class result : public input { - jlm::util::intrusive_list_anchor structural_output_anchor_; + util::intrusive_list_anchor structural_output_anchor_; public: - typedef jlm::util:: - intrusive_list_accessor - structural_output_accessor; + typedef util::intrusive_list_accessor + structural_output_accessor; - virtual ~result() noexcept; + ~result() noexcept override; protected: result( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, + rvsdg::region * region, + rvsdg::output * origin, + structural_output * output, std::shared_ptr type); +public: result(const result &) = delete; result(result &&) = delete; @@ -115,8 +124,7 @@ class result : public input result & operator=(result &&) = delete; -public: - inline jlm::rvsdg::structural_output * + [[nodiscard]] structural_output * output() const noexcept { return output_; @@ -130,22 +138,12 @@ class result : public input * @param output The structural_output to the result, if any. * * @return A reference to the copied result. - * - * FIXME: This method should be made abstract once we enforced that no instances of result - * itself can be created any longer. 
*/ virtual result & - Copy(rvsdg::output & origin, structural_output * output); - - static jlm::rvsdg::result * - create( - jlm::rvsdg::region * region, - jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, - std::shared_ptr type); + Copy(rvsdg::output & origin, structural_output * output) = 0; private: - jlm::rvsdg::structural_output * output_; + structural_output * output_; }; class region diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index e39b98b56..2de4efbc5 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -116,6 +116,8 @@ JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-NumRegions_NonEmptyRvsdg", NumRegi static int RemoveResultsWhere() { + using namespace jlm::tests; + // Arrange jlm::rvsdg::graph rvsdg; jlm::rvsdg::region region(rvsdg.root(), &rvsdg); @@ -123,15 +125,15 @@ RemoveResultsWhere() auto valueType = jlm::tests::valuetype::Create(); auto node = jlm::tests::test_op::Create(®ion, {}, {}, { valueType }); - auto result0 = jlm::rvsdg::result::create(®ion, node->output(0), nullptr, valueType); - auto result1 = jlm::rvsdg::result::create(®ion, node->output(0), nullptr, valueType); - auto result2 = jlm::rvsdg::result::create(®ion, node->output(0), nullptr, valueType); + auto & result0 = TestGraphResult::Create(*node->output(0), nullptr); + auto & result1 = TestGraphResult::Create(*node->output(0), nullptr); + auto & result2 = TestGraphResult::Create(*node->output(0), nullptr); // Act & Arrange assert(region.nresults() == 3); - assert(result0->index() == 0); - assert(result1->index() == 1); - assert(result2->index() == 2); + assert(result0.index() == 0); + assert(result1.index() == 1); + assert(result2.index() == 2); region.RemoveResultsWhere( [](const jlm::rvsdg::result & result) @@ -139,8 +141,8 @@ RemoveResultsWhere() return result.index() == 1; }); assert(region.nresults() == 2); - assert(result0->index() == 0); - assert(result2->index() == 1); + assert(result0.index() 
== 0); + assert(result2.index() == 1); region.RemoveResultsWhere( [](const jlm::rvsdg::result & result) @@ -148,8 +150,8 @@ RemoveResultsWhere() return false; }); assert(region.nresults() == 2); - assert(result0->index() == 0); - assert(result2->index() == 1); + assert(result0.index() == 0); + assert(result2.index() == 1); region.RemoveResultsWhere( [](const jlm::rvsdg::result & result) diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index 511cd832d..18d954a65 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -37,7 +37,8 @@ ResultNodeMismatch() bool outputErrorHandlerCalled = false; try { - result::create(structuralNode2->subregion(0), &argument, structuralOutput, valueType); + // Region mismatch + TestGraphResult::Create(*structuralNode2->subregion(0), argument, structuralOutput); } catch (jlm::util::error & e) { @@ -72,29 +73,9 @@ ResultInputTypeMismatch() try { auto simpleNode = test_op::create(structuralNode->subregion(0), {}, { stateType }); - jlm::rvsdg::result::create( - structuralNode->subregion(0), - simpleNode->output(0), - structuralOutput, - stateType); - // The line below should not be executed as the line above is expected to throw an exception. - assert(false); - } - catch (type_error &) - { - exceptionWasCaught = true; - } - assert(exceptionWasCaught); - exceptionWasCaught = false; - try - { - auto simpleNode = test_op::create(structuralNode->subregion(0), {}, { stateType }); - jlm::rvsdg::result::create( - structuralNode->subregion(0), - simpleNode->output(0), - structuralOutput, - stateType); + // Type mismatch between simple node output and structural output + TestGraphResult::Create(*simpleNode->output(0), structuralOutput); // The line below should not be executed as the line above is expected to throw an exception. 
assert(false); } diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 65980b5f4..7b655f5ae 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -44,7 +44,7 @@ test_recursive_prune() auto & a1 = TestGraphArgument::Create(*n3->subregion(0), nullptr, t); auto n4 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); auto n5 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); - result::create(n3->subregion(0), n4->output(0), nullptr, t); + TestGraphResult::Create(*n4->output(0), nullptr); auto o1 = structural_output::create(n3, t); auto n6 = jlm::tests::structural_node::create(n3->subregion(0), 1); @@ -149,7 +149,7 @@ Copy() jlm::rvsdg::graph graph; auto & argument = TestGraphArgument::Create(*graph.root(), nullptr, valueType); auto node = test_op::create(graph.root(), { &argument }, { valueType }); - TestGraphResult::Create(*node->output(0)); + TestGraphResult::Create(*node->output(0), nullptr); // Act auto newGraph = graph.copy(); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 8cab5f049..900c2ae7b 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -34,8 +34,8 @@ test_node_copy(void) auto n2 = jlm::tests::test_op::create(n1->subregion(0), { &a1 }, { stype }); auto n3 = jlm::tests::test_op::create(n1->subregion(0), { &a2 }, { vtype }); - result::create(n1->subregion(0), n2->output(0), o1, stype); - result::create(n1->subregion(0), n3->output(0), o2, vtype); + TestGraphResult::Create(*n2->output(0), o1); + TestGraphResult::Create(*n3->output(0), o2); jlm::rvsdg::view(graph.root(), stdout); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 14bd133b8..40052fced 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -376,25 +376,40 @@ class TestGraphArgument final : public jlm::rvsdg::argument class TestGraphResult final : public jlm::rvsdg::result { private: - explicit 
TestGraphResult(jlm::rvsdg::output & origin) - : jlm::rvsdg::result(origin.region(), &origin, nullptr, origin.Type()) + TestGraphResult( + jlm::rvsdg::region & region, + jlm::rvsdg::output & origin, + jlm::rvsdg::structural_output * output) + : jlm::rvsdg::result(®ion, &origin, output, origin.Type()) + {} + + TestGraphResult(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) + : TestGraphResult(*origin.region(), origin, output) {} public: TestGraphResult & Copy(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) override { - JLM_ASSERT(output == nullptr); - return Create(origin); + return Create(origin, output); } static TestGraphResult & - Create(jlm::rvsdg::output & origin) + Create( + jlm::rvsdg::region & region, + jlm::rvsdg::output & origin, + jlm::rvsdg::structural_output * output) { - auto graphResult = new TestGraphResult(origin); + auto graphResult = new TestGraphResult(region, origin, output); origin.region()->append_result(graphResult); return *graphResult; } + + static TestGraphResult & + Create(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) + { + return Create(*origin.region(), origin, output); + } }; } From ab375bf2a9f89a19f25d0faadd1698b2ac1790d1 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 1 Sep 2024 19:36:56 +0200 Subject: [PATCH 065/170] Add jlm-opt support for RVSDG tree annotations (#603) This PR does the following: 1. Adds support for annotating the number of RVSDG nodes to the RVSDG tree in the RvsdgTreePrinter pass. 2. Adds support for controlling the annotations from jlm-opt 3. 
Adds a unit test for the printing of the introduced annotation Usage: `jlm-opt --RvsdgTreePrinter -s /tmp --annotations=NumRvsdgNodes input.ll` Example output: ``` RootRegion NumRvsdgNodes:2 -STRUCTURAL_TEST_NODE NumRvsdgNodes:2 --Region[0] NumRvsdgNodes:1 --Region[1] NumRvsdgNodes:1 ``` --------- Co-authored-by: Magnus Sjalander --- jlm/llvm/opt/RvsdgTreePrinter.cpp | 60 ++++++++++++++++- jlm/llvm/opt/RvsdgTreePrinter.hpp | 64 ++++++++++++++++++- jlm/tooling/CommandGraphGenerator.cpp | 2 +- jlm/tooling/CommandLine.cpp | 16 ++++- tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp | 51 ++++++++++++++- tests/jlm/tooling/TestJlmOptCommand.cpp | 2 +- .../tooling/TestJlmOptCommandLineParser.cpp | 2 +- 7 files changed, 185 insertions(+), 12 deletions(-) diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 748999aa7..82245d3c5 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include @@ -48,7 +49,8 @@ RvsdgTreePrinter::run(RvsdgModule & rvsdgModule, util::StatisticsCollector & sta auto statistics = Statistics::Create(rvsdgModule.SourceFileName()); statistics->Start(); - auto tree = rvsdg::region::ToTree(*rvsdgModule.Rvsdg().root()); + auto annotationMap = ComputeAnnotationMap(rvsdgModule.Rvsdg()); + auto tree = rvsdg::region::ToTree(*rvsdgModule.Rvsdg().root(), annotationMap); WriteTreeToFile(rvsdgModule, tree); statistics->Stop(); @@ -61,6 +63,58 @@ RvsdgTreePrinter::run(RvsdgModule & rvsdgModule) run(rvsdgModule, collector); } +util::AnnotationMap +RvsdgTreePrinter::ComputeAnnotationMap(const rvsdg::graph & rvsdg) const +{ + util::AnnotationMap annotationMap; + for (auto annotation : Configuration_.RequiredAnnotations().Items()) + { + switch (annotation) + { + case Configuration::Annotation::NumRvsdgNodes: + AnnotateNumRvsdgNodes(rvsdg, annotationMap); + break; + default: + JLM_UNREACHABLE("Unhandled RVSDG tree annotation."); + } + } + + return 
annotationMap; +} + +void +RvsdgTreePrinter::AnnotateNumRvsdgNodes( + const rvsdg::graph & rvsdg, + util::AnnotationMap & annotationMap) +{ + static std::string_view label("NumRvsdgNodes"); + + std::function annotateRegion = [&](const rvsdg::region & region) + { + for (auto & node : region.nodes) + { + if (auto structuralNode = dynamic_cast(&node)) + { + size_t numSubregionNodes = 0; + for (size_t n = 0; n < structuralNode->nsubregions(); n++) + { + auto subregion = structuralNode->subregion(n); + numSubregionNodes += annotateRegion(*subregion); + } + + annotationMap.AddAnnotation(structuralNode, { label, numSubregionNodes }); + } + } + + auto numNodes = region.nnodes(); + annotationMap.AddAnnotation(®ion, { label, numNodes }); + + return numNodes; + }; + + annotateRegion(*rvsdg.root()); +} + void RvsdgTreePrinter::WriteTreeToFile(const RvsdgModule & rvsdgModule, const std::string & tree) const { @@ -86,9 +140,9 @@ RvsdgTreePrinter::CreateOutputFile(const RvsdgModule & rvsdgModule) const uint64_t RvsdgTreePrinter::GetOutputFileNameCounter(const RvsdgModule & rvsdgModule) { - static std::unordered_map RvsdgModuleCounterMap_; + static std::unordered_map RvsdgModuleCounterMap_; - return RvsdgModuleCounterMap_[&rvsdgModule]++; + return RvsdgModuleCounterMap_[rvsdgModule.SourceFileName().to_str()]++; } } diff --git a/jlm/llvm/opt/RvsdgTreePrinter.hpp b/jlm/llvm/opt/RvsdgTreePrinter.hpp index 3e8388ec2..bb55b0593 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.hpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.hpp @@ -7,7 +7,14 @@ #define JLM_LLVM_OPT_RVSDGTREEPRINTER_HPP #include +#include #include +#include + +namespace jlm::rvsdg +{ +class graph; +} namespace jlm::util { @@ -34,8 +41,29 @@ class RvsdgTreePrinter final : public optimization class Configuration final { public: - explicit Configuration(const util::filepath & outputDirectory) - : OutputDirectory_(std::move(outputDirectory)) + enum class Annotation + { + /** + * Must always be the first enum value. Used for iteration. 
+ */ + FirstEnumValue, + + /** + * Annotate regions and structural nodes with the number of RVSDG nodes. + */ + NumRvsdgNodes, + + /** + * Must always be the last enum value. Used for iteration. + */ + LastEnumValue + }; + + Configuration( + const util::filepath & outputDirectory, + util::HashSet requiredAnnotations) + : OutputDirectory_(std::move(outputDirectory)), + RequiredAnnotations_(std::move(requiredAnnotations)) { JLM_ASSERT(outputDirectory.IsDirectory()); JLM_ASSERT(outputDirectory.Exists()); @@ -50,8 +78,18 @@ class RvsdgTreePrinter final : public optimization return OutputDirectory_; } + /** + * The required annotations for the RVSDG tree. + */ + [[nodiscard]] const util::HashSet & + RequiredAnnotations() const noexcept + { + return RequiredAnnotations_; + } + private: util::filepath OutputDirectory_; + util::HashSet RequiredAnnotations_ = {}; }; ~RvsdgTreePrinter() noexcept override; @@ -77,6 +115,28 @@ class RvsdgTreePrinter final : public optimization run(RvsdgModule & rvsdgModule); private: + /** + * Computes a map with annotations based on the required \ref jlm::util::Annotation%s in the \ref + * Configuration for the individual regions and structural nodes of the region tree. + * + * @param rvsdg The RVSDG for which to compute the annotations. + * @return An instance of \ref AnnotationMap. + */ + [[nodiscard]] util::AnnotationMap + ComputeAnnotationMap(const rvsdg::graph & rvsdg) const; + + /** + * Adds an annotation to \p annotationMap that indicates the number of RVSDG nodes for regions + * and structural nodes. + * + * @param rvsdg The RVSDG for which to compute the annotation. + * @param annotationMap The annotation map in which the annotation is inserted. 
+ * + * @see NumRvsdgNodes + */ + static void + AnnotateNumRvsdgNodes(const rvsdg::graph & rvsdg, util::AnnotationMap & annotationMap); + void WriteTreeToFile(const RvsdgModule & rvsdgModule, const std::string & tree) const; diff --git a/jlm/tooling/CommandGraphGenerator.cpp b/jlm/tooling/CommandGraphGenerator.cpp index 5196888ea..1771015b9 100644 --- a/jlm/tooling/CommandGraphGenerator.cpp +++ b/jlm/tooling/CommandGraphGenerator.cpp @@ -142,7 +142,7 @@ JlcCommandGraphGenerator::GenerateCommandGraph(const JlcCommandLineOptions & com CreateJlmOptCommandOutputFile(compilation.InputFile()), JlmOptCommandLineOptions::OutputFormat::Llvm, statisticsCollectorSettings, - jlm::llvm::RvsdgTreePrinter::Configuration(tempDirectory), + jlm::llvm::RvsdgTreePrinter::Configuration(tempDirectory, {}), commandLineOptions.JlmOptOptimizations_); auto & jlmOptCommandNode = diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 26a9accd9..97630d762 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -939,6 +939,15 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const "Loop Unrolling")), cl::desc("Perform optimization")); + cl::list rvsdgTreePrinterAnnotations( + "annotations", + cl::values(::clEnumValN( + llvm::RvsdgTreePrinter::Configuration::Annotation::NumRvsdgNodes, + "NumRvsdgNodes", + "Annotate number of RVSDG nodes")), + cl::CommaSeparated, + cl::desc("Comma separated list of RVSDG tree printer annotations")); + cl::ParseCommandLineOptions(argc, argv); jlm::util::filepath statisticsDirectoryFilePath(statisticDirectory); @@ -961,13 +970,18 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const statisticsFilePath, demandedStatistics); + util::HashSet demandedAnnotations( + { rvsdgTreePrinterAnnotations.begin(), rvsdgTreePrinterAnnotations.end() }); + CommandLineOptions_ = JlmOptCommandLineOptions::Create( std::move(inputFilePath), inputFormat, outputFile, outputFormat, 
std::move(statisticsCollectorSettings), - llvm::RvsdgTreePrinter::Configuration(statisticsDirectoryFilePath), + llvm::RvsdgTreePrinter::Configuration( + statisticsDirectoryFilePath, + std::move(demandedAnnotations)), std::move(optimizationIds)); return *CommandLineOptions_; diff --git a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp index 612ad58e3..5d7704183 100644 --- a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp +++ b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. */ +#include #include #include @@ -16,7 +17,6 @@ static int PrintRvsdgTree() { using namespace jlm::llvm; - using namespace jlm::tests; using namespace jlm::util; // Arrange @@ -31,10 +31,10 @@ PrintRvsdgTree() "f", linkage::external_linkage); auto lambdaOutput = lambda->finalize({ lambda->fctargument(0) }); - GraphExport::Create(*lambdaOutput, "f"); + jlm::tests::GraphExport::Create(*lambdaOutput, "f"); auto tempDirectory = std::filesystem::temp_directory_path(); - RvsdgTreePrinter::Configuration configuration({ tempDirectory }); + RvsdgTreePrinter::Configuration configuration({ tempDirectory }, {}); RvsdgTreePrinter printer(configuration); // Act @@ -53,3 +53,48 @@ PrintRvsdgTree() } JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/RvsdgTreePrinterTests-PrintRvsdgTree", PrintRvsdgTree) + +static int +PrintNumRvsdgNodesAnnotation() +{ + using namespace jlm::llvm; + using namespace jlm::util; + + // Arrange + std::string fileName = "PrintNumRvsdgNodesAnnotationTest"; + auto rvsdgModule = RvsdgModule::Create({ fileName }, "", ""); + auto rootRegion = rvsdgModule->Rvsdg().root(); + + auto structuralNode = jlm::tests::structural_node::create(rootRegion, 2); + jlm::tests::test_op::create(structuralNode->subregion(0), {}, {}); + jlm::tests::test_op::create(structuralNode->subregion(1), {}, {}); + + jlm::tests::test_op::create(rootRegion, {}, {}); + + auto tempDirectory = std::filesystem::temp_directory_path(); + 
RvsdgTreePrinter::Configuration configuration( + { tempDirectory }, + { RvsdgTreePrinter::Configuration::Annotation::NumRvsdgNodes }); + RvsdgTreePrinter printer(configuration); + + // Act + printer.run(*rvsdgModule); + + // Assert + auto outputFilePath = tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"; + + std::ifstream file(outputFilePath); + std::stringstream buffer; + buffer << file.rdbuf(); + + assert( + buffer.str() + == "RootRegion NumRvsdgNodes:2\n-STRUCTURAL_TEST_NODE NumRvsdgNodes:2\n--Region[0] " + "NumRvsdgNodes:1\n--Region[1] NumRvsdgNodes:1\n\n"); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/RvsdgTreePrinterTests-PrintNumRvsdgNodesAnnotation", + PrintNumRvsdgNodesAnnotation) diff --git a/tests/jlm/tooling/TestJlmOptCommand.cpp b/tests/jlm/tooling/TestJlmOptCommand.cpp index 3eecc3da8..ccc42a7e2 100644 --- a/tests/jlm/tooling/TestJlmOptCommand.cpp +++ b/tests/jlm/tooling/TestJlmOptCommand.cpp @@ -32,7 +32,7 @@ TestStatistics() jlm::util::filepath("outputFile.ll"), JlmOptCommandLineOptions::OutputFormat::Llvm, statisticsCollectorSettings, - RvsdgTreePrinter::Configuration(filepath(std::filesystem::temp_directory_path())), + RvsdgTreePrinter::Configuration({ std::filesystem::temp_directory_path() }, {}), { JlmOptCommandLineOptions::OptimizationId::DeadNodeElimination, JlmOptCommandLineOptions::OptimizationId::LoopUnrolling }); diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index db54d1948..27791fd54 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -75,7 +75,7 @@ TestOptimizationIdToOptimizationTranslation() filepath(""), JlmOptCommandLineOptions::OutputFormat::Llvm, StatisticsCollectorSettings(), - jlm::llvm::RvsdgTreePrinter::Configuration(filepath(std::filesystem::temp_directory_path())), + jlm::llvm::RvsdgTreePrinter::Configuration({ std::filesystem::temp_directory_path() }, {}), 
std::vector()); // Act & Assert From 89483eb27729168a217d111b95d534a07430dafb Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Tue, 3 Sep 2024 07:59:32 +0200 Subject: [PATCH 066/170] Skip predecessor basic blocks that are unreachable when converting LLVM phi instructions (#598) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As detailed in #597, the `::llvm::ReversePostOrderTraversal` does not visit unreachable LLVM basic blocks. As these BBs are never reached at runtime, and do not exist in our tac, this PR makes phi nodes ignore all operands from these basic blocks. If a phi node is left with only none operand, it is not converted to a `phi_op`, but instead uses the value directly. Fixes #597, (the minimal example given there now compiles), but there are still issues with the SPEC benchmark `577.xz`. --------- Co-authored-by: Magnus Sjalander Co-authored-by: Håvard Krogstie --- jlm/llvm/Makefile.sub | 1 + .../frontend/LlvmInstructionConversion.cpp | 45 ++- jlm/llvm/frontend/LlvmModuleConversion.cpp | 26 +- .../frontend/llvm/LlvmPhiConversionTests.cpp | 272 ++++++++++++++++++ tests/jlm/llvm/frontend/llvm/test-select.cpp | 2 +- 5 files changed, 334 insertions(+), 12 deletions(-) create mode 100644 tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 6bea7a6b9..92d9be7ec 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -150,6 +150,7 @@ libllvm_TESTS += \ tests/jlm/llvm/frontend/llvm/test-export \ tests/jlm/llvm/frontend/llvm/TestFNeg \ tests/jlm/llvm/frontend/llvm/test-function-call \ + tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests \ tests/jlm/llvm/frontend/llvm/test-recursive-data \ tests/jlm/llvm/frontend/llvm/test-restructuring \ tests/jlm/llvm/frontend/llvm/test-select \ diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index e1f390d08..5614e5c19 100644 --- 
a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -677,17 +677,56 @@ convert_store_instruction(::llvm::Instruction * i, tacsvector_t & tacs, context return nullptr; } -static inline const variable * +/** + * Given an LLVM phi instruction, checks if the instruction has only one predecessor basic block + * that is reachable (i.e., there exists a path from the entry point to the predecessor). + * + * @param phi the phi instruction + * @param ctx the context for the current LLVM to tac conversion + * @return the index of the single reachable predecessor basic block, or std::nullopt if it has many + */ +static std::optional +getSinglePredecessor(::llvm::PHINode * phi, context & ctx) +{ + std::optional predecessor = std::nullopt; + for (size_t n = 0; n < phi->getNumOperands(); n++) + { + if (!ctx.has(phi->getIncomingBlock(n))) + continue; // This predecessor was unreachable + if (predecessor.has_value()) + return std::nullopt; // This is the second reachable predecessor. Abort! + predecessor = n; + } + // Any visited phi should have at least one predecessor + JLM_ASSERT(predecessor); + return predecessor; +} + +static const variable * convert_phi_instruction(::llvm::Instruction * i, tacsvector_t & tacs, context & ctx) { - JLM_ASSERT(i->getOpcode() == ::llvm::Instruction::PHI); + auto phi = ::llvm::dyn_cast<::llvm::PHINode>(i); + // If this phi instruction only has one predecessor basic block that is reachable, + // the phi operation can be removed. + if (auto singlePredecessor = getSinglePredecessor(phi, ctx)) + { + // The incoming value is either a constant, + // or a value from the predecessor basic block that has already been converted + return ConvertValue(phi->getIncomingValue(*singlePredecessor), tacs, ctx); + } + + // This phi instruction can be reached from multiple basic blocks. 
+ // As some of these blocks might not be converted yet, some of the phi's operands may reference + // instructions that have not yet been converted. + // For now, a phi_op with no operands is created. + // Once all basic blocks have been converted, all phi_ops get visited again and given operands. auto type = ConvertType(i->getType(), ctx); tacs.push_back(phi_op::create({}, type)); return tacs.back()->result(0); } -static inline const variable * +static const variable * convert_getelementptr_instruction(::llvm::Instruction * inst, tacsvector_t & tacs, context & ctx) { JLM_ASSERT(::llvm::dyn_cast(inst)); diff --git a/jlm/llvm/frontend/LlvmModuleConversion.cpp b/jlm/llvm/frontend/LlvmModuleConversion.cpp index d9a693ba8..312b74c3a 100644 --- a/jlm/llvm/frontend/LlvmModuleConversion.cpp +++ b/jlm/llvm/frontend/LlvmModuleConversion.cpp @@ -31,15 +31,16 @@ convert_instructions(::llvm::Function & function, context & ctx) if (auto result = ConvertInstruction(&instruction, tacs, ctx)) ctx.insert_value(&instruction, result); - if (auto phi = ::llvm::dyn_cast<::llvm::PHINode>(&instruction)) + // When an LLVM PhiNode is converted to a jlm phi_op, some of its operands may not be ready. + // The created phi_op therefore has no operands, but is instead added to a list. + // Once all basic blocks have been converted, all phi_ops are revisited and given operands. + if (!tacs.empty() && is(tacs.back()->operation())) { + auto phi = ::llvm::dyn_cast<::llvm::PHINode>(&instruction); phis.push_back(phi); - ctx.get(bb)->append_first(tacs); - } - else - { - ctx.get(bb)->append_last(tacs); } + + ctx.get(bb)->append_last(tacs); } } @@ -55,14 +56,23 @@ patch_phi_operands(const std::vector<::llvm::PHINode *> & phis, context & ctx) std::vector operands; for (size_t n = 0; n < phi->getNumOperands(); n++) { - tacsvector_t tacs; + // In LLVM, phi instructions may have incoming basic blocks that are unreachable. 
+ // These are not visited during convert_basic_blocks, and thus do not have corresponding + // jlm::llvm::basic_blocks. The phi_op can safely ignore these, as they are dead. + if (!ctx.has(phi->getIncomingBlock(n))) + continue; + auto bb = ctx.get(phi->getIncomingBlock(n)); + tacsvector_t tacs; operands.push_back(ConvertValue(phi->getIncomingValue(n), tacs, ctx)); bb->insert_before_branch(tacs); nodes.push_back(bb); } - auto phi_tac = static_cast(ctx.lookup_value(phi))->tac(); + // Phi instructions with a single reachable predecessor should have already been elided + JLM_ASSERT(operands.size() >= 2); + + auto phi_tac = util::AssertedCast(ctx.lookup_value(phi))->tac(); phi_tac->replace(phi_op(nodes, phi_tac->result(0)->Type()), operands); } } diff --git a/tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests.cpp new file mode 100644 index 000000000..36c4ac2a5 --- /dev/null +++ b/tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests.cpp @@ -0,0 +1,272 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. + */ + +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include + +/** + * Tests converting instances of ::llvm::PHINode. + * Some of the operands have constant values, and some are results from the predecessors. + * One of the phi node has its own result as one of its operands. 
+ * + * The function corresponds to the C code + * uint64_t popcount(uint64_t x) { + * uint64_t popcnt = 0; + * + * while(true) { + * // 2 phi nodes with 3 possible predecessors here + * // one for x, and one for popcnt + * + * if (x == 0) + * break; + * uint64_t rem = x % 2 == 1; + * x >>= 1; + * if (rem) { + * popcnt++; + * continue; + * } + * else { + * continue; + * } + * } + * return popcnt; + * } + */ +static int +TestPhiConversion() +{ + // Arrange + llvm::LLVMContext ctx; + llvm::Module module("popcount.c", ctx); + + // Build LLVM module + { + llvm::IRBuilder builder(ctx); + + auto i64 = builder.getInt64Ty(); + auto prototype = llvm::FunctionType::get(i64, { i64 }, false); + llvm::Function * function = + llvm::Function::Create(prototype, llvm::Function::ExternalLinkage, "popcount", module); + + auto bb1 = llvm::BasicBlock::Create(ctx, "bb1", function); + auto bb2 = llvm::BasicBlock::Create(ctx, "bb2", function); + auto bb3 = llvm::BasicBlock::Create(ctx, "bb3", function); + auto bb4 = llvm::BasicBlock::Create(ctx, "bb4", function); + auto bb5 = llvm::BasicBlock::Create(ctx, "bb5", function); + auto bb6 = llvm::BasicBlock::Create(ctx, "bb6", function); + + builder.SetInsertPoint(bb1); // Entry block + builder.CreateBr(bb2); + + builder.SetInsertPoint(bb2); // Predecessors: bb1, bb4, bb5 + auto phiX = builder.CreatePHI(i64, 3); + auto phiPopcount = builder.CreatePHI(i64, 3); + + auto xIs0 = builder.CreateICmpEQ(phiX, llvm::ConstantInt::get(i64, 0, false)); + builder.CreateCondBr(xIs0, bb6, bb3); + + builder.SetInsertPoint(bb3); // Predecessors: bb2 + auto rem = builder.CreateURem(phiX, llvm::ConstantInt::get(i64, 2, false)); + auto remEq1 = builder.CreateICmpEQ(rem, llvm::ConstantInt::get(i64, 1, false)); + auto halfX = builder.CreateLShr(phiX, llvm::ConstantInt::get(i64, 1, false)); + builder.CreateCondBr(remEq1, bb4, bb5); + + builder.SetInsertPoint(bb4); // Predecessor: bb3 + auto popcountPlus1 = builder.CreateAdd(phiPopcount, llvm::ConstantInt::get(i64, 
1, false)); + builder.CreateBr(bb2); + + builder.SetInsertPoint(bb5); // Predecessor: bb3 + builder.CreateBr(bb2); + + builder.SetInsertPoint(bb6); // Predecessor: bb2 + builder.CreateRet(phiPopcount); + + // Finally give the phi nodes their operands + phiX->addIncoming(function->getArg(0), bb1); + phiX->addIncoming(halfX, bb4); + phiX->addIncoming(halfX, bb5); + + phiPopcount->addIncoming(llvm::ConstantInt::get(i64, 0, false), bb1); + phiPopcount->addIncoming(popcountPlus1, bb4); + phiPopcount->addIncoming(phiPopcount, bb5); + } + + // jlm::tests::print(module); + + // Act + auto ipgmod = jlm::llvm::ConvertLlvmModule(module); + + // print(*ipgmod, stdout); + + // Assert + // First traverse from the function's entry node to bb2 + auto popcount = + jlm::util::AssertedCast(ipgmod->ipgraph().find("popcount")); + auto entry_node = popcount->cfg()->entry(); + assert(entry_node->single_successor()); + auto bb1_node = entry_node->outedge(0)->sink(); + assert(bb1_node->single_successor()); + auto bb2_node = bb1_node->outedge(0)->sink(); + auto bb2 = jlm::util::AssertedCast(bb2_node); + + // The first two tac instructions should be the phi representing x and popcnt respectively + auto tacs = bb2->begin(); + auto & phiX = *tacs; + auto & phiPopcnt = *std::next(tacs); + + // Check that they are both phi operations + auto phiXOp = *jlm::util::AssertedCast(&phiX->operation()); + auto phiPopcntOp = *jlm::util::AssertedCast(&phiPopcnt->operation()); + + // Both phi nodes should have 3 operands, representing the loop entry, and the two "continue"s + assert(phiX->noperands() == 3); + // The phi node for x takes its value from the function arg in the first operand + assert(phiX->operand(0) == popcount->cfg()->entry()->argument(0)); + // The last two predecessor basic blocks both use the same value for x + assert(phiX->operand(1) == phiX->operand(2)); + + assert(phiPopcnt->noperands() == 3); + // The first operand of the phi node is the constant integer 0 + auto constant0variable = + 
jlm::util::AssertedCast(phiPopcnt->operand(0)); + auto constant0op = jlm::util::AssertedCast( + &constant0variable->tac()->operation()); + assert(constant0op->value() == 0); + // The last operand of the popcnt phi is the result of the phi itself + assert(phiPopcnt->operand(2) == phiPopcnt->result(0)); + + return 0; +} +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/frontend/TestLlvmPhiConversion-TestPhiConversion", + TestPhiConversion) + +/** + * Tests converting instances of ::llvm::PHINode where some of the predecessors are "dead". + * A dead predecessor is a basic block that is not reachable from the function's entry. + * This test has one phi node with 4 operands, where two of them are dead, + * and one with 2 operands, where one of them is dead. + * The first should be converted to a jlm::llvm::phi_op with two operands, + * while the second should become a direct reference to the value from the only alive predecessor. + * Due to straightening, this last basic block is also merged into its predecessor. 
+ */ +static int +TestPhiOperandElision() +{ + // Arrange + llvm::LLVMContext ctx; + llvm::Module module("phi-elide.c", ctx); + + // Build LLVM module + { + llvm::IRBuilder builder(ctx); + + auto i64 = builder.getInt64Ty(); + auto prototype = llvm::FunctionType::get(i64, { i64 }, false); + llvm::Function * function = + llvm::Function::Create(prototype, llvm::Function::ExternalLinkage, "phi_elide", module); + + auto bb1 = llvm::BasicBlock::Create(ctx, "bb1", function); + auto bb2 = llvm::BasicBlock::Create(ctx, "bb2", function); + auto bb3 = llvm::BasicBlock::Create(ctx, "bb3", function); + auto bb4 = llvm::BasicBlock::Create(ctx, "bb4", function); + auto bb5 = llvm::BasicBlock::Create(ctx, "bb5", function); + auto bb6 = llvm::BasicBlock::Create(ctx, "bb6", function); + auto bb7 = llvm::BasicBlock::Create(ctx, "bb7", function); + + builder.SetInsertPoint(bb1); // entry block + auto xIs0 = builder.CreateICmpEQ(function->getArg(0), llvm::ConstantInt::get(i64, 0)); + builder.CreateCondBr(xIs0, bb4, bb5); + + builder.SetInsertPoint(bb2); // No predecessors (dead) + auto xPlus1 = builder.CreateAdd(function->getArg(0), llvm::ConstantInt::get(i64, 1)); + auto xIs1 = builder.CreateICmpEQ(function->getArg(0), llvm::ConstantInt::get(i64, 1)); + builder.CreateCondBr(xIs1, bb3, bb5); + + builder.SetInsertPoint(bb3); // Predecessors: bb2 (dead) + builder.CreateBr(bb5); + + builder.SetInsertPoint(bb4); // Predecessors: bb1 + auto xPlus2 = builder.CreateAdd(function->getArg(0), llvm::ConstantInt::get(i64, 2)); + builder.CreateBr(bb5); + + builder.SetInsertPoint(bb5); // Predecessors: bb1, bb2 (dead), bb3 (dead), bb4 + auto bb5phi = builder.CreatePHI(i64, 4); + builder.CreateBr(bb7); + + builder.SetInsertPoint(bb6); // No predecessors + builder.CreateBr(bb7); + + builder.SetInsertPoint(bb7); // Predecessors: bb5, bb6 (dead) + auto bb7phi = builder.CreatePHI(i64, 2); + auto mul = builder.CreateMul(bb7phi, llvm::ConstantInt::get(i64, 10)); + builder.CreateRet(mul); + + 
bb5phi->addIncoming(llvm::ConstantInt::get(i64, 0), bb1); + bb5phi->addIncoming(xPlus1, bb2); // Dead + bb5phi->addIncoming(function->getArg(0), bb3); // Dead + bb5phi->addIncoming(xPlus2, bb4); + + bb7phi->addIncoming(bb5phi, bb5); + bb7phi->addIncoming(llvm::PoisonValue::get(i64), bb6); // Dead + } + + jlm::tests::print(module); + + // Act + auto ipgmod = jlm::llvm::ConvertLlvmModule(module); + + print(*ipgmod, stdout); + + // Assert + // Get the CFG of the function + auto phi_elide = + jlm::util::AssertedCast(ipgmod->ipgraph().find("phi_elide")); + + // Traverse the cfg and save every phi node + size_t numBasicBlocks = 0; + std::vector phiTacs; + for (auto & bb : *phi_elide->cfg()) + { + numBasicBlocks++; + for (auto tac : bb) + { + if (jlm::rvsdg::is(tac->operation())) + phiTacs.push_back(tac); + } + } + + // There should be 3 basic blocks left (bb1, bb5, bb7) + assert(numBasicBlocks == 3); + // There should be exactly one phi tac + assert(phiTacs.size() == 1); + auto phiTac = phiTacs[0]; + // The phi should have two operands + assert(phiTac->noperands() == 2); + // The first phi operand should be a constant 0 + auto constant0variable = + jlm::util::AssertedCast(phiTac->operand(0)); + auto constant0op = jlm::util::AssertedCast( + &constant0variable->tac()->operation()); + assert(constant0op->value() == 0); + + return 0; +} +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/frontend/TestLlvmPhiConversion-TestPhiOperandElision", + TestPhiOperandElision) diff --git a/tests/jlm/llvm/frontend/llvm/test-select.cpp b/tests/jlm/llvm/frontend/llvm/test-select.cpp index 43385b9fd..64ff07bf8 100644 --- a/tests/jlm/llvm/frontend/llvm/test-select.cpp +++ b/tests/jlm/llvm/frontend/llvm/test-select.cpp @@ -22,7 +22,7 @@ contains(const jlm::llvm::ipgraph_module & module, const std::string & fctname) using namespace jlm::llvm; bool has_select = false; - auto cfg = dynamic_cast(module.ipgraph().find("f"))->cfg(); + auto cfg = dynamic_cast(module.ipgraph().find(fctname))->cfg(); auto bb = 
dynamic_cast(cfg->entry()->outedge(0)->sink()); for (auto tac : *bb) has_select = has_select || is(tac); From 54537eabe4404ffbf567080ff86a29d75608a3df Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 4 Sep 2024 07:29:42 +0200 Subject: [PATCH 067/170] Ensure jlm-opt creates only single instances of optimizations (#605) PR #593 made optimizations dynamically configurable, but also screwed up the invariant that an optimization was only created once. This PR restores this invariant and moves the optimization creation from the JlmOptCommandLineOptions class to JlmOptCommand class. --------- Co-authored-by: HKrogstie --- jlm/llvm/opt/OptimizationSequence.hpp | 6 +- jlm/tooling/Command.cpp | 92 ++++++++++++++++++- jlm/tooling/Command.hpp | 15 ++- jlm/tooling/CommandLine.cpp | 57 ------------ jlm/tooling/CommandLine.hpp | 10 +- tests/jlm/tooling/TestJlmOptCommand.cpp | 38 ++++++++ .../tooling/TestJlmOptCommandLineParser.cpp | 30 ------ 7 files changed, 144 insertions(+), 104 deletions(-) diff --git a/jlm/llvm/opt/OptimizationSequence.hpp b/jlm/llvm/opt/OptimizationSequence.hpp index 4dc957d50..428ee1629 100644 --- a/jlm/llvm/opt/OptimizationSequence.hpp +++ b/jlm/llvm/opt/OptimizationSequence.hpp @@ -21,7 +21,7 @@ class OptimizationSequence final : public optimization ~OptimizationSequence() noexcept override; - explicit OptimizationSequence(std::vector> optimizations) + explicit OptimizationSequence(std::vector optimizations) : Optimizations_(std::move(optimizations)) {} @@ -32,14 +32,14 @@ class OptimizationSequence final : public optimization CreateAndRun( RvsdgModule & rvsdgModule, util::StatisticsCollector & statisticsCollector, - std::vector> optimizations) + std::vector optimizations) { OptimizationSequence sequentialApplication(std::move(optimizations)); sequentialApplication.run(rvsdgModule, statisticsCollector); } private: - std::vector> Optimizations_; + std::vector Optimizations_; }; } diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index 
5d3e918aa..664a628a7 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -9,7 +9,22 @@ #include #include #include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include #include +#include +#include +#include +#include +#include #include #include #include @@ -270,6 +285,19 @@ LlcCommand::ToString(const RelocationModel & relocationModel) JlmOptCommand::~JlmOptCommand() = default; +JlmOptCommand::JlmOptCommand( + std::string programName, + const jlm::tooling::JlmOptCommandLineOptions & commandLineOptions) + : ProgramName_(std::move(programName)), + CommandLineOptions_(std::move(commandLineOptions)) +{ + for (auto optimizationId : CommandLineOptions_.GetOptimizationIds()) + { + if (auto it = Optimizations_.find(optimizationId); it == Optimizations_.end()) + Optimizations_[optimizationId] = CreateOptimization(optimizationId); + } +} + std::string JlmOptCommand::ToString() const { @@ -320,10 +348,7 @@ JlmOptCommand::Run() const CommandLineOptions_.GetInputFormat(), statisticsCollector); - llvm::OptimizationSequence::CreateAndRun( - *rvsdgModule, - statisticsCollector, - CommandLineOptions_.GetOptimizations()); + llvm::OptimizationSequence::CreateAndRun(*rvsdgModule, statisticsCollector, GetOptimizations()); PrintRvsdgModule( *rvsdgModule, @@ -334,6 +359,65 @@ JlmOptCommand::Run() const statisticsCollector.PrintStatistics(); } +std::vector +JlmOptCommand::GetOptimizations() const +{ + std::vector optimizations; + for (auto optimizationId : CommandLineOptions_.GetOptimizationIds()) + { + auto it = Optimizations_.find(optimizationId); + JLM_ASSERT(it != Optimizations_.end()); + optimizations.emplace_back(it->second.get()); + } + + return optimizations; +} + +std::unique_ptr +JlmOptCommand::CreateOptimization( + enum JlmOptCommandLineOptions::OptimizationId optimizationId) const +{ + using Andersen = llvm::aa::Andersen; + using Steensgaard = llvm::aa::Steensgaard; + using AgnosticMnp = 
llvm::aa::AgnosticMemoryNodeProvider; + using RegionAwareMnp = llvm::aa::RegionAwareMemoryNodeProvider; + + switch (optimizationId) + { + case JlmOptCommandLineOptions::OptimizationId::AAAndersenAgnostic: + return std::make_unique>(); + case JlmOptCommandLineOptions::OptimizationId::AAAndersenRegionAware: + return std::make_unique>(); + case JlmOptCommandLineOptions::OptimizationId::AASteensgaardAgnostic: + return std::make_unique>(); + case JlmOptCommandLineOptions::OptimizationId::AASteensgaardRegionAware: + return std::make_unique>(); + case JlmOptCommandLineOptions::OptimizationId::CommonNodeElimination: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::DeadNodeElimination: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::FunctionInlining: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::InvariantValueRedirection: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::LoopUnrolling: + return std::make_unique(4); + case JlmOptCommandLineOptions::OptimizationId::NodePullIn: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::NodePushOut: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::NodeReduction: + return std::make_unique(); + case JlmOptCommandLineOptions::OptimizationId::RvsdgTreePrinter: + return std::make_unique( + CommandLineOptions_.GetRvsdgTreePrinterConfiguration()); + case JlmOptCommandLineOptions::OptimizationId::ThetaGammaInversion: + return std::make_unique(); + default: + JLM_UNREACHABLE("Unhandled optimization id."); + } +} + std::unique_ptr JlmOptCommand::ParseLlvmIrFile( const util::filepath & llvmIrFile, diff --git a/jlm/tooling/Command.hpp b/jlm/tooling/Command.hpp index 78f3f6ee1..720fa4fba 100644 --- a/jlm/tooling/Command.hpp +++ b/jlm/tooling/Command.hpp @@ -341,10 +341,7 @@ class JlmOptCommand final : public Command public: ~JlmOptCommand() override; - 
JlmOptCommand(std::string programName, JlmOptCommandLineOptions commandLineOptions) - : ProgramName_(std::move(programName)), - CommandLineOptions_(std::move(commandLineOptions)) - {} + JlmOptCommand(std::string programName, const JlmOptCommandLineOptions & commandLineOptions); [[nodiscard]] std::string ToString() const override; @@ -356,7 +353,7 @@ class JlmOptCommand final : public Command Create( CommandGraph & commandGraph, std::string programName, - JlmOptCommandLineOptions commandLineOptions) + const JlmOptCommandLineOptions & commandLineOptions) { auto command = std::make_unique(std::move(programName), std::move(commandLineOptions)); @@ -421,8 +418,16 @@ class JlmOptCommand final : public Command const util::filepath & outputFile, util::StatisticsCollector & statisticsCollector); + [[nodiscard]] std::vector + GetOptimizations() const; + + [[nodiscard]] std::unique_ptr + CreateOptimization(enum JlmOptCommandLineOptions::OptimizationId optimizationId) const; + std::string ProgramName_; JlmOptCommandLineOptions CommandLineOptions_; + std::unordered_map> + Optimizations_ = {}; }; /** diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 97630d762..46185e868 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -99,20 +99,6 @@ JlmOptCommandLineOptions::Reset() noexcept OptimizationIds_.clear(); } -std::vector> -JlmOptCommandLineOptions::GetOptimizations() const noexcept -{ - std::vector> optimizations; - optimizations.reserve(OptimizationIds_.size()); - - for (auto & optimizationId : OptimizationIds_) - { - optimizations.emplace_back(GetOptimization(optimizationId)); - } - - return optimizations; -} - JlmOptCommandLineOptions::OptimizationId JlmOptCommandLineOptions::FromCommandLineArgumentToOptimizationId( const std::string & commandLineArgument) @@ -226,49 +212,6 @@ JlmOptCommandLineOptions::ToCommandLineArgument(OutputFormat outputFormat) return mapping.at(outputFormat).data(); } -std::unique_ptr 
-JlmOptCommandLineOptions::GetOptimization(enum OptimizationId optimizationId) const -{ - using Andersen = llvm::aa::Andersen; - using Steensgaard = llvm::aa::Steensgaard; - using AgnosticMnp = llvm::aa::AgnosticMemoryNodeProvider; - using RegionAwareMnp = llvm::aa::RegionAwareMemoryNodeProvider; - - switch (optimizationId) - { - case OptimizationId::AAAndersenAgnostic: - return std::make_unique>(); - case OptimizationId::AAAndersenRegionAware: - return std::make_unique>(); - case OptimizationId::AASteensgaardAgnostic: - return std::make_unique>(); - case OptimizationId::AASteensgaardRegionAware: - return std::make_unique>(); - case OptimizationId::CommonNodeElimination: - return std::make_unique(); - case OptimizationId::DeadNodeElimination: - return std::make_unique(); - case OptimizationId::FunctionInlining: - return std::make_unique(); - case OptimizationId::InvariantValueRedirection: - return std::make_unique(); - case OptimizationId::LoopUnrolling: - return std::make_unique(4); - case OptimizationId::NodePullIn: - return std::make_unique(); - case OptimizationId::NodePushOut: - return std::make_unique(); - case OptimizationId::NodeReduction: - return std::make_unique(); - case OptimizationId::RvsdgTreePrinter: - return std::make_unique(RvsdgTreePrinterConfiguration_); - case OptimizationId::ThetaGammaInversion: - return std::make_unique(); - default: - JLM_UNREACHABLE("Unhandled optimization id."); - } -} - const util::BijectiveMap & JlmOptCommandLineOptions::GetStatisticsIdCommandLineArguments() { diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index 5e37c470e..2c4292026 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -139,8 +139,11 @@ class JlmOptCommandLineOptions final : public CommandLineOptions return OptimizationIds_; } - [[nodiscard]] std::vector> - GetOptimizations() const noexcept; + [[nodiscard]] const llvm::RvsdgTreePrinter::Configuration & + GetRvsdgTreePrinterConfiguration() const noexcept + { 
+ return RvsdgTreePrinterConfiguration_; + } static OptimizationId FromCommandLineArgumentToOptimizationId(const std::string & commandLineArgument); @@ -160,9 +163,6 @@ class JlmOptCommandLineOptions final : public CommandLineOptions static const char * ToCommandLineArgument(OutputFormat outputFormat); - [[nodiscard]] std::unique_ptr - GetOptimization(enum OptimizationId optimizationId) const; - static std::unique_ptr Create( util::filepath inputFile, diff --git a/tests/jlm/tooling/TestJlmOptCommand.cpp b/tests/jlm/tooling/TestJlmOptCommand.cpp index ccc42a7e2..79e6c8cd2 100644 --- a/tests/jlm/tooling/TestJlmOptCommand.cpp +++ b/tests/jlm/tooling/TestJlmOptCommand.cpp @@ -64,6 +64,44 @@ TestJlmOptCommand() JLM_UNIT_TEST_REGISTER("jlm/tooling/TestJlmOptCommand", TestJlmOptCommand) +static int +OptimizationIdToOptimizationTranslation() +{ + using namespace jlm::llvm; + using namespace jlm::tooling; + using namespace jlm::util; + + // Arrange + std::vector optimizationIds; + for (size_t n = + static_cast(JlmOptCommandLineOptions::OptimizationId::FirstEnumValue) + 1; + n != static_cast(JlmOptCommandLineOptions::OptimizationId::LastEnumValue); + n++) + { + auto optimizationId = static_cast(n); + optimizationIds.emplace_back(optimizationId); + } + + JlmOptCommandLineOptions options( + filepath(""), + JlmOptCommandLineOptions::InputFormat::Llvm, + filepath(""), + JlmOptCommandLineOptions::OutputFormat::Llvm, + StatisticsCollectorSettings(), + RvsdgTreePrinter::Configuration(filepath(std::filesystem::temp_directory_path()), {}), + optimizationIds); + + // Act & Assert + // terminates on unhandled optimization id + JlmOptCommand command("jlm-opt", options); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/tooling/TestJlmOptCommand-OptimizationIdToOptimizationTranslation", + OptimizationIdToOptimizationTranslation) + static int PrintRvsdgTreeToFile() { diff --git a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp index 
27791fd54..bfcfe7330 100644 --- a/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp +++ b/tests/jlm/tooling/TestJlmOptCommandLineParser.cpp @@ -62,35 +62,6 @@ TestStatisticsCommandLineArgumentConversion() } } -static void -TestOptimizationIdToOptimizationTranslation() -{ - using namespace jlm::tooling; - using namespace jlm::util; - - // Arrange - JlmOptCommandLineOptions options( - filepath(""), - JlmOptCommandLineOptions::InputFormat::Llvm, - filepath(""), - JlmOptCommandLineOptions::OutputFormat::Llvm, - StatisticsCollectorSettings(), - jlm::llvm::RvsdgTreePrinter::Configuration({ std::filesystem::temp_directory_path() }, {}), - std::vector()); - - // Act & Assert - for (size_t n = - static_cast(JlmOptCommandLineOptions::OptimizationId::FirstEnumValue) + 1; - n != static_cast(JlmOptCommandLineOptions::OptimizationId::LastEnumValue); - n++) - { - auto optimizationId = static_cast(n); - - // terminates on unhandled optimization id - static_cast(options.GetOptimization(optimizationId)); - } -} - static int TestOutputFormatToCommandLineArgument() { @@ -120,7 +91,6 @@ Test() { TestOptimizationCommandLineArgumentConversion(); TestStatisticsCommandLineArgumentConversion(); - TestOptimizationIdToOptimizationTranslation(); return 0; } From 490023883c88459c3d79744fe0a9ed6ab1eabacf Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 5 Sep 2024 08:24:47 +0200 Subject: [PATCH 068/170] Rename argument class to RegionArgument (#607) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 12 +++---- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.hpp | 8 ++--- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.hpp | 2 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 6 ++-- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 6 ++-- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 6 ++-- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.hpp | 2 +- 
jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 5 +-- .../rvsdg2rhls/remove-unused-state.cpp | 6 ++-- .../rvsdg2rhls/remove-unused-state.hpp | 4 +-- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 2 +- jlm/hls/ir/hls.hpp | 8 ++--- jlm/hls/opt/cne.cpp | 12 +++---- jlm/hls/util/view.cpp | 4 +-- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 4 +-- jlm/llvm/ir/operators/Phi.hpp | 14 ++++---- jlm/llvm/ir/operators/call.cpp | 6 ++-- jlm/llvm/ir/operators/call.hpp | 10 +++--- jlm/llvm/ir/operators/delta.hpp | 6 ++-- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/ir/operators/lambda.hpp | 12 +++---- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +-- jlm/llvm/opt/alias-analyses/PointsToGraph.hpp | 4 +-- .../RegionAwareMemoryNodeProvider.cpp | 11 +++--- jlm/llvm/opt/cne.cpp | 8 ++--- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 4 +-- jlm/llvm/opt/push.cpp | 32 ++++++++--------- jlm/llvm/opt/unroll.cpp | 10 +++--- jlm/llvm/opt/unroll.hpp | 18 +++++----- jlm/mlir/backend/JlmToMlirConverter.cpp | 4 +-- jlm/rvsdg/gamma.cpp | 8 ++--- jlm/rvsdg/gamma.hpp | 6 ++-- jlm/rvsdg/graph.cpp | 2 +- jlm/rvsdg/graph.hpp | 2 +- jlm/rvsdg/node.cpp | 4 +-- jlm/rvsdg/region.cpp | 8 ++--- jlm/rvsdg/region.hpp | 36 +++++++++---------- jlm/rvsdg/structural-node.hpp | 5 ++- jlm/rvsdg/theta.hpp | 12 +++---- jlm/rvsdg/view.cpp | 2 +- tests/TestRvsdgs.cpp | 8 ++--- tests/TestRvsdgs.hpp | 26 +++++++------- tests/jlm/llvm/ir/operators/TestCall.cpp | 6 ++-- tests/jlm/llvm/ir/operators/TestLambda.cpp | 4 +-- tests/jlm/llvm/ir/operators/TestPhi.cpp | 10 +++--- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 8 +++-- tests/jlm/llvm/opt/test-cne.cpp | 6 ++-- .../mlir/frontend/TestMlirToJlmConverter.cpp | 10 +++--- tests/jlm/rvsdg/ArgumentTests.cpp | 2 +- tests/jlm/rvsdg/RegionTests.cpp | 6 ++-- tests/test-operation.hpp | 4 +-- 56 files changed, 211 insertions(+), 206 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 
1848c4166..ae55eccdd 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -2455,7 +2455,7 @@ RhlsToFirrtlConverter::DropMSBs(mlir::Block * body, mlir::Value value, int amoun // Returns the output of a node or the argument of a region that has // been instantiated as a module jlm::rvsdg::output * -RhlsToFirrtlConverter::TraceArgument(jlm::rvsdg::argument * arg) +RhlsToFirrtlConverter::TraceArgument(rvsdg::RegionArgument * arg) { // Check if the argument is part of a hls::loop_node auto region = arg->region(); @@ -2474,7 +2474,7 @@ RhlsToFirrtlConverter::TraceArgument(jlm::rvsdg::argument * arg) // Check if we are in a nested region and directly // connected to the outer regions argument auto origin = arg->input()->origin(); - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { // Need to find the source of the outer regions argument return TraceArgument(o); @@ -2554,7 +2554,7 @@ RhlsToFirrtlConverter::MlirGen(jlm::rvsdg::region * subRegion, mlir::Block * cir // Get the RVSDG node that's the origin of this input jlm::rvsdg::simple_input * input = rvsdgNode->input(i); auto origin = input->origin(); - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { origin = TraceArgument(o); } @@ -2564,7 +2564,7 @@ RhlsToFirrtlConverter::MlirGen(jlm::rvsdg::region * subRegion, mlir::Block * cir origin = TraceStructuralOutput(o); } // now origin is either a simple_output or a top-level argument - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { // The port of the instance is connected to an argument // of the region @@ -2619,7 +2619,7 @@ RhlsToFirrtlConverter::MlirGen(jlm::rvsdg::region * subRegion, mlir::Block * cir // Get the RVSDG node that's the origin of this input auto * input = dynamic_cast(requestNode->input(i)); auto origin = input->origin(); - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { origin = 
TraceArgument(o); } @@ -2765,7 +2765,7 @@ RhlsToFirrtlConverter::TraceStructuralOutput(jlm::rvsdg::structural_output * out // Found the source node return o; } - else if (dynamic_cast(origin)) + else if (dynamic_cast(origin)) { throw std::logic_error("Encountered pass through argument - should be eliminated"); } diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 4127e799a..f5fade519 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -247,7 +247,7 @@ class RhlsToFirrtlConverter : public BaseHLS DropMSBs(mlir::Block * body, mlir::Value value, int amount); jlm::rvsdg::output * - TraceArgument(jlm::rvsdg::argument * arg); + TraceArgument(rvsdg::RegionArgument * arg); jlm::rvsdg::simple_output * TraceStructuralOutput(jlm::rvsdg::structural_output * out); diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 9af0fd651..5cedadf48 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -82,7 +82,7 @@ BaseHLS::get_port_name(jlm::rvsdg::output * port) throw std::logic_error("nullptr!"); } std::string result; - if (dynamic_cast(port)) + if (dynamic_cast(port)) { result += "a"; } diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index 1f37e74d7..db6fd4d57 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -72,10 +72,10 @@ class BaseHLS static std::string get_base_file_name(const llvm::RvsdgModule & rm); - std::vector + std::vector get_mem_resps(const llvm::lambda::node * lambda) { - std::vector mem_resps; + std::vector mem_resps; for (size_t i = 0; i < lambda->subregion()->narguments(); ++i) { auto arg = lambda->subregion()->argument(i); @@ -101,10 +101,10 @@ class BaseHLS return mem_resps; } - std::vector + std::vector 
get_reg_args(const llvm::lambda::node * lambda) { - std::vector args; + std::vector args; for (size_t i = 0; i < lambda->subregion()->narguments(); ++i) { auto argtype = &lambda->subregion()->argument(i)->type(); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index ed0cc5877..7d5bcfcd1 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -26,7 +26,7 @@ DotHLS::get_text(llvm::RvsdgModule & rm) } std::string -DotHLS::argument_to_dot(jlm::rvsdg::argument * port) +DotHLS::argument_to_dot(rvsdg::RegionArgument * port) { auto name = get_port_name(port); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp index b87b8d93b..328b2f297 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp @@ -22,7 +22,7 @@ class DotHLS : public BaseHLS private: std::string - argument_to_dot(jlm::rvsdg::argument * port); + argument_to_dot(rvsdg::RegionArgument * port); std::string result_to_dot(jlm::rvsdg::result * port); diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 023e05a40..7a4f5b966 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -14,7 +14,7 @@ namespace jlm::hls { static bool -IsPassthroughArgument(const jlm::rvsdg::argument & argument) +IsPassthroughArgument(const rvsdg::RegionArgument & argument) { if (argument.nusers() != 1) { @@ -27,7 +27,7 @@ IsPassthroughArgument(const jlm::rvsdg::argument & argument) static bool IsPassthroughResult(const rvsdg::result & result) { - auto argument = dynamic_cast(result.origin()); + auto argument = dynamic_cast(result.origin()); return argument != nullptr; } @@ -115,7 +115,7 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) } static void -RemovePassthroughArgument(const jlm::rvsdg::argument & argument) 
+RemovePassthroughArgument(const rvsdg::RegionArgument & argument) { auto origin = argument.input()->origin(); auto result = dynamic_cast(*argument.begin()); diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index eaa3b1795..37a6a7d1b 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -43,7 +43,7 @@ find_slice_output(jlm::rvsdg::output * output, std::unordered_setnode())); find_slice_node(no->node(), slice); } - else if (dynamic_cast(output)) + else if (dynamic_cast(output)) { if (auto be = dynamic_cast(output)) { @@ -237,7 +237,7 @@ decouple_load( { if (loop_slice.count(ni->node())) { - jlm::rvsdg::argument * new_arg; + rvsdg::RegionArgument * new_arg; if (auto be = dynamic_cast(arg)) { new_arg = new_loop->add_backedge(arg->Type()); @@ -332,7 +332,7 @@ decouple_load( ExitResult::Create(*load_addr, *addr_output); // trace and remove loop input for mem data reponse auto mem_data_loop_out = new_load->input(new_load->ninputs() - 1)->origin(); - auto mem_data_loop_arg = dynamic_cast(mem_data_loop_out); + auto mem_data_loop_arg = dynamic_cast(mem_data_loop_out); auto mem_data_loop_in = mem_data_loop_arg->input(); auto mem_data_resp = mem_data_loop_in->origin(); dump_xml(new_loop->subregion(), "new_loop_before_remove.rvsdg"); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 0b3f0d61f..e6db79b7f 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -84,7 +84,7 @@ replace_load(jlm::rvsdg::simple_node * orig, jlm::rvsdg::output * resp) const jlm::rvsdg::bitconstant_op * trace_channel(const jlm::rvsdg::output * dst) { - if (auto arg = dynamic_cast(dst)) + if (auto arg = dynamic_cast(dst)) { return trace_channel(arg->input()->origin()); } @@ -120,7 +120,7 @@ const jlm::rvsdg::output * trace_call(const jlm::rvsdg::output * output) { // version of trace call for rhls - if (auto 
argument = dynamic_cast(output)) + if (auto argument = dynamic_cast(output)) { auto graph = output->region()->graph(); if (argument->region() == graph->root()) @@ -235,7 +235,7 @@ find_decouple_response( const jlm::llvm::lambda::node * lambda, const jlm::rvsdg::bitconstant_op * request_constant) { - jlm::rvsdg::argument * response_function = nullptr; + jlm::rvsdg::RegionArgument * response_function = nullptr; for (size_t i = 0; i < lambda->ncvarguments(); ++i) { auto ip = lambda->cvargument(i)->input(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 370fda00c..ca5683495 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -39,7 +39,7 @@ mem_sep_argument(llvm::RvsdgModule & rm) } // from MemoryStateEncoder.cpp -jlm::rvsdg::argument * +rvsdg::RegionArgument * GetMemoryStateArgument(const llvm::lambda::node & lambda) { auto subregion = lambda.subregion(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp index 787eed28e..818b1b504 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp @@ -23,7 +23,7 @@ mem_sep_argument(jlm::rvsdg::region * region); void mem_sep_argument(llvm::RvsdgModule & rm); -jlm::rvsdg::argument * +rvsdg::RegionArgument * GetMemoryStateArgument(const llvm::lambda::node & lambda); jlm::rvsdg::result * diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 5cdcc72d7..1bba9fe79 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -176,7 +176,8 @@ eliminate_gamma_eol(rvsdg::gamma_node * gamma) if (res->output() && res->output()->nusers() == 0) { // continue loop subregion - if (auto arg = dynamic_cast(gamma->subregion(1)->result(i)->origin())) + if (auto arg = + dynamic_cast(gamma->subregion(1)->result(i)->origin())) { // value is just passed through if 
(o->nusers()) @@ -228,7 +229,7 @@ is_output_of(jlm::rvsdg::output * output, jlm::rvsdg::node * node) bool depends_on(jlm::rvsdg::output * output, jlm::rvsdg::node * node) { - auto arg = dynamic_cast(output); + auto arg = dynamic_cast(output); if (arg) { return false; diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index 8f47c4a03..ef2fa0b7d 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -248,7 +248,7 @@ remove_lambda_passthrough(llvm::lambda::node * ln) } void -remove_region_passthrough(const jlm::rvsdg::argument * arg) +remove_region_passthrough(const rvsdg::RegionArgument * arg) { auto res = dynamic_cast(*arg->begin()); auto origin = arg->input()->origin(); @@ -264,7 +264,7 @@ remove_region_passthrough(const jlm::rvsdg::argument * arg) bool is_passthrough(const jlm::rvsdg::result * res) { - auto arg = dynamic_cast(res->origin()); + auto arg = dynamic_cast(res->origin()); if (arg) { return true; @@ -273,7 +273,7 @@ is_passthrough(const jlm::rvsdg::result * res) } bool -is_passthrough(const jlm::rvsdg::argument * arg) +is_passthrough(const rvsdg::RegionArgument * arg) { if (arg->nusers() == 1) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp index 783369c35..193cc18bb 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp @@ -15,7 +15,7 @@ namespace jlm::hls { bool -is_passthrough(const jlm::rvsdg::argument * arg); +is_passthrough(const rvsdg::RegionArgument * arg); bool is_passthrough(const jlm::rvsdg::result * res); @@ -24,7 +24,7 @@ llvm::lambda::node * remove_lambda_passthrough(llvm::lambda::node * ln); void -remove_region_passthrough(const jlm::rvsdg::argument * arg); +remove_region_passthrough(const rvsdg::RegionArgument * arg); void remove_gamma_passthrough(jlm::rvsdg::gamma_node * 
gn); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index aa81009b5..93e2f91e0 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -111,7 +111,7 @@ trace_call(jlm::rvsdg::input * input) { auto graph = input->region()->graph(); - auto argument = dynamic_cast(input->origin()); + auto argument = dynamic_cast(input->origin()); const jlm::rvsdg::output * result; if (auto to = dynamic_cast(input->origin())) { diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index d33100b70..38b144f3e 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -610,7 +610,7 @@ class loop_node; /** * Represents the entry argument for the HLS loop. */ -class EntryArgument : public rvsdg::argument +class EntryArgument : public rvsdg::RegionArgument { friend loop_node; @@ -622,7 +622,7 @@ class EntryArgument : public rvsdg::argument rvsdg::region & region, rvsdg::structural_input & input, const std::shared_ptr type) - : rvsdg::argument(®ion, &input, std::move(type)) + : rvsdg::RegionArgument(®ion, &input, std::move(type)) {} public: @@ -643,7 +643,7 @@ class EntryArgument : public rvsdg::argument } }; -class backedge_argument : public jlm::rvsdg::argument +class backedge_argument : public rvsdg::RegionArgument { friend loop_node; friend backedge_result; @@ -664,7 +664,7 @@ class backedge_argument : public jlm::rvsdg::argument backedge_argument( jlm::rvsdg::region * region, const std::shared_ptr & type) - : jlm::rvsdg::argument(region, nullptr, type), + : rvsdg::RegionArgument(region, nullptr, type), result_(nullptr) {} diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 770379464..dcbb9777b 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -186,8 +186,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (is(o1) && is(o2)) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = 
static_cast(o2); + auto a1 = static_cast(o1); + auto a2 = static_cast(o2); vs.insert(a1, a2); auto i1 = a1->input(), i2 = a2->input(); if (!congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx)) @@ -211,8 +211,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return congruent(r1->origin(), r2->origin(), vs, ctx); } - auto a1 = dynamic_cast(o1); - auto a2 = dynamic_cast(o2); + auto a1 = dynamic_cast(o1); + auto a2 = dynamic_cast(o2); if (a1 && is(a1->region()->node()) && a2 && is(a2->region()->node())) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); @@ -242,8 +242,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (is(o1) && is(o2)) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = static_cast(o2); + auto a1 = static_cast(o1); + auto a2 = static_cast(o2); return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); } diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 170390ec3..4151ae717 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -36,7 +36,7 @@ get_dot_name(jlm::rvsdg::node * node) std::string get_dot_name(jlm::rvsdg::output * output) { - if (dynamic_cast(output)) + if (dynamic_cast(output)) { return jlm::util::strfmt("a", hex((intptr_t)output), ":", "default"); } @@ -89,7 +89,7 @@ port_to_dot(const std::string & display_name, const std::string & dot_name) } std::string -argument_to_dot(jlm::rvsdg::argument * argument) +argument_to_dot(rvsdg::RegionArgument * argument) { auto display_name = jlm::util::strfmt("a", argument->index()); auto dot_name = jlm::util::strfmt("a", hex((intptr_t)argument)); diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index ca7f8ac4e..512822ee8 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -190,8 +190,8 @@ convert_empty_gamma_node(const 
rvsdg::gamma_node * gamma, context & ctx) { auto output = gamma->output(n); - auto a0 = static_cast(gamma->subregion(0)->result(n)->origin()); - auto a1 = static_cast(gamma->subregion(1)->result(n)->origin()); + auto a0 = static_cast(gamma->subregion(0)->result(n)->origin()); + auto a1 = static_cast(gamma->subregion(1)->result(n)->origin()); auto o0 = a0->input()->origin(); auto o1 = a1->input()->origin(); diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index c3a672bab..8b24a299c 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -403,7 +403,7 @@ class node final : public jlm::rvsdg::structural_node * outputs might refer to arguments that have been removed by the application of this method. It * is up to the caller to ensure that the invariants of the phi node will eventually be met again. * - * \see argument#IsDead() + * \see RegionArgument#IsDead() * \see PrunePhiArguments() * \see RemovePhiOutputsWhere() * \see PrunePhiOutputs() @@ -426,7 +426,7 @@ class node final : public jlm::rvsdg::structural_node size_t PrunePhiArguments() { - auto match = [](const jlm::rvsdg::argument &) + auto match = [](const rvsdg::RegionArgument &) { return true; }; @@ -651,7 +651,7 @@ class rvoutput final : public jlm::rvsdg::structural_output class rvresult; -class rvargument final : public jlm::rvsdg::argument +class rvargument final : public rvsdg::RegionArgument { friend class phi::builder; friend class phi::rvoutput; @@ -661,7 +661,7 @@ class rvargument final : public jlm::rvsdg::argument private: rvargument(jlm::rvsdg::region * region, const std::shared_ptr type) - : argument(region, nullptr, std::move(type)), + : RegionArgument(region, nullptr, std::move(type)), output_(nullptr) {} @@ -709,7 +709,7 @@ class rvargument final : public jlm::rvsdg::argument class cvinput; class node; -class cvargument final : public jlm::rvsdg::argument +class cvargument final : public rvsdg::RegionArgument { friend class phi::node; @@ 
-720,7 +720,7 @@ class cvargument final : public jlm::rvsdg::argument jlm::rvsdg::region * region, phi::cvinput * input, std::shared_ptr type) - : jlm::rvsdg::argument(region, input, std::move(type)) + : rvsdg::RegionArgument(region, input, std::move(type)) {} private: @@ -749,7 +749,7 @@ class cvargument final : public jlm::rvsdg::argument cvinput * input() const noexcept { - return static_cast(argument::input()); + return static_cast(RegionArgument::input()); } }; diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 6bfbec681..b8ba9bb90 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -32,7 +32,7 @@ invariantInput(const rvsdg::gamma_output & output, InvariantOutputMap & invarian bool resultIsInvariant = false; while (true) { - if (auto argument = dynamic_cast(origin)) + if (auto argument = dynamic_cast(origin)) { resultIsInvariant = true; input = argument->input(); @@ -176,7 +176,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) if (is(origin)) { - auto argument = util::AssertedCast(origin); + auto argument = util::AssertedCast(origin); origin = argument->input()->origin(); continue; } @@ -246,7 +246,7 @@ CallNode::ClassifyCall(const CallNode & callNode) return CallTypeClassifier::CreateNonRecursiveDirectCallClassifier(*lambdaOutput); } - if (auto argument = dynamic_cast(output)) + if (auto argument = dynamic_cast(output)) { if (is(argument)) { diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index 1aa87e620..b2592af79 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -181,7 +181,7 @@ class CallTypeClassifier final } JLM_ASSERT(GetCallType() == CallType::RecursiveDirectCall); - auto argument = jlm::util::AssertedCast(Output_); + auto argument = jlm::util::AssertedCast(Output_); /* * FIXME: This assumes that all recursion variables where added before the dependencies. 
It * would be better if we did not use the index for retrieving the result, but instead @@ -197,11 +197,11 @@ class CallTypeClassifier final * * @return The imported function. */ - [[nodiscard]] jlm::rvsdg::argument & + [[nodiscard]] rvsdg::RegionArgument & GetImport() const noexcept { JLM_ASSERT(GetCallType() == CallType::ExternalCall); - return *jlm::util::AssertedCast(Output_); + return *jlm::util::AssertedCast(Output_); } /** \brief Return origin of a call node's function input. @@ -225,14 +225,14 @@ class CallTypeClassifier final } static std::unique_ptr - CreateRecursiveDirectCallClassifier(jlm::rvsdg::argument & output) + CreateRecursiveDirectCallClassifier(rvsdg::RegionArgument & output) { JLM_ASSERT(is(&output)); return std::make_unique(CallType::RecursiveDirectCall, output); } static std::unique_ptr - CreateExternalCallClassifier(jlm::rvsdg::argument & argument) + CreateExternalCallClassifier(rvsdg::RegionArgument & argument) { JLM_ASSERT(argument.region() == argument.region()->graph()->root()); return std::make_unique(CallType::ExternalCall, argument); diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 178a50d9f..3e75b87ed 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -406,7 +406,7 @@ class output final : public rvsdg::structural_output /** \brief Delta context variable argument */ -class cvargument final : public rvsdg::argument +class cvargument final : public rvsdg::RegionArgument { friend ::jlm::llvm::delta::node; @@ -418,7 +418,7 @@ class cvargument final : public rvsdg::argument private: cvargument(rvsdg::region * region, cvinput * input) - : rvsdg::argument(region, input, input->Type()) + : rvsdg::RegionArgument(region, input, input->Type()) {} static cvargument * @@ -433,7 +433,7 @@ class cvargument final : public rvsdg::argument cvinput * input() const noexcept { - return static_cast(rvsdg::argument::input()); + return static_cast(rvsdg::RegionArgument::input()); } }; diff 
--git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index ddf902092..7491c236f 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -150,7 +150,7 @@ node::add_ctxvar(jlm::rvsdg::output * origin) return cvargument::create(subregion(), input); } -rvsdg::argument & +rvsdg::RegionArgument & node::GetMemoryStateRegionArgument() const noexcept { auto argument = fctargument(nfctarguments() - 1); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index b1e334179..a8af382dd 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -306,7 +306,7 @@ class node final : public jlm::rvsdg::structural_node /** * @return The memory state argument of the lambda subregion. */ - [[nodiscard]] rvsdg::argument & + [[nodiscard]] rvsdg::RegionArgument & GetMemoryStateRegionArgument() const noexcept; /** @@ -507,7 +507,7 @@ class output final : public jlm::rvsdg::structural_output /** \brief Lambda function argument */ -class fctargument final : public jlm::rvsdg::argument +class fctargument final : public rvsdg::RegionArgument { friend ::jlm::llvm::lambda::node; @@ -531,7 +531,7 @@ class fctargument final : public jlm::rvsdg::argument private: fctargument(jlm::rvsdg::region * region, std::shared_ptr type) - : jlm::rvsdg::argument(region, nullptr, std::move(type)) + : rvsdg::RegionArgument(region, nullptr, std::move(type)) {} static fctargument * @@ -596,7 +596,7 @@ class node::fctargconstiterator final /** \brief Lambda context variable argument */ -class cvargument final : public jlm::rvsdg::argument +class cvargument final : public rvsdg::RegionArgument { friend ::jlm::llvm::lambda::node; @@ -608,7 +608,7 @@ class cvargument final : public jlm::rvsdg::argument private: cvargument(jlm::rvsdg::region * region, cvinput * input) - : jlm::rvsdg::argument(region, input, input->Type()) + : rvsdg::RegionArgument(region, input, input->Type()) {} static cvargument * @@ -623,7 
+623,7 @@ class cvargument final : public jlm::rvsdg::argument cvinput * input() const noexcept { - return jlm::util::AssertedCast(jlm::rvsdg::argument::input()); + return jlm::util::AssertedCast(rvsdg::RegionArgument::input()); } }; diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index cb4ba2a0f..0b218f5d6 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -460,7 +460,7 @@ DeadNodeElimination::SweepLambda(lambda::node & lambdaNode) const void DeadNodeElimination::SweepPhi(phi::node & phiNode) const { - util::HashSet deadRecursionArguments; + util::HashSet deadRecursionArguments; auto isDeadOutput = [&](const phi::rvoutput & output) { @@ -479,7 +479,7 @@ DeadNodeElimination::SweepPhi(phi::node & phiNode) const SweepRegion(*phiNode.subregion()); - auto isDeadArgument = [&](const rvsdg::argument & argument) + auto isDeadArgument = [&](const rvsdg::RegionArgument & argument) { if (argument.input()) { diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp index 32d94c82e..67c1c5a65 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp @@ -54,7 +54,7 @@ class PointsToGraph final using DeltaNodeMap = std::unordered_map>; using ImportNodeMap = - std::unordered_map>; + std::unordered_map>; using LambdaNodeMap = std::unordered_map>; using MallocNodeMap = @@ -289,7 +289,7 @@ class PointsToGraph final } const PointsToGraph::ImportNode & - GetImportNode(const jlm::rvsdg::argument & argument) const + GetImportNode(const rvsdg::RegionArgument & argument) const { auto it = ImportNodes_.find(&argument); if (it == ImportNodes_.end()) diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index b5b7ebcc8..b824affc4 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ 
b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -444,7 +444,7 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning } bool - ContainsExternalFunctionNodes(const rvsdg::argument & import) const + ContainsExternalFunctionNodes(const rvsdg::RegionArgument & import) const { return ExternalFunctionNodes_.find(&import) != ExternalFunctionNodes_.end(); } @@ -463,7 +463,7 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning } const util::HashSet & - GetExternalFunctionNodes(const rvsdg::argument & import) const + GetExternalFunctionNodes(const rvsdg::RegionArgument & import) const { JLM_ASSERT(ContainsExternalFunctionNodes(import)); @@ -483,7 +483,7 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning void AddExternalFunctionNodes( - const rvsdg::argument & import, + const rvsdg::RegionArgument & import, util::HashSet memoryNodes) { JLM_ASSERT(!ContainsExternalFunctionNodes(import)); @@ -609,8 +609,9 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning RegionSummaryMap RegionSummaries_; const PointsToGraph & PointsToGraph_; - std::unordered_map> - ExternalFunctionNodes_; + std:: + unordered_map> + ExternalFunctionNodes_; }; RegionAwareMemoryNodeProvider::~RegionAwareMemoryNodeProvider() noexcept = default; diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 10f832dd9..d8129f368 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -183,8 +183,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (is(o1) && is(o2)) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = static_cast(o2); + auto a1 = static_cast(o1); + auto a2 = static_cast(o2); vs.insert(a1, a2); auto i1 = a1->input(), i2 = a2->input(); if (!congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx)) @@ -226,8 +226,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * 
o2, vset & vs, cnectx & if (is(o1) && is(o2)) { JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = static_cast(o2); + auto a1 = static_cast(o1); + auto a2 = static_cast(o2); return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); } diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 0d0bfe298..ad236916b 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -50,7 +50,7 @@ find_producer(jlm::rvsdg::input * input) { auto graph = input->region()->graph(); - auto argument = dynamic_cast(input->origin()); + auto argument = dynamic_cast(input->origin()); if (argument == nullptr) return input->origin(); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index f87f583be..ad166f401 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -124,10 +124,10 @@ copy_condition_nodes( } } -static jlm::rvsdg::argument * +static rvsdg::RegionArgument * to_argument(jlm::rvsdg::output * output) { - return dynamic_cast(output); + return dynamic_cast(output); } static jlm::rvsdg::structural_output * diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 523e68766..7008d4969 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -94,7 +94,7 @@ has_side_effects(const jlm::rvsdg::node * node) return false; } -static std::vector +static std::vector copy_from_gamma(jlm::rvsdg::node * node, size_t r) { JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); @@ -106,12 +106,12 @@ copy_from_gamma(jlm::rvsdg::node * node, size_t r) std::vector operands; for (size_t n = 0; n < node->ninputs(); n++) { - JLM_ASSERT(dynamic_cast(node->input(n)->origin())); - auto argument = static_cast(node->input(n)->origin()); + JLM_ASSERT(dynamic_cast(node->input(n)->origin())); + auto argument = static_cast(node->input(n)->origin()); operands.push_back(argument->input()->origin()); } - std::vector arguments; + std::vector arguments; auto copy = 
node->copy(target, operands); for (size_t n = 0; n < copy->noutputs(); n++) { @@ -123,7 +123,7 @@ copy_from_gamma(jlm::rvsdg::node * node, size_t r) return arguments; } -static std::vector +static std::vector copy_from_theta(jlm::rvsdg::node * node) { JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); @@ -135,12 +135,12 @@ copy_from_theta(jlm::rvsdg::node * node) std::vector operands; for (size_t n = 0; n < node->ninputs(); n++) { - JLM_ASSERT(dynamic_cast(node->input(n)->origin())); - auto argument = static_cast(node->input(n)->origin()); + JLM_ASSERT(dynamic_cast(node->input(n)->origin())); + auto argument = static_cast(node->input(n)->origin()); operands.push_back(argument->input()->origin()); } - std::vector arguments; + std::vector arguments; auto copy = node->copy(target, operands); for (size_t n = 0; n < copy->noutputs(); n++) { @@ -220,15 +220,15 @@ push(jlm::rvsdg::gamma_node * gamma) static bool is_theta_invariant( const jlm::rvsdg::node * node, - const std::unordered_set & invariants) + const std::unordered_set & invariants) { JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); JLM_ASSERT(node->depth() == 0); for (size_t n = 0; n < node->ninputs(); n++) { - JLM_ASSERT(dynamic_cast(node->input(n)->origin())); - auto argument = static_cast(node->input(n)->origin()); + JLM_ASSERT(dynamic_cast(node->input(n)->origin())); + auto argument = static_cast(node->input(n)->origin()); if (invariants.find(argument) == invariants.end()) return false; } @@ -249,7 +249,7 @@ push_top(jlm::rvsdg::theta_node * theta) } /* collect loop invariant arguments */ - std::unordered_set invariants; + std::unordered_set invariants; for (const auto & lv : *theta) { if (lv->result()->origin() == lv->argument()) @@ -295,7 +295,7 @@ push_top(jlm::rvsdg::theta_node * theta) } static bool -is_invariant(const jlm::rvsdg::argument * argument) +is_invariant(const rvsdg::RegionArgument * argument) { JLM_ASSERT(jlm::rvsdg::is(argument->region()->node())); return 
argument->region()->result(argument->index() + 1)->origin() == argument; @@ -307,13 +307,13 @@ is_movable_store(jlm::rvsdg::node * node) JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); JLM_ASSERT(jlm::rvsdg::is(node)); - auto address = dynamic_cast(node->input(0)->origin()); + auto address = dynamic_cast(node->input(0)->origin()); if (!address || !is_invariant(address) || address->nusers() != 2) return false; for (size_t n = 2; n < node->ninputs(); n++) { - auto argument = dynamic_cast(node->input(n)->origin()); + auto argument = dynamic_cast(node->input(n)->origin()); if (!argument || argument->nusers() > 1) return false; } @@ -338,7 +338,7 @@ pushout_store(jlm::rvsdg::node * storenode) JLM_ASSERT(jlm::rvsdg::is(storenode) && is_movable_store(storenode)); auto theta = static_cast(storenode->region()->node()); auto storeop = static_cast(&storenode->operation()); - auto oaddress = static_cast(storenode->input(0)->origin()); + auto oaddress = static_cast(storenode->input(0)->origin()); auto ovalue = storenode->input(1)->origin(); /* insert new value for store */ diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 3774eb622..689a536e9 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -65,17 +65,17 @@ is_theta_invariant(const jlm::rvsdg::output * output) if (jlm::rvsdg::is(jlm::rvsdg::node_output::node(output))) return true; - auto argument = dynamic_cast(output); + auto argument = dynamic_cast(output); if (!argument) return false; return is_invariant(static_cast(argument->input())); } -static jlm::rvsdg::argument * +static rvsdg::RegionArgument * push_from_theta(jlm::rvsdg::output * output) { - auto argument = dynamic_cast(output); + auto argument = dynamic_cast(output); if (argument) return argument; @@ -99,7 +99,7 @@ is_idv(jlm::rvsdg::input * input) auto node = rvsdg::input::GetNode(*input); JLM_ASSERT(is(node) || is(node)); - auto a = dynamic_cast(input->origin()); + auto a = dynamic_cast(input->origin()); if (!a) return 
false; @@ -160,7 +160,7 @@ unrollinfo::create(jlm::rvsdg::theta_node * theta) if (!is_idv(i0) && !is_idv(i1)) return nullptr; - auto idv = static_cast(is_idv(i0) ? i0->origin() : i1->origin()); + auto idv = static_cast(is_idv(i0) ? i0->origin() : i1->origin()); auto step = idv == i0->origin() ? i1->origin() : i0->origin(); if (!is_theta_invariant(step)) diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index 5120fd828..0ab02fad6 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -53,9 +53,9 @@ class unrollinfo final inline unrollinfo( jlm::rvsdg::node * cmpnode, jlm::rvsdg::node * armnode, - jlm::rvsdg::argument * idv, - jlm::rvsdg::argument * step, - jlm::rvsdg::argument * end) + rvsdg::RegionArgument * idv, + rvsdg::RegionArgument * step, + rvsdg::RegionArgument * end) : end_(end), step_(step), cmpnode_(cmpnode), @@ -133,7 +133,7 @@ class unrollinfo final return *static_cast(&armnode()->operation()); } - inline jlm::rvsdg::argument * + inline rvsdg::RegionArgument * idv() const noexcept { return idv_; @@ -151,7 +151,7 @@ class unrollinfo final return value(init()); } - inline jlm::rvsdg::argument * + inline rvsdg::RegionArgument * step() const noexcept { return step_; @@ -163,7 +163,7 @@ class unrollinfo final return value(step()); } - inline jlm::rvsdg::argument * + inline rvsdg::RegionArgument * end() const noexcept { return end_; @@ -225,11 +225,11 @@ class unrollinfo final return &static_cast(&p->operation())->value(); } - jlm::rvsdg::argument * end_; - jlm::rvsdg::argument * step_; + rvsdg::RegionArgument * end_; + rvsdg::RegionArgument * step_; jlm::rvsdg::node * cmpnode_; jlm::rvsdg::node * armnode_; - jlm::rvsdg::argument * idv_; + rvsdg::RegionArgument * idv_; }; /** diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 1b3ec54c6..972c64f62 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -90,7 +90,7 @@ 
JlmToMlirConverter::ConvertRegion(rvsdg::region & region, ::mlir::Block & block) { results.push_back(operationsMap.at(nodeOuput->node())->getResult(nodeOuput->index())); } - else if (auto arg = dynamic_cast(region.result(i)->origin())) + else if (auto arg = dynamic_cast(region.result(i)->origin())) { results.push_back(block.getArgument(arg->index())); } @@ -125,7 +125,7 @@ JlmToMlirConverter::GetConvertedInputs( { inputs.push_back(operationsMap.at(nodeOuput->node())->getResult(nodeOuput->index())); } - else if (auto arg = dynamic_cast(node.input(i)->origin())) + else if (auto arg = dynamic_cast(node.input(i)->origin())) { inputs.push_back(block.getArgument(arg->index())); } diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index d73eb15f9..24f79fd29 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -46,7 +46,7 @@ perform_invariant_reduction(jlm::rvsdg::gamma_node * gamma) bool was_normalized = true; for (auto it = gamma->begin_exitvar(); it != gamma->end_exitvar(); it++) { - auto argument = dynamic_cast(it->result(0)->origin()); + auto argument = dynamic_cast(it->result(0)->origin()); if (!argument) continue; @@ -54,7 +54,7 @@ perform_invariant_reduction(jlm::rvsdg::gamma_node * gamma) auto input = argument->input(); for (n = 1; n < it->nresults(); n++) { - auto argument = dynamic_cast(it->result(n)->origin()); + auto argument = dynamic_cast(it->result(n)->origin()); if (!argument && argument->input() != input) break; } @@ -279,7 +279,7 @@ gamma_output::~gamma_output() noexcept bool gamma_output::IsInvariant(rvsdg::output ** invariantOrigin) const noexcept { - auto argument = dynamic_cast(result(0)->origin()); + auto argument = dynamic_cast(result(0)->origin()); if (!argument) { return false; @@ -289,7 +289,7 @@ gamma_output::IsInvariant(rvsdg::output ** invariantOrigin) const noexcept auto origin = argument->input()->origin(); for (n = 1; n < nresults(); n++) { - argument = dynamic_cast(result(n)->origin()); + argument = 
dynamic_cast(result(n)->origin()); if (argument == nullptr || argument->input()->origin() != origin) break; } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index aedc7ff6e..47c28ba74 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -368,7 +368,7 @@ class gamma_input final : public structural_input return arguments.size(); } - inline jlm::rvsdg::argument * + [[nodiscard]] RegionArgument * argument(size_t n) const noexcept { JLM_ASSERT(n < narguments()); @@ -462,7 +462,7 @@ inline gamma_node::gamma_node(jlm::rvsdg::output * predicate, size_t nalternativ /** * Represents a region argument in a gamma subregion. */ -class GammaArgument final : public argument +class GammaArgument final : public RegionArgument { friend gamma_node; @@ -474,7 +474,7 @@ class GammaArgument final : public argument private: GammaArgument(rvsdg::region & region, gamma_input & input) - : argument(®ion, &input, input.Type()) + : RegionArgument(®ion, &input, input.Type()) {} static GammaArgument & diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 47b63c289..046cd282f 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -18,7 +18,7 @@ GraphImport::GraphImport( rvsdg::graph & graph, std::shared_ptr type, std::string name) - : argument(graph.root(), nullptr, std::move(type)), + : RegionArgument(graph.root(), nullptr, std::move(type)), Name_(std::move(name)) {} diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index b00ae85d5..88fe632f9 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -25,7 +25,7 @@ namespace jlm::rvsdg /** * Represents an import into the RVSDG of an external entity. 
*/ -class GraphImport : public argument +class GraphImport : public RegionArgument { protected: GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name); diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 78d9922dd..04c2d4164 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -305,8 +305,8 @@ producer(const jlm::rvsdg::output * output) noexcept if (auto node = node_output::node(output)) return node; - JLM_ASSERT(dynamic_cast(output)); - auto argument = static_cast(output); + JLM_ASSERT(dynamic_cast(output)); + auto argument = static_cast(output); if (!argument->input()) return nullptr; diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 632780e4d..8309363e3 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -14,7 +14,7 @@ namespace jlm::rvsdg { -argument::~argument() noexcept +RegionArgument::~RegionArgument() noexcept { on_output_destroy(this); @@ -22,7 +22,7 @@ argument::~argument() noexcept input()->arguments.erase(this); } -argument::argument( +RegionArgument::RegionArgument( jlm::rvsdg::region * region, jlm::rvsdg::structural_input * input, std::shared_ptr type) @@ -106,7 +106,7 @@ region::region(jlm::rvsdg::structural_node * node, size_t index) } void -region::append_argument(jlm::rvsdg::argument * argument) +region::append_argument(RegionArgument * argument) { if (argument->region() != this) throw jlm::util::error("Appending argument to wrong region."); @@ -125,7 +125,7 @@ void region::RemoveArgument(size_t index) { JLM_ASSERT(index < narguments()); - jlm::rvsdg::argument * argument = arguments_[index]; + RegionArgument * argument = arguments_[index]; delete argument; for (size_t n = index; n < arguments_.size() - 1; n++) diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 8bdd7f2d3..4adf4c1cc 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -39,32 +39,32 @@ class substitution_map; * depends on the structural node the region is part of. 
A region argument is either linked * with a \ref structural_input or is a standalone argument. */ -class argument : public output +class RegionArgument : public output { - util::intrusive_list_anchor structural_input_anchor_; + util::intrusive_list_anchor structural_input_anchor_; public: - typedef util::intrusive_list_accessor + typedef util::intrusive_list_accessor structural_input_accessor; - ~argument() noexcept override; + ~RegionArgument() noexcept override; protected: - argument( + RegionArgument( rvsdg::region * region, structural_input * input, std::shared_ptr type); public: - argument(const argument &) = delete; + RegionArgument(const RegionArgument &) = delete; - argument(argument &&) = delete; + RegionArgument(RegionArgument &&) = delete; - argument & - operator=(const argument &) = delete; + RegionArgument & + operator=(const RegionArgument &) = delete; - argument & - operator=(argument &&) = delete; + RegionArgument & + operator=(RegionArgument &&) = delete; [[nodiscard]] structural_input * input() const noexcept @@ -80,7 +80,7 @@ class argument : public output * * @return A reference to the copied argument. */ - virtual argument & + virtual RegionArgument & Copy(rvsdg::region & region, structural_input * input) = 0; private: @@ -221,7 +221,7 @@ class region * Multiple invocations of append_argument for the same argument are undefined. */ void - append_argument(jlm::rvsdg::argument * argument); + append_argument(RegionArgument * argument); /** * Removes an argument from the region given an arguments' index. @@ -235,8 +235,8 @@ class region * runtime is therefore O(n), where n is the region's number of arguments. 
* * \see narguments() - * \see argument#index() - * \see argument::nusers() + * \see RegionArgument#index() + * \see RegionArgument::nusers() */ void RemoveArgument(size_t index); @@ -268,7 +268,7 @@ class region return arguments_.size(); } - inline jlm::rvsdg::argument * + inline RegionArgument * argument(size_t index) const noexcept { JLM_ASSERT(index < narguments()); @@ -325,7 +325,7 @@ class region void PruneArguments() { - auto match = [](const rvsdg::argument &) + auto match = [](const RegionArgument &) { return true; }; @@ -484,7 +484,7 @@ class region jlm::rvsdg::graph * graph_; jlm::rvsdg::structural_node * node_; std::vector results_; - std::vector arguments_; + std::vector arguments_; }; static inline void diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 34d3a6cf1..951eb129d 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -66,9 +66,8 @@ class structural_node : public node /* structural input class */ -typedef jlm::util:: - intrusive_list - argument_list; +typedef jlm::util::intrusive_list + argument_list; class structural_input : public node_input { diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index defe8fa10..5fd8e9fcc 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -207,7 +207,7 @@ class theta_node final : public structural_node * again. * * \see RemoveThetaOutputsWhere() - * \see argument#IsDead() + * \see RegionArgument#IsDead() */ template util::HashSet @@ -224,7 +224,7 @@ class theta_node final : public structural_node * again. 
* * \see RemoveThetaInputsWhere() - * \see argument#IsDead() + * \see RegionArgument#IsDead() */ util::HashSet PruneThetaInputs() @@ -280,7 +280,7 @@ class theta_input final : public structural_input return output_; } - inline jlm::rvsdg::argument * + inline RegionArgument * argument() const noexcept { JLM_ASSERT(arguments.size() == 1); @@ -327,7 +327,7 @@ class theta_output final : public structural_output return input_; } - inline jlm::rvsdg::argument * + inline RegionArgument * argument() const noexcept { return input_->argument(); @@ -347,7 +347,7 @@ class theta_output final : public structural_output /** * Represents a region argument in a theta subregion. */ -class ThetaArgument final : public argument +class ThetaArgument final : public RegionArgument { friend theta_node; @@ -359,7 +359,7 @@ class ThetaArgument final : public argument private: ThetaArgument(rvsdg::region & region, theta_input & input) - : argument(®ion, &input, input.Type()) + : RegionArgument(®ion, &input, input.Type()) { JLM_ASSERT(is(region.node())); } diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index fde1e968a..7163b6d2a 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -28,7 +28,7 @@ create_port_name( const jlm::rvsdg::output * port, std::unordered_map & map) { - std::string name = dynamic_cast(port) ? "a" : "o"; + std::string name = dynamic_cast(port) ? 
"a" : "o"; name += jlm::util::strfmt(map.size()); return name; } diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 89753a7f3..b467590dd 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -1132,7 +1132,7 @@ ExternalCallTest1::SetupRvsdg() return &GraphImport::Create(*rvsdg, functionGType, "g", linkage::external_linkage); }; - auto SetupFunctionF = [&](jlm::rvsdg::argument * functionG) + auto SetupFunctionF = [&](jlm::rvsdg::RegionArgument * functionG) { auto pointerType = PointerType::Create(); auto iOStateType = iostatetype::Create(); @@ -2790,7 +2790,7 @@ EscapedMemoryTest2::SetupRvsdg() return std::make_tuple(lambdaOutput, jlm::rvsdg::node_output::node(mallocResults[0])); }; - auto SetupCallExternalFunction1 = [&](jlm::rvsdg::argument * externalFunction1Argument) + auto SetupCallExternalFunction1 = [&](jlm::rvsdg::RegionArgument * externalFunction1Argument) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -2826,7 +2826,7 @@ EscapedMemoryTest2::SetupRvsdg() return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::node_output::node(mallocResults[0])); }; - auto SetupCallExternalFunction2 = [&](jlm::rvsdg::argument * externalFunction2Argument) + auto SetupCallExternalFunction2 = [&](jlm::rvsdg::RegionArgument * externalFunction2Argument) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -2942,7 +2942,7 @@ EscapedMemoryTest3::SetupRvsdg() return deltaOutput; }; - auto SetupTestFunction = [&](jlm::rvsdg::argument * externalFunctionArgument) + auto SetupTestFunction = [&](jlm::rvsdg::RegionArgument * externalFunctionArgument) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index 8486d4c0c..914b3bdf0 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -837,7 +837,7 @@ class ExternalCallTest1 final : public RvsdgTest return *CallG_; } - 
[[nodiscard]] const jlm::rvsdg::argument & + [[nodiscard]] const jlm::rvsdg::RegionArgument & ExternalGArgument() const noexcept { return *ExternalGArgument_; @@ -851,7 +851,7 @@ class ExternalCallTest1 final : public RvsdgTest jlm::llvm::CallNode * CallG_; - jlm::rvsdg::argument * ExternalGArgument_; + jlm::rvsdg::RegionArgument * ExternalGArgument_; }; /** @@ -901,7 +901,7 @@ class ExternalCallTest2 final : public RvsdgTest return *CallF_; } - [[nodiscard]] jlm::rvsdg::argument & + [[nodiscard]] jlm::rvsdg::RegionArgument & ExternalF() { JLM_ASSERT(ExternalFArgument_ != nullptr); @@ -916,7 +916,7 @@ class ExternalCallTest2 final : public RvsdgTest jlm::llvm::CallNode * CallF_ = {}; - jlm::rvsdg::argument * ExternalFArgument_ = {}; + jlm::rvsdg::RegionArgument * ExternalFArgument_ = {}; }; /** \brief GammaTest class @@ -1321,8 +1321,8 @@ class ImportTest final : public RvsdgTest jlm::llvm::lambda::node * lambda_f1; jlm::llvm::lambda::node * lambda_f2; - jlm::rvsdg::argument * import_d1; - jlm::rvsdg::argument * import_d2; + jlm::rvsdg::RegionArgument * import_d1; + jlm::rvsdg::RegionArgument * import_d2; private: std::unique_ptr @@ -1770,8 +1770,8 @@ class EscapedMemoryTest2 final : public RvsdgTest jlm::rvsdg::node * ReturnAddressMalloc; jlm::rvsdg::node * CallExternalFunction1Malloc; - jlm::rvsdg::argument * ExternalFunction1Import; - jlm::rvsdg::argument * ExternalFunction2Import; + jlm::rvsdg::RegionArgument * ExternalFunction1Import; + jlm::rvsdg::RegionArgument * ExternalFunction2Import; jlm::llvm::LoadNonVolatileNode * LoadNode; }; @@ -1804,7 +1804,7 @@ class EscapedMemoryTest3 final : public RvsdgTest jlm::llvm::delta::node * DeltaGlobal; - jlm::rvsdg::argument * ImportExternalFunction; + jlm::rvsdg::RegionArgument * ImportExternalFunction; jlm::llvm::CallNode * CallExternalFunction; @@ -2279,7 +2279,7 @@ class EscapingLocalFunctionTest final : public RvsdgTest return *LocalFuncRegister_; } - [[nodiscard]] const jlm::rvsdg::argument & + [[nodiscard]] const 
jlm::rvsdg::RegionArgument & GetLocalFunctionParam() const noexcept { JLM_ASSERT(LocalFuncParam_); @@ -2306,7 +2306,7 @@ class EscapingLocalFunctionTest final : public RvsdgTest jlm::llvm::delta::node * Global_ = {}; jlm::llvm::lambda::node * LocalFunc_ = {}; - jlm::rvsdg::argument * LocalFuncParam_ = {}; + jlm::rvsdg::RegionArgument * LocalFuncParam_ = {}; jlm::rvsdg::output * LocalFuncRegister_ = {}; jlm::rvsdg::node * LocalFuncParamAllocaNode_ = {}; jlm::llvm::lambda::node * ExportedFunc_ = {}; @@ -2438,7 +2438,7 @@ class VariadicFunctionTest1 final : public RvsdgTest return *LambdaG_; } - [[nodiscard]] rvsdg::argument & + [[nodiscard]] rvsdg::RegionArgument & GetImportH() const noexcept { JLM_ASSERT(ImportH_ != nullptr); @@ -2466,7 +2466,7 @@ class VariadicFunctionTest1 final : public RvsdgTest llvm::lambda::node * LambdaF_ = {}; llvm::lambda::node * LambdaG_ = {}; - rvsdg::argument * ImportH_ = {}; + rvsdg::RegionArgument * ImportH_ = {}; llvm::CallNode * CallH_ = {}; diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 1518c9e21..b8d5cd9bc 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -188,7 +188,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto SetupFunctionF = [&](lambda::output * g) { - auto SetupOuterTheta = [](jlm::rvsdg::region * region, jlm::rvsdg::argument * functionG) + auto SetupOuterTheta = [](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * functionG) { auto outerTheta = jlm::rvsdg::theta_node::create(region); auto otf = outerTheta->add_loopvar(functionG); @@ -282,12 +282,12 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() auto SetupFunctionF = [&](lambda::output * g) { auto SetupOuterTheta = [&](jlm::rvsdg::region * region, - jlm::rvsdg::argument * g, + jlm::rvsdg::RegionArgument * g, jlm::rvsdg::output * value, jlm::rvsdg::output * iOState, jlm::rvsdg::output * memoryState) { - auto SetupInnerTheta = 
[&](jlm::rvsdg::region * region, jlm::rvsdg::argument * g) + auto SetupInnerTheta = [&](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * g) { auto innerTheta = jlm::rvsdg::theta_node::create(region); auto thetaOutputG = innerTheta->add_loopvar(g); diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index 0736368ce..5f7debd2d 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -29,7 +29,7 @@ TestArgumentIterators() linkage::external_linkage); lambda->finalize({ lambda->fctargument(0) }); - std::vector functionArguments; + std::vector functionArguments; for (auto & argument : lambda->fctarguments()) functionArguments.push_back(&argument); @@ -67,7 +67,7 @@ TestArgumentIterators() lambda->finalize({ lambda->fctargument(0), cv }); - std::vector functionArguments; + std::vector functionArguments; for (auto & argument : lambda->fctarguments()) functionArguments.push_back(&argument); diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index 47e50ed41..c52424431 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -38,7 +38,7 @@ TestPhiCreation() return lambda->finalize({ iOStateArgument, memoryStateArgument }); }; - auto SetupF2 = [&](jlm::rvsdg::region * region, jlm::rvsdg::argument * f2) + auto SetupF2 = [&](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * f2) { auto lambda = lambda::node::create(region, f1type, "f2", linkage::external_linkage); auto ctxVarF2 = lambda->add_ctxvar(f2); @@ -111,7 +111,7 @@ TestRemovePhiArgumentsWhere() // Act & Assert // Try to remove phiArgument0 even though it is used auto numRemovedArguments = phiNode.RemovePhiArgumentsWhere( - [&](const jlm::rvsdg::argument & argument) + [&](const jlm::rvsdg::RegionArgument & argument) { return argument.index() == phiOutput0->argument()->index(); }); @@ -121,7 +121,7 @@ 
TestRemovePhiArgumentsWhere() // Remove phiArgument1 numRemovedArguments = phiNode.RemovePhiArgumentsWhere( - [&](const jlm::rvsdg::argument & argument) + [&](const jlm::rvsdg::RegionArgument & argument) { return argument.index() == 1; }); @@ -135,7 +135,7 @@ TestRemovePhiArgumentsWhere() // Try to remove anything else, but the only dead argument, i.e, phiArgument3 numRemovedArguments = phiNode.RemovePhiArgumentsWhere( - [&](const jlm::rvsdg::argument & argument) + [&](const jlm::rvsdg::RegionArgument & argument) { return argument.index() != phiArgument3->index(); }); @@ -145,7 +145,7 @@ TestRemovePhiArgumentsWhere() // Remove everything that is dead, i.e., phiArgument3 numRemovedArguments = phiNode.RemovePhiArgumentsWhere( - [&](const jlm::rvsdg::argument & argument) + [&](const jlm::rvsdg::RegionArgument & argument) { return true; }); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index eac59e739..4cf003ed9 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -289,7 +289,8 @@ TestPhi() auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); - auto setupF1 = [&](jlm::rvsdg::region & region, phi::rvoutput & rv2, jlm::rvsdg::argument & dx) + auto setupF1 = + [&](jlm::rvsdg::region & region, phi::rvoutput & rv2, jlm::rvsdg::RegionArgument & dx) { auto lambda1 = lambda::node::create(®ion, functionType, "f1", linkage::external_linkage); auto f2Argument = lambda1->add_ctxvar(rv2.argument()); @@ -304,7 +305,8 @@ TestPhi() return lambda1->finalize({ result }); }; - auto setupF2 = [&](jlm::rvsdg::region & region, phi::rvoutput & rv1, jlm::rvsdg::argument & dy) + auto setupF2 = + [&](jlm::rvsdg::region & region, phi::rvoutput & rv1, jlm::rvsdg::RegionArgument & dy) { auto lambda2 = lambda::node::create(®ion, functionType, "f2", linkage::external_linkage); auto f1Argument = 
lambda2->add_ctxvar(rv1.argument()); @@ -319,7 +321,7 @@ TestPhi() return lambda2->finalize({ result }); }; - auto setupF3 = [&](jlm::rvsdg::region & region, jlm::rvsdg::argument & dz) + auto setupF3 = [&](jlm::rvsdg::region & region, jlm::rvsdg::RegionArgument & dz) { auto lambda3 = lambda::node::create(®ion, functionType, "f3", linkage::external_linkage); auto zArgument = lambda3->add_ctxvar(&dz); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 5cfb9c86f..2cc7debf6 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -122,8 +122,10 @@ test_gamma() assert(subregion1->result(0)->origin() == subregion1->result(1)->origin()); assert(graph.root()->result(0)->origin() == graph.root()->result(1)->origin()); - auto argument0 = dynamic_cast(subregion0->result(6)->origin()); - auto argument1 = dynamic_cast(subregion1->result(6)->origin()); + auto argument0 = + dynamic_cast(subregion0->result(6)->origin()); + auto argument1 = + dynamic_cast(subregion1->result(6)->origin()); assert(argument0->input() == argument1->input()); } diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 947b73035..4dcd4dcc0 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -284,10 +284,10 @@ TestDivOperation() assert(lambdaResultOriginNode->ninputs() == 2); // Check first input - jlm::rvsdg::argument * DivInput0; + jlm::rvsdg::RegionArgument * DivInput0; assert( - DivInput0 = - dynamic_cast(lambdaResultOriginNode->input(0)->origin())); + DivInput0 = dynamic_cast( + lambdaResultOriginNode->input(0)->origin())); assert(dynamic_cast(&DivInput0->type())); assert(dynamic_cast(&DivInput0->type())->nbits() == 32); @@ -507,8 +507,8 @@ TestCompZeroExt() assert(AddOp->type().nbits() == 32); // Check add input0 - jlm::rvsdg::argument * AddInput0; - assert(AddInput0 = 
dynamic_cast(AddNode->input(0)->origin())); + jlm::rvsdg::RegionArgument * AddInput0; + assert(AddInput0 = dynamic_cast(AddNode->input(0)->origin())); assert(dynamic_cast(&AddInput0->type())); assert(dynamic_cast(&AddInput0->type())->nbits() == 32); diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index f7251e6a3..aa942bb4c 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -10,7 +10,7 @@ #include /** - * Test check for adding argument to input of wrong structural node. + * Test check for adding a region argument to input of wrong structural node. */ static int ArgumentNodeMismatch() diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 2de4efbc5..6d0c2b6e3 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -191,7 +191,7 @@ RemoveArgumentsWhere() assert(argument2.index() == 2); region.RemoveArgumentsWhere( - [](const jlm::rvsdg::argument & argument) + [](const jlm::rvsdg::RegionArgument & argument) { return true; }); @@ -200,7 +200,7 @@ RemoveArgumentsWhere() region.remove_node(node); region.RemoveArgumentsWhere( - [](const jlm::rvsdg::argument & argument) + [](const jlm::rvsdg::RegionArgument & argument) { return false; }); @@ -208,7 +208,7 @@ RemoveArgumentsWhere() assert(argument1.index() == 0); region.RemoveArgumentsWhere( - [](const jlm::rvsdg::argument & argument) + [](const jlm::rvsdg::RegionArgument & argument) { return argument.index() == 0; }); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 40052fced..78ddf8800 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -344,14 +344,14 @@ create_testop( return rvsdg::simple_node::create_normalized(region, op, { operands }); } -class TestGraphArgument final : public jlm::rvsdg::argument +class TestGraphArgument final : public jlm::rvsdg::RegionArgument { private: TestGraphArgument( jlm::rvsdg::region & region, 
jlm::rvsdg::structural_input * input, std::shared_ptr type) - : jlm::rvsdg::argument(®ion, input, type) + : jlm::rvsdg::RegionArgument(®ion, input, type) {} public: From 6a30988f8c402a2c0c239b9a26d885af23aa1e01 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 5 Sep 2024 21:48:46 +0200 Subject: [PATCH 069/170] Rename result class to RegionResult (#608) --- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.hpp | 8 ++--- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.hpp | 2 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 15 ++++---- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 10 +++--- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 6 ++-- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 9 ++--- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 6 ++-- jlm/hls/backend/rvsdg2rhls/mem-sep.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- .../rvsdg2rhls/remove-unused-state.cpp | 10 +++--- .../rvsdg2rhls/remove-unused-state.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp | 2 +- jlm/hls/ir/hls.hpp | 10 +++--- jlm/hls/util/view.cpp | 4 +-- jlm/llvm/ir/operators/Phi.hpp | 6 ++-- jlm/llvm/ir/operators/delta.hpp | 6 ++-- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/ir/operators/lambda.hpp | 8 ++--- jlm/llvm/opt/push.cpp | 4 +-- jlm/rvsdg/gamma.hpp | 6 ++-- jlm/rvsdg/graph.cpp | 2 +- jlm/rvsdg/graph.hpp | 2 +- jlm/rvsdg/region.cpp | 8 ++--- jlm/rvsdg/region.hpp | 34 +++++++++---------- jlm/rvsdg/structural-node.hpp | 5 ++- jlm/rvsdg/theta.hpp | 16 ++++----- tests/jlm/rvsdg/RegionTests.cpp | 6 ++-- tests/test-operation.hpp | 4 +-- 31 files changed, 102 insertions(+), 101 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 5cedadf48..3edbf9d50 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -62,7 +62,7 @@ 
BaseHLS::get_port_name(jlm::rvsdg::input * port) { result += "i"; } - else if (dynamic_cast(port)) + else if (dynamic_cast(port)) { result += "r"; } diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index db6fd4d57..64a400813 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -87,10 +87,10 @@ class BaseHLS return mem_resps; } - std::vector + std::vector get_mem_reqs(const llvm::lambda::node * lambda) { - std::vector mem_resps; + std::vector mem_resps; for (size_t i = 0; i < lambda->subregion()->nresults(); ++i) { if (dynamic_cast(&lambda->subregion()->result(i)->type())) @@ -117,10 +117,10 @@ class BaseHLS return args; } - std::vector + std::vector get_reg_results(const llvm::lambda::node * lambda) { - std::vector results; + std::vector results; for (size_t i = 0; i < lambda->subregion()->nresults(); ++i) { auto argtype = &lambda->subregion()->result(i)->type(); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index 7d5bcfcd1..75d2b3c52 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -45,7 +45,7 @@ DotHLS::argument_to_dot(rvsdg::RegionArgument * port) } std::string -DotHLS::result_to_dot(jlm::rvsdg::result * port) +DotHLS::result_to_dot(rvsdg::RegionResult * port) { auto name = get_port_name(port); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp index 328b2f297..841119524 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp @@ -25,7 +25,7 @@ class DotHLS : public BaseHLS argument_to_dot(rvsdg::RegionArgument * port); std::string - result_to_dot(jlm::rvsdg::result * port); + result_to_dot(rvsdg::RegionResult * port); std::string node_to_dot(const jlm::rvsdg::node * node); diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp 
b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 7a4f5b966..16f6b7d3a 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -21,11 +21,11 @@ IsPassthroughArgument(const rvsdg::RegionArgument & argument) return false; } - return rvsdg::is(**argument.begin()); + return rvsdg::is(**argument.begin()); } static bool -IsPassthroughResult(const rvsdg::result & result) +IsPassthroughResult(const rvsdg::RegionResult & result) { auto argument = dynamic_cast(result.origin()); return argument != nullptr; @@ -118,7 +118,7 @@ static void RemovePassthroughArgument(const rvsdg::RegionArgument & argument) { auto origin = argument.input()->origin(); - auto result = dynamic_cast(*argument.begin()); + auto result = dynamic_cast(*argument.begin()); argument.region()->node()->output(result->output()->index())->divert_users(origin); auto inputIndex = argument.input()->index(); @@ -139,7 +139,7 @@ RemoveUnusedStatesFromGammaNode(rvsdg::gamma_node & gammaNode) auto argument = gammaNode.subregion(0)->argument(i); if (argument->nusers() == 1) { - auto result = dynamic_cast(*argument->begin()); + auto result = dynamic_cast(*argument->begin()); resultIndex = result ? 
result->index() : resultIndex; } @@ -147,9 +147,10 @@ RemoveUnusedStatesFromGammaNode(rvsdg::gamma_node & gammaNode) for (size_t n = 0; n < gammaNode.nsubregions(); n++) { auto subregion = gammaNode.subregion(n); - shouldRemove &= IsPassthroughArgument(*subregion->argument(i)) - && dynamic_cast(*subregion->argument(i)->begin())->index() - == resultIndex; + shouldRemove &= + IsPassthroughArgument(*subregion->argument(i)) + && dynamic_cast(*subregion->argument(i)->begin())->index() + == resultIndex; } if (shouldRemove) diff --git a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp index 5b6e078c0..cf07760c9 100644 --- a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp @@ -80,7 +80,7 @@ class TraceAllocaUses trace(&arg); } } - else if (auto r = dynamic_cast(user)) + else if (auto r = dynamic_cast(user)) { if (auto ber = dynamic_cast(r)) { diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 37a6a7d1b..ba7dc61a2 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -112,7 +112,7 @@ is_slice_exclusive_input_( return false; } } - else if (dynamic_cast(source)) + else if (dynamic_cast(source)) { if (auto be = dynamic_cast(source)) { @@ -138,11 +138,11 @@ is_slice_exclusive_input_( } void -trace_to_loop_results(jlm::rvsdg::output * out, std::vector & results) +trace_to_loop_results(jlm::rvsdg::output * out, std::vector & results) { for (auto user : *out) { - if (auto res = dynamic_cast(user)) + if (auto res = dynamic_cast(user)) { results.push_back(res); } @@ -284,7 +284,7 @@ decouple_load( // redirect state edges to new loop outputs for (size_t i = 1; i < loadNode->noutputs() - 1; ++i) { - std::vector results; + std::vector results; trace_to_loop_results(loadNode->output(i), results); JLM_ASSERT(results.size() <= 2); for (auto res : results) @@ -345,7 +345,7 @@ decouple_load( // redirect 
mem_req_addr to dload_out[1] auto old_mem_req_res = - dynamic_cast(*loadNode->output(loadNode->noutputs() - 1)->begin()); + dynamic_cast(*loadNode->output(loadNode->noutputs() - 1)->begin()); auto old_mem_req_out = old_mem_req_res->output(); auto mem_req_in = *old_mem_req_out->begin(); mem_req_in->divert_to(dload_out[1]); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index e6db79b7f..414ed8cfa 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -212,7 +212,7 @@ trace_function_calls( trace_function_calls(&arg, calls, visited); } } - else if (auto r = dynamic_cast(user)) + else if (auto r = dynamic_cast(user)) { if (auto ber = dynamic_cast(r)) { @@ -423,7 +423,7 @@ trace_pointer_argument( trace_pointer_argument(&arg, load_nodes, store_nodes, decouple_nodes, visited); } } - else if (auto r = dynamic_cast(user)) + else if (auto r = dynamic_cast(user)) { if (auto ber = dynamic_cast(r)) { @@ -496,7 +496,7 @@ IsDecoupledFunctionPointer( isDecoupled |= IsDecoupledFunctionPointer(&arg, visited); } } - else if (auto result = dynamic_cast(user)) + else if (auto result = dynamic_cast(user)) { if (auto backedgeResult = dynamic_cast(result)) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index 8384093b0..fde25af0d 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -77,7 +77,7 @@ find_load_store( find_load_store(&arg, load_nodes, store_nodes, visited); } } - else if (auto r = dynamic_cast(user)) + else if (auto r = dynamic_cast(user)) { if (auto ber = dynamic_cast(r)) { @@ -121,7 +121,8 @@ find_loop_output(jlm::rvsdg::structural_input * sti) for (size_t j = 0; j < 2; ++j) { JLM_ASSERT(branch_out->node()->output(j)->nusers() == 1); - auto result = dynamic_cast(*branch_out->node()->output(j)->begin()); + auto result = + dynamic_cast(*branch_out->node()->output(j)->begin()); if 
(result) { return result->output(); @@ -201,7 +202,7 @@ separate_load_edge( JLM_ASSERT(mem_edge->region() == addr_edge->region()); auto user = *mem_edge->begin(); auto addr_edge_user = *addr_edge->begin(); - if (dynamic_cast(user)) + if (dynamic_cast(user)) { JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN"); // end of region reached @@ -412,7 +413,7 @@ process_loops(jlm::rvsdg::output * state_edge) // each iteration should update state_edge JLM_ASSERT(state_edge->nusers() == 1); auto user = *state_edge->begin(); - if (dynamic_cast(user)) + if (dynamic_cast(user)) { // end of region reached JLM_UNREACHABLE("This should never happen"); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index ca5683495..1a03342a2 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -52,7 +52,7 @@ GetMemoryStateArgument(const llvm::lambda::node & lambda) return nullptr; } -jlm::rvsdg::result * +rvsdg::RegionResult * GetMemoryStateResult(const llvm::lambda::node & lambda) { auto subregion = lambda.subregion(); @@ -170,7 +170,7 @@ mem_sep_independent(jlm::rvsdg::region * region) } } -jlm::rvsdg::result * +rvsdg::RegionResult * trace_edge( jlm::rvsdg::output * common_edge, jlm::rvsdg::output * new_edge, @@ -188,7 +188,7 @@ trace_edge( JLM_ASSERT(new_edge->nusers() == 1); auto user = *common_edge->begin(); auto new_next = *new_edge->begin(); - if (auto res = dynamic_cast(user)) + if (auto res = dynamic_cast(user)) { // end of region reached return res; diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp index 818b1b504..bd212835b 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp @@ -26,7 +26,7 @@ mem_sep_argument(llvm::RvsdgModule & rm); rvsdg::RegionArgument * GetMemoryStateArgument(const llvm::lambda::node & lambda); -jlm::rvsdg::result * +rvsdg::RegionResult * GetMemoryStateResult(const llvm::lambda::node & lambda); } 
// namespace jlm::hls diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 1bba9fe79..8f5ae6206 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -171,7 +171,7 @@ eliminate_gamma_eol(rvsdg::gamma_node * gamma) continue; } auto user = *o->begin(); - if (auto res = dynamic_cast(user)) + if (auto res = dynamic_cast(user)) { if (res->output() && res->output()->nusers() == 0) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index ef2fa0b7d..c1340d0e6 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -136,7 +136,7 @@ remove_gamma_passthrough(jlm::rvsdg::gamma_node * gn) auto arg = gn->subregion(0)->argument(i); if (arg->nusers() == 1) { - auto res = dynamic_cast(*arg->begin()); + auto res = dynamic_cast(*arg->begin()); res_index = res ? res->index() : res_index; } for (size_t n = 0; n < gn->nsubregions(); n++) @@ -145,7 +145,7 @@ remove_gamma_passthrough(jlm::rvsdg::gamma_node * gn) can_remove &= is_passthrough(sr->argument(i)) && // check that all subregions pass through to the same result - dynamic_cast(*sr->argument(i)->begin())->index() == res_index; + dynamic_cast(*sr->argument(i)->begin())->index() == res_index; } if (can_remove) { @@ -250,7 +250,7 @@ remove_lambda_passthrough(llvm::lambda::node * ln) void remove_region_passthrough(const rvsdg::RegionArgument * arg) { - auto res = dynamic_cast(*arg->begin()); + auto res = dynamic_cast(*arg->begin()); auto origin = arg->input()->origin(); // divert users of output to origin of input arg->region()->node()->output(res->output()->index())->divert_users(origin); @@ -262,7 +262,7 @@ remove_region_passthrough(const rvsdg::RegionArgument * arg) } bool -is_passthrough(const jlm::rvsdg::result * res) +is_passthrough(const rvsdg::RegionResult * res) { auto arg = 
dynamic_cast(res->origin()); if (arg) @@ -277,7 +277,7 @@ is_passthrough(const rvsdg::RegionArgument * arg) { if (arg->nusers() == 1) { - auto res = dynamic_cast(*arg->begin()); + auto res = dynamic_cast(*arg->begin()); // used only by a result if (res) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp index 193cc18bb..74532837c 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp @@ -18,7 +18,7 @@ bool is_passthrough(const rvsdg::RegionArgument * arg); bool -is_passthrough(const jlm::rvsdg::result * res); +is_passthrough(const rvsdg::RegionResult * res); llvm::lambda::node * remove_lambda_passthrough(llvm::lambda::node * ln); diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp index b45088413..c7c6596e1 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp @@ -68,7 +68,7 @@ remove_loop_passthrough(loop_node * ln) if (arg->nusers() == 1) { auto user = *arg->begin(); - if (auto result = dynamic_cast(user)) + if (auto result = dynamic_cast(user)) { auto out = result->output(); out->divert_users(in->origin()); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 38b144f3e..b105bde1e 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -679,7 +679,7 @@ class backedge_argument : public rvsdg::RegionArgument backedge_result * result_; }; -class backedge_result : public jlm::rvsdg::result +class backedge_result : public rvsdg::RegionResult { friend loop_node; friend backedge_argument; @@ -698,7 +698,7 @@ class backedge_result : public jlm::rvsdg::result private: backedge_result(jlm::rvsdg::output * origin) - : jlm::rvsdg::result(origin->region(), origin, nullptr, origin->Type()), + : rvsdg::RegionResult(origin->region(), origin, nullptr, origin->Type()), argument_(nullptr) {} @@ -716,7 +716,7 @@ class backedge_result : public 
jlm::rvsdg::result /** * Represents the exit result of the HLS loop. */ -class ExitResult final : public rvsdg::result +class ExitResult final : public rvsdg::RegionResult { friend loop_node; @@ -725,7 +725,7 @@ class ExitResult final : public rvsdg::result private: ExitResult(rvsdg::output & origin, rvsdg::structural_output & output) - : rvsdg::result(origin.region(), &origin, &output, origin.Type()) + : rvsdg::RegionResult(origin.region(), &origin, &output, origin.Type()) { JLM_ASSERT(rvsdg::is(origin.region()->node())); } @@ -768,7 +768,7 @@ class loop_node final : public jlm::rvsdg::structural_node return structural_node::subregion(0); } - inline jlm::rvsdg::result * + [[nodiscard]] rvsdg::RegionResult * predicate() const noexcept { auto result = subregion()->result(0); diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 4151ae717..270862b69 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -54,7 +54,7 @@ get_dot_name(jlm::rvsdg::output * output) std::string get_dot_name(jlm::rvsdg::input * input) { - if (dynamic_cast(input)) + if (dynamic_cast(input)) { return jlm::util::strfmt("r", hex((intptr_t)input), ":", "default"); } @@ -97,7 +97,7 @@ argument_to_dot(rvsdg::RegionArgument * argument) } std::string -result_to_dot(jlm::rvsdg::result * result) +result_to_dot(rvsdg::RegionResult * result) { auto display_name = jlm::util::strfmt("r", result->index()); auto dot_name = jlm::util::strfmt("r", hex((intptr_t)result)); diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 8b24a299c..f5eece551 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -755,7 +755,7 @@ class cvargument final : public rvsdg::RegionArgument /* phi recursion variable result class */ -class rvresult final : public jlm::rvsdg::result +class rvresult final : public rvsdg::RegionResult { friend class phi::builder; @@ -768,7 +768,7 @@ class rvresult final : public jlm::rvsdg::result jlm::rvsdg::output * origin, 
rvoutput * output, std::shared_ptr type) - : jlm::rvsdg::result(region, origin, output, std::move(type)) + : RegionResult(region, origin, output, std::move(type)) {} rvresult(const rvresult &) = delete; @@ -800,7 +800,7 @@ class rvresult final : public jlm::rvsdg::result rvoutput * output() const noexcept { - return static_cast(result::output()); + return static_cast(RegionResult::output()); } rvargument * diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 3e75b87ed..32015dcc6 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -439,7 +439,7 @@ class cvargument final : public rvsdg::RegionArgument /** \brief Delta result */ -class result final : public rvsdg::result +class result final : public rvsdg::RegionResult { friend ::jlm::llvm::delta::node; @@ -451,7 +451,7 @@ class result final : public rvsdg::result private: explicit result(rvsdg::output * origin) - : rvsdg::result(origin->region(), origin, nullptr, origin->Type()) + : rvsdg::RegionResult(origin->region(), origin, nullptr, origin->Type()) {} static result * @@ -466,7 +466,7 @@ class result final : public rvsdg::result delta::output * output() const noexcept { - return static_cast(rvsdg::result::output()); + return static_cast(rvsdg::RegionResult::output()); } }; diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 7491c236f..e367611b7 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -158,7 +158,7 @@ node::GetMemoryStateRegionArgument() const noexcept return *argument; } -rvsdg::result & +rvsdg::RegionResult & node::GetMemoryStateRegionResult() const noexcept { auto result = fctresult(nfctresults() - 1); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index a8af382dd..3f7f77ec7 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -312,7 +312,7 @@ class node final : public jlm::rvsdg::structural_node 
/** * @return The memory state result of the lambda subregion. */ - [[nodiscard]] rvsdg::result & + [[nodiscard]] rvsdg::RegionResult & GetMemoryStateRegionResult() const noexcept; /** @@ -629,7 +629,7 @@ class cvargument final : public rvsdg::RegionArgument /** \brief Lambda result */ -class result final : public jlm::rvsdg::result +class result final : public rvsdg::RegionResult { friend ::jlm::llvm::lambda::node; @@ -641,7 +641,7 @@ class result final : public jlm::rvsdg::result private: explicit result(jlm::rvsdg::output * origin) - : rvsdg::result(origin->region(), origin, nullptr, origin->Type()) + : rvsdg::RegionResult(origin->region(), origin, nullptr, origin->Type()) {} static result * @@ -656,7 +656,7 @@ class result final : public jlm::rvsdg::result lambda::output * output() const noexcept { - return jlm::util::AssertedCast(jlm::rvsdg::result::output()); + return jlm::util::AssertedCast(rvsdg::RegionResult::output()); } }; diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 7008d4969..8122bd93b 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -324,7 +324,7 @@ is_movable_store(jlm::rvsdg::node * node) if (output->nusers() != 1) return false; - if (!dynamic_cast(*output->begin())) + if (!dynamic_cast(*output->begin())) return false; } @@ -351,7 +351,7 @@ pushout_store(jlm::rvsdg::node * storenode) for (size_t n = 0; n < storenode->noutputs(); n++) { JLM_ASSERT(storenode->output(n)->nusers() == 1); - auto result = static_cast(*storenode->output(n)->begin()); + auto result = static_cast(*storenode->output(n)->begin()); result->divert_to(storenode->input(n + 2)->origin()); states.push_back(result->output()); } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 47c28ba74..aaa0a52a1 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -427,7 +427,7 @@ class gamma_output final : public structural_output return results.size(); } - inline jlm::rvsdg::result * + [[nodiscard]] RegionResult * result(size_t n) const 
noexcept { JLM_ASSERT(n < nresults()); @@ -489,7 +489,7 @@ class GammaArgument final : public RegionArgument /** * Represents a region result in a gamma subregion. */ -class GammaResult final : public result +class GammaResult final : public RegionResult { friend gamma_node; @@ -498,7 +498,7 @@ class GammaResult final : public result private: GammaResult(rvsdg::region & region, rvsdg::output & origin, gamma_output & gammaOutput) - : result(®ion, &origin, &gammaOutput, origin.Type()) + : RegionResult(®ion, &origin, &gammaOutput, origin.Type()) {} GammaResult & diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 046cd282f..278719d40 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -23,7 +23,7 @@ GraphImport::GraphImport( {} GraphExport::GraphExport(rvsdg::output & origin, std::string name) - : result(origin.region()->graph()->root(), &origin, nullptr, origin.Type()), + : RegionResult(origin.region()->graph()->root(), &origin, nullptr, origin.Type()), Name_(std::move(name)) {} diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index 88fe632f9..e0748e12c 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -44,7 +44,7 @@ class GraphImport : public RegionArgument /** * Represents an export from the RVSDG of an internal entity. 
*/ -class GraphExport : public result +class GraphExport : public RegionResult { protected: GraphExport(rvsdg::output & origin, std::string name); diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 8309363e3..598866206 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -43,7 +43,7 @@ RegionArgument::RegionArgument( } } -result::~result() noexcept +RegionResult::~RegionResult() noexcept { on_input_destroy(this); @@ -51,7 +51,7 @@ result::~result() noexcept output()->results.erase(this); } -result::result( +RegionResult::RegionResult( jlm::rvsdg::region * region, jlm::rvsdg::output * origin, jlm::rvsdg::structural_output * output, @@ -137,7 +137,7 @@ region::RemoveArgument(size_t index) } void -region::append_result(jlm::rvsdg::result * result) +region::append_result(RegionResult * result) { if (result->region() != this) throw jlm::util::error("Appending result to wrong region."); @@ -160,7 +160,7 @@ void region::RemoveResult(size_t index) { JLM_ASSERT(index < results_.size()); - jlm::rvsdg::result * result = results_[index]; + RegionResult * result = results_[index]; delete result; for (size_t n = index; n < results_.size() - 1; n++) diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 4adf4c1cc..f95be439c 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -96,33 +96,33 @@ class RegionArgument : public output * depends on the structural node the region is part of. A region result is either linked * with a \ref structural_output or is a standalone result. 
*/ -class result : public input +class RegionResult : public input { - util::intrusive_list_anchor structural_output_anchor_; + util::intrusive_list_anchor structural_output_anchor_; public: - typedef util::intrusive_list_accessor + typedef util::intrusive_list_accessor structural_output_accessor; - ~result() noexcept override; + ~RegionResult() noexcept override; protected: - result( + RegionResult( rvsdg::region * region, rvsdg::output * origin, structural_output * output, std::shared_ptr type); public: - result(const result &) = delete; + RegionResult(const RegionResult &) = delete; - result(result &&) = delete; + RegionResult(RegionResult &&) = delete; - result & - operator=(const result &) = delete; + RegionResult & + operator=(const RegionResult &) = delete; - result & - operator=(result &&) = delete; + RegionResult & + operator=(RegionResult &&) = delete; [[nodiscard]] structural_output * output() const noexcept @@ -139,7 +139,7 @@ class result : public input * * @return A reference to the copied result. */ - virtual result & + virtual RegionResult & Copy(rvsdg::output & origin, structural_output * output) = 0; private: @@ -280,7 +280,7 @@ class region * Multiple invocations of append_result for the same result are undefined. */ void - append_result(jlm::rvsdg::result * result); + append_result(RegionResult * result); /** * Removes a result from the region given a results' index. @@ -293,7 +293,7 @@ class region * runtime is therefore O(n), where n is the region's number of results. * * \see nresults() - * \see result#index() + * \see RegionResult#index() */ void RemoveResult(size_t index); @@ -301,7 +301,7 @@ class region /** * Remove all results that match the condition specified by \p match. * - * @tparam F A type that supports the function call operator: bool operator(const result&) + * @tparam F A type that supports the function call operator: bool operator(const RegionResult&) * @param match Defines the condition for the results to remove. 
*/ template @@ -339,7 +339,7 @@ class region return results_.size(); } - inline jlm::rvsdg::result * + [[nodiscard]] RegionResult * result(size_t index) const noexcept { JLM_ASSERT(index < nresults()); @@ -483,7 +483,7 @@ class region size_t index_; jlm::rvsdg::graph * graph_; jlm::rvsdg::structural_node * node_; - std::vector results_; + std::vector results_; std::vector arguments_; }; diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 951eb129d..72c0fee0f 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -102,9 +102,8 @@ class structural_input : public node_input /* structural output class */ -typedef jlm::util:: - intrusive_list - result_list; +typedef jlm::util::intrusive_list + result_list; class structural_output : public node_output { diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 5fd8e9fcc..581e84efa 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -106,7 +106,7 @@ class theta_node final : public structural_node return structural_node::subregion(0); } - inline jlm::rvsdg::result * + [[nodiscard]] RegionResult * predicate() const noexcept { auto result = subregion()->result(0); @@ -287,7 +287,7 @@ class theta_input final : public structural_input return arguments.first(); } - jlm::rvsdg::result * + [[nodiscard]] inline RegionResult * result() const noexcept; private: @@ -333,7 +333,7 @@ class theta_output final : public structural_output return input_->argument(); } - inline jlm::rvsdg::result * + [[nodiscard]] RegionResult * result() const noexcept { JLM_ASSERT(results.size() == 1); @@ -376,7 +376,7 @@ class ThetaArgument final : public RegionArgument /** * Represents a region result in a theta subregion. 
*/ -class ThetaResult final : public result +class ThetaResult final : public RegionResult { friend theta_node; @@ -388,7 +388,7 @@ class ThetaResult final : public result private: ThetaResult(rvsdg::output & origin, theta_output & thetaOutput) - : result(origin.region(), &origin, &thetaOutput, origin.Type()) + : RegionResult(origin.region(), &origin, &thetaOutput, origin.Type()) { JLM_ASSERT(is(origin.region()->node())); } @@ -405,7 +405,7 @@ class ThetaResult final : public result /** * Represents the predicate result of a theta subregion. */ -class ThetaPredicateResult final : public result +class ThetaPredicateResult final : public RegionResult { friend theta_node; @@ -417,7 +417,7 @@ class ThetaPredicateResult final : public result private: explicit ThetaPredicateResult(rvsdg::output & origin) - : result(origin.region(), &origin, nullptr, ctltype::Create(2)) + : RegionResult(origin.region(), &origin, nullptr, ctltype::Create(2)) { JLM_ASSERT(is(origin.region()->node())); } @@ -499,7 +499,7 @@ theta_node::RemoveThetaInputsWhere(const F & match) /* theta input method definitions */ -inline jlm::rvsdg::result * +[[nodiscard]] inline RegionResult * theta_input::result() const noexcept { return output_->result(); diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 6d0c2b6e3..acee0ced5 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -136,7 +136,7 @@ RemoveResultsWhere() assert(result2.index() == 2); region.RemoveResultsWhere( - [](const jlm::rvsdg::result & result) + [](const jlm::rvsdg::RegionResult & result) { return result.index() == 1; }); @@ -145,7 +145,7 @@ RemoveResultsWhere() assert(result2.index() == 1); region.RemoveResultsWhere( - [](const jlm::rvsdg::result & result) + [](const jlm::rvsdg::RegionResult & result) { return false; }); @@ -154,7 +154,7 @@ RemoveResultsWhere() assert(result2.index() == 1); region.RemoveResultsWhere( - [](const jlm::rvsdg::result & result) + [](const 
jlm::rvsdg::RegionResult & result) { return true; }); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 78ddf8800..92f23f77b 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -373,14 +373,14 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument } }; -class TestGraphResult final : public jlm::rvsdg::result +class TestGraphResult final : public jlm::rvsdg::RegionResult { private: TestGraphResult( jlm::rvsdg::region & region, jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) - : jlm::rvsdg::result(®ion, &origin, output, origin.Type()) + : jlm::rvsdg::RegionResult(®ion, &origin, output, origin.Type()) {} TestGraphResult(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) From 05e0fd92ebf42aed171424687f7e04b1bb9c780a Mon Sep 17 00:00:00 2001 From: halvorlinder <56249210+halvorlinder@users.noreply.github.com> Date: Fri, 6 Sep 2024 21:11:55 +0200 Subject: [PATCH 070/170] Add fp128 (#606) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Tests involving the long double type would fail on ARM processors, because of a discrepancy in bit-length. A new fpsize is now added for ARM. 
--------- Co-authored-by: Håvard Krogstie Co-authored-by: Magnus Sjalander Co-authored-by: Nico Reissmann --- jlm/llvm/Makefile.sub | 1 + jlm/llvm/backend/jlm2llvm/type.cpp | 3 +- .../frontend/LlvmInstructionConversion.cpp | 3 +- jlm/llvm/frontend/LlvmTypeConversion.cpp | 7 ++- jlm/llvm/ir/types.cpp | 8 +++- jlm/llvm/ir/types.hpp | 3 +- .../frontend/llvm/LlvmTypeConversionTests.cpp | 48 +++++++++++++++++++ 7 files changed, 67 insertions(+), 6 deletions(-) create mode 100644 tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 92d9be7ec..69b660e9e 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -142,6 +142,7 @@ libllvm_TESTS += \ tests/jlm/llvm/backend/llvm/jlm-llvm/test-function-calls \ tests/jlm/llvm/backend/llvm/jlm-llvm/test-select-with-state \ tests/jlm/llvm/backend/llvm/jlm-llvm/test-type-conversion \ + tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests \ tests/jlm/llvm/frontend/llvm/LoadTests \ tests/jlm/llvm/frontend/llvm/MemCpyTests \ tests/jlm/llvm/frontend/llvm/StoreTests \ diff --git a/jlm/llvm/backend/jlm2llvm/type.cpp b/jlm/llvm/backend/jlm2llvm/type.cpp index 6136899e3..f93469242 100644 --- a/jlm/llvm/backend/jlm2llvm/type.cpp +++ b/jlm/llvm/backend/jlm2llvm/type.cpp @@ -85,7 +85,8 @@ convert(const fptype & type, context & ctx) { { fpsize::half, ::llvm::Type::getHalfTy }, { fpsize::flt, ::llvm::Type::getFloatTy }, { fpsize::dbl, ::llvm::Type::getDoubleTy }, - { fpsize::x86fp80, ::llvm::Type::getX86_FP80Ty } }); + { fpsize::x86fp80, ::llvm::Type::getX86_FP80Ty }, + { fpsize::fp128, ::llvm::Type::getFP128Ty } }); JLM_ASSERT(map.find(type.size()) != map.end()); return map[type.size()](ctx.llvm_module().getContext()); diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index 5614e5c19..78609ec51 100644 --- a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp 
@@ -1021,7 +1021,8 @@ convert_binary_operator(::llvm::Instruction * instruction, tacsvector_t & tacs, { { ::llvm::Type::HalfTyID, fpsize::half }, { ::llvm::Type::FloatTyID, fpsize::flt }, { ::llvm::Type::DoubleTyID, fpsize::dbl }, - { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 } }); + { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 }, + { ::llvm::Type::FP128TyID, fpsize::fp128 } }); std::unique_ptr operation; auto t = i->getType()->isVectorTy() ? i->getType()->getScalarType() : i->getType(); diff --git a/jlm/llvm/frontend/LlvmTypeConversion.cpp b/jlm/llvm/frontend/LlvmTypeConversion.cpp index 286e30b23..8ace3b345 100644 --- a/jlm/llvm/frontend/LlvmTypeConversion.cpp +++ b/jlm/llvm/frontend/LlvmTypeConversion.cpp @@ -23,7 +23,8 @@ ExtractFloatingPointSize(const ::llvm::Type * type) { { ::llvm::Type::HalfTyID, fpsize::half }, { ::llvm::Type::FloatTyID, fpsize::flt }, { ::llvm::Type::DoubleTyID, fpsize::dbl }, - { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 } }); + { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 }, + { ::llvm::Type::FP128TyID, fpsize::fp128 } }); auto i = map.find(type->getTypeID()); JLM_ASSERT(i != map.end()); @@ -78,7 +79,8 @@ convert_fp_type(const ::llvm::Type * t, context & ctx) { { ::llvm::Type::HalfTyID, fpsize::half }, { ::llvm::Type::FloatTyID, fpsize::flt }, { ::llvm::Type::DoubleTyID, fpsize::dbl }, - { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 } }); + { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 }, + { ::llvm::Type::FP128TyID, fpsize::fp128 } }); auto i = map.find(t->getTypeID()); JLM_ASSERT(i != map.end()); @@ -139,6 +141,7 @@ ConvertType(const ::llvm::Type * t, context & ctx) { ::llvm::Type::FloatTyID, convert_fp_type }, { ::llvm::Type::DoubleTyID, convert_fp_type }, { ::llvm::Type::X86_FP80TyID, convert_fp_type }, + { ::llvm::Type::FP128TyID, convert_fp_type }, { ::llvm::Type::StructTyID, convert_struct_type }, { ::llvm::Type::ArrayTyID, convert_array_type }, { ::llvm::Type::FixedVectorTyID, convert_fixed_vector_type }, diff --git 
a/jlm/llvm/ir/types.cpp b/jlm/llvm/ir/types.cpp index 5a87fae8c..5b7217986 100644 --- a/jlm/llvm/ir/types.cpp +++ b/jlm/llvm/ir/types.cpp @@ -182,7 +182,8 @@ fptype::debug_string() const static std::unordered_map map({ { fpsize::half, "half" }, { fpsize::flt, "float" }, { fpsize::dbl, "double" }, - { fpsize::x86fp80, "x86fp80" } }); + { fpsize::x86fp80, "x86fp80" }, + { fpsize::fp128, "fp128" } }); JLM_ASSERT(map.find(size()) != map.end()); return map[size()]; @@ -229,6 +230,11 @@ fptype::Create(fpsize size) static const fptype instance(fpsize::x86fp80); return std::shared_ptr(std::shared_ptr(), &instance); } + case fpsize::fp128: + { + static const fptype instance(fpsize::fp128); + return std::shared_ptr(std::shared_ptr(), &instance); + } default: { JLM_UNREACHABLE("unknown fpsize"); diff --git a/jlm/llvm/ir/types.hpp b/jlm/llvm/ir/types.hpp index 12f985a1f..104cac2f1 100644 --- a/jlm/llvm/ir/types.hpp +++ b/jlm/llvm/ir/types.hpp @@ -181,7 +181,8 @@ enum class fpsize half, flt, dbl, - x86fp80 + x86fp80, + fp128 }; class fptype final : public jlm::rvsdg::valuetype diff --git a/tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests.cpp new file mode 100644 index 000000000..3333c4752 --- /dev/null +++ b/tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests.cpp @@ -0,0 +1,48 @@ +/* + * Copyright 2024 Halvor Linder Henriksen + * See COPYING for terms of redistribution. 
+ */ + +#include +#include + +#include +#include + +static void +TestTypeConversion( + jlm::llvm::context & jlm_context, + llvm::Type * llvm_type, + jlm::llvm::fpsize jlm_type_size) +{ + using namespace llvm; + + auto jlm_type = jlm::llvm::ConvertType(llvm_type, jlm_context); + auto floating_point_type = dynamic_cast(jlm_type.get()); + + assert(floating_point_type && floating_point_type->size() == jlm_type_size); +} + +static int +TypeConversion() +{ + using namespace jlm::llvm; + + llvm::LLVMContext llvm_ctx; + llvm::Module lm("module", llvm_ctx); + + ipgraph_module im(jlm::util::filepath(""), "", ""); + auto jlm_ctx = context(im); + + TestTypeConversion(jlm_ctx, ::llvm::Type::getHalfTy(llvm_ctx), jlm::llvm::fpsize::half); + TestTypeConversion(jlm_ctx, ::llvm::Type::getFloatTy(llvm_ctx), jlm::llvm::fpsize::flt); + TestTypeConversion(jlm_ctx, ::llvm::Type::getDoubleTy(llvm_ctx), jlm::llvm::fpsize::dbl); + TestTypeConversion(jlm_ctx, ::llvm::Type::getX86_FP80Ty(llvm_ctx), jlm::llvm::fpsize::x86fp80); + TestTypeConversion(jlm_ctx, ::llvm::Type::getFP128Ty(llvm_ctx), jlm::llvm::fpsize::fp128); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/frontend/llvm/LlvmTypeConversionTests-TypeConversion", + TypeConversion) From 57186c7066d619797e688caf6e2ae14b63cf73aa Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 7 Sep 2024 07:12:58 +0200 Subject: [PATCH 071/170] Rename gamma_op class to GammaOperation (#609) Co-authored-by: Magnus Sjalander --- jlm/hls/opt/cne.cpp | 10 +++++----- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 4 ++-- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 4 ++-- .../alias-analyses/TopDownMemoryNodeEliminator.cpp | 2 +- jlm/llvm/opt/cne.cpp | 10 +++++----- jlm/llvm/opt/inversion.cpp | 4 ++-- jlm/llvm/opt/push.cpp | 2 +- jlm/llvm/opt/reduction.cpp | 2 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 2 +- jlm/rvsdg/gamma.cpp | 14 +++++++------- jlm/rvsdg/gamma.hpp | 11 ++++++----- jlm/rvsdg/view.cpp | 2 
+- tests/jlm/llvm/opt/test-inversion.cpp | 8 ++++---- tests/jlm/llvm/opt/test-unroll.cpp | 4 ++-- tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp | 4 ++-- tests/jlm/rvsdg/test-gamma.cpp | 12 ++++++------ 17 files changed, 49 insertions(+), 48 deletions(-) diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index dcbb9777b..3d872c813 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -224,7 +224,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & } } - if (jlm::rvsdg::is(n1) && n1 == n2) + if (rvsdg::is(n1) && n1 == n2) { auto so1 = static_cast(o1); auto so2 = static_cast(o2); @@ -293,7 +293,7 @@ mark(jlm::rvsdg::region *, cnectx &); static void mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node->operation())); + JLM_ASSERT(rvsdg::is(node->operation())); /* mark entry variables */ for (size_t i1 = 1; i1 < node->ninputs(); i1++) @@ -413,7 +413,7 @@ mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) { static std:: unordered_map - map({ { std::type_index(typeid(jlm::rvsdg::gamma_op)), mark_gamma }, + map({ { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, { std::type_index(typeid(jlm::rvsdg::theta_op)), mark_theta }, { std::type_index(typeid(jlm::hls::loop_op)), mark_loop }, { typeid(llvm::lambda::operation), mark_lambda }, @@ -507,7 +507,7 @@ divert(jlm::rvsdg::region *, cnectx &); static void divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node)); + JLM_ASSERT(rvsdg::is(node)); auto gamma = static_cast(node); for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) @@ -575,7 +575,7 @@ static void divert(jlm::rvsdg::structural_node * node, cnectx & ctx) { static std::unordered_map map( - { { std::type_index(typeid(jlm::rvsdg::gamma_op)), divert_gamma }, + { { std::type_index(typeid(rvsdg::GammaOperation)), divert_gamma }, { std::type_index(typeid(jlm::rvsdg::theta_op)), divert_theta 
}, { std::type_index(typeid(jlm::hls::loop_op)), divert_loop }, { typeid(llvm::lambda::operation), divert_lambda }, diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 512822ee8..30b02c54b 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -229,7 +229,7 @@ convert_empty_gamma_node(const rvsdg::gamma_node * gamma, context & ctx) static inline void convert_gamma_node(const rvsdg::node & node, context & ctx) { - JLM_ASSERT(is(&node)); + JLM_ASSERT(is(&node)); auto gamma = static_cast(&node); auto nalternatives = gamma->nsubregions(); auto predicate = gamma->predicate()->origin(); @@ -519,7 +519,7 @@ convert_node(const rvsdg::node & node, context & ctx) static std:: unordered_map> map({ { typeid(lambda::operation), convert_lambda_node }, - { std::type_index(typeid(rvsdg::gamma_op)), convert_gamma_node }, + { std::type_index(typeid(rvsdg::GammaOperation)), convert_gamma_node }, { std::type_index(typeid(rvsdg::theta_op)), convert_theta_node }, { typeid(phi::operation), convert_phi_node }, { typeid(delta::operation), convert_delta_node } }); diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 0b218f5d6..ea857092b 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -366,7 +366,7 @@ DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) con static std::unordered_map< std::type_index, std::function> - map({ { typeid(jlm::rvsdg::gamma_op), sweepGamma }, + map({ { typeid(rvsdg::GammaOperation), sweepGamma }, { typeid(jlm::rvsdg::theta_op), sweepTheta }, { typeid(lambda::operation), sweepLambda }, { typeid(phi::operation), sweepPhi }, diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 0789465e5..5e9f929ea 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ 
-94,7 +94,7 @@ InvariantValueRedirection::RedirectInRootRegion(rvsdg::graph & rvsdg) void InvariantValueRedirection::RedirectInRegion(rvsdg::region & region) { - auto isGammaNode = is(region.node()); + auto isGammaNode = is(region.node()); auto isThetaNode = is(region.node()); auto isLambdaNode = is(region.node()); JLM_ASSERT(isGammaNode || isThetaNode || isLambdaNode); @@ -127,7 +127,7 @@ InvariantValueRedirection::RedirectInRegion(rvsdg::region & region) void InvariantValueRedirection::RedirectInSubregions(rvsdg::structural_node & structuralNode) { - auto isGammaNode = is(&structuralNode); + auto isGammaNode = is(&structuralNode); auto isThetaNode = is(&structuralNode); JLM_ASSERT(isGammaNode || isThetaNode); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 8f18a77fa..20b1627af 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -502,7 +502,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::region & region) { auto isLambdaSubregion = rvsdg::is(region.node()); auto isThetaSubregion = rvsdg::is(region.node()); - auto isGammaSubregion = rvsdg::is(region.node()); + auto isGammaSubregion = rvsdg::is(region.node()); JLM_ASSERT(isLambdaSubregion || isThetaSubregion || isGammaSubregion); // Process the intra-procedural nodes top-down. 
diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index d8129f368..3ec3d247a 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -208,7 +208,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return congruent(r1->origin(), r2->origin(), vs, ctx); } - if (jlm::rvsdg::is(n1) && n1 == n2) + if (rvsdg::is(n1) && n1 == n2) { auto so1 = static_cast(o1); auto so2 = static_cast(o2); @@ -277,7 +277,7 @@ mark(jlm::rvsdg::region *, cnectx &); static void mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node->operation())); + JLM_ASSERT(rvsdg::is(node->operation())); /* mark entry variables */ for (size_t i1 = 1; i1 < node->ninputs(); i1++) @@ -375,7 +375,7 @@ mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) { static std:: unordered_map - map({ { std::type_index(typeid(jlm::rvsdg::gamma_op)), mark_gamma }, + map({ { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, { std::type_index(typeid(jlm::rvsdg::theta_op)), mark_theta }, { typeid(lambda::operation), mark_lambda }, { typeid(phi::operation), mark_phi }, @@ -468,7 +468,7 @@ divert(jlm::rvsdg::region *, cnectx &); static void divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node)); + JLM_ASSERT(rvsdg::is(node)); auto gamma = static_cast(node); for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) @@ -528,7 +528,7 @@ static void divert(jlm::rvsdg::structural_node * node, cnectx & ctx) { static std::unordered_map map( - { { std::type_index(typeid(jlm::rvsdg::gamma_op)), divert_gamma }, + { { std::type_index(typeid(rvsdg::GammaOperation)), divert_gamma }, { std::type_index(typeid(jlm::rvsdg::theta_op)), divert_theta }, { typeid(lambda::operation), divert_lambda }, { typeid(phi::operation), divert_phi }, diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index ad166f401..265a27e6b 100644 --- a/jlm/llvm/opt/inversion.cpp +++ 
b/jlm/llvm/opt/inversion.cpp @@ -64,7 +64,7 @@ is_applicable(const jlm::rvsdg::theta_node * theta) if (user == theta->predicate()) continue; - if (!rvsdg::is(rvsdg::input::GetNode(*user))) + if (!rvsdg::is(rvsdg::input::GetNode(*user))) return nullptr; gnode = dynamic_cast(rvsdg::input::GetNode(*user)); @@ -94,7 +94,7 @@ static std::vector> collect_condition_nodes(jlm::rvsdg::structural_node * tnode, jlm::rvsdg::structural_node * gnode) { JLM_ASSERT(jlm::rvsdg::is(tnode)); - JLM_ASSERT(jlm::rvsdg::is(gnode)); + JLM_ASSERT(rvsdg::is(gnode)); JLM_ASSERT(gnode->region()->node() == tnode); std::vector> nodes; diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 8122bd93b..d69b07e29 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -97,7 +97,7 @@ has_side_effects(const jlm::rvsdg::node * node) static std::vector copy_from_gamma(jlm::rvsdg::node * node, size_t r) { - JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); + JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); JLM_ASSERT(node->depth() == 0); auto target = node->region()->node()->region(); diff --git a/jlm/llvm/opt/reduction.cpp b/jlm/llvm/opt/reduction.cpp index 2e7421549..67148872a 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -83,7 +83,7 @@ enable_load_reductions(jlm::rvsdg::graph & graph) static void enable_gamma_reductions(jlm::rvsdg::graph & graph) { - auto nf = jlm::rvsdg::gamma_op::normal_form(&graph); + auto nf = rvsdg::GammaOperation::normal_form(&graph); nf->set_mutable(true); nf->set_predicate_reduction(true); // set_control_constante_reduction cause a PHI node input type error diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 972c64f62..34b7d5ba6 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -431,7 +431,7 @@ JlmToMlirConverter::ConvertGamma( ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs) { - auto & gammaOp = 
*util::AssertedCast(&gammaNode.operation()); + auto & gammaOp = *util::AssertedCast(&gammaNode.operation()); ::llvm::SmallVector<::mlir::Type> typeRangeOuput; for (size_t i = 0; i < gammaNode.noutputs(); ++i) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 24f79fd29..0ed36061b 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -244,25 +244,25 @@ gamma_normal_form::set_control_constant_reduction(bool enable) /* gamma operation */ -gamma_op::~gamma_op() noexcept +GammaOperation::~GammaOperation() noexcept {} std::string -gamma_op::debug_string() const +GammaOperation::debug_string() const { return "GAMMA"; } std::unique_ptr -gamma_op::copy() const +GammaOperation::copy() const { - return std::unique_ptr(new gamma_op(*this)); + return std::unique_ptr(new GammaOperation(*this)); } bool -gamma_op::operator==(const operation & other) const noexcept +GammaOperation::operator==(const operation & other) const noexcept { - auto op = dynamic_cast(&other); + auto op = dynamic_cast(&other); return op && op->nalternatives_ == nalternatives_; } @@ -408,6 +408,6 @@ static void __attribute__((constructor)) register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::gamma_op), + typeid(jlm::rvsdg::GammaOperation), gamma_node_get_default_normal_form_); } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index aaa0a52a1..cfc77f8ff 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -68,12 +68,12 @@ class gamma_normal_form final : public structural_normal_form class output; class type; -class gamma_op final : public structural_op +class GammaOperation final : public structural_op { public: - virtual ~gamma_op() noexcept; + ~GammaOperation() noexcept override; - inline constexpr gamma_op(size_t nalternatives) noexcept + explicit constexpr GammaOperation(size_t nalternatives) noexcept : structural_op(), nalternatives_(nalternatives) {} @@ -96,7 +96,8 @@ class gamma_op final : public structural_op static 
jlm::rvsdg::gamma_normal_form * normal_form(jlm::rvsdg::graph * graph) noexcept { - return static_cast(graph->node_normal_form(typeid(gamma_op))); + return static_cast( + graph->node_normal_form(typeid(GammaOperation))); } private: @@ -453,7 +454,7 @@ class gamma_output final : public structural_output /* gamma node method definitions */ inline gamma_node::gamma_node(jlm::rvsdg::output * predicate, size_t nalternatives) - : structural_node(jlm::rvsdg::gamma_op(nalternatives), predicate->region(), nalternatives) + : structural_node(GammaOperation(nalternatives), predicate->region(), nalternatives) { node::add_input(std::unique_ptr( new gamma_input(this, predicate, ctltype::Create(nalternatives)))); diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 7163b6d2a..dbae3553c 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -266,7 +266,7 @@ edge_tag(const std::string & srcid, const std::string & dstid) static inline std::string type(const jlm::rvsdg::node * n) { - if (dynamic_cast(&n->operation())) + if (dynamic_cast(&n->operation())) return "gamma"; if (dynamic_cast(&n->operation())) diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index b5cad8218..dc09d320f 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -70,9 +70,9 @@ test1() tginversion.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex1.origin()))); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex2.origin()))); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex3.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex1.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex2.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex3.origin()))); } static inline void @@ -117,7 +117,7 @@ test2() tginversion.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - 
assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); } static int diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index bec3997a4..9b7faa50f 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -263,9 +263,9 @@ test_unknown_boundaries() // jlm::rvsdg::view(graph, stdout); auto node = jlm::rvsdg::node_output::node(ex1.origin()); - assert(jlm::rvsdg::is(node)); + assert(jlm::rvsdg::is(node)); node = jlm::rvsdg::node_output::node(node->input(1)->origin()); - assert(jlm::rvsdg::is(node)); + assert(jlm::rvsdg::is(node)); /* Create cleaner output */ DeadNodeElimination dne; diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 4dcd4dcc0..ec1b8e4a6 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -844,10 +844,10 @@ TestGammaOp() assert( gammaOutput = dynamic_cast(lambdaRegion->result(0)->origin())); jlm::rvsdg::node * gammaNode = gammaOutput->node(); - assert(is(gammaNode->operation())); + assert(is(gammaNode->operation())); std::cout << "Checking gamma operation" << std::endl; - auto gammaOp = dynamic_cast(&gammaNode->operation()); + auto gammaOp = dynamic_cast(&gammaNode->operation()); assert(gammaNode->ninputs() == 3); assert(gammaOp->nalternatives() == 3); assert(gammaNode->noutputs() == 2); diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index b2f013be0..a8b274e90 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -33,13 +33,13 @@ test_gamma(void) jlm::tests::GraphExport::Create(*gamma->output(0), "dummy"); - assert(gamma && gamma->operation() == jlm::rvsdg::gamma_op(3)); + assert(gamma && gamma->operation() == GammaOperation(3)); /* test gamma copy */ auto gamma2 = static_cast(gamma)->copy(graph.root(), { 
pred, v0, v1, v2 }); view(graph.root(), stdout); - assert(is(gamma2)); + assert(is(gamma2)); /* test entry and exit variable iterators */ @@ -54,7 +54,7 @@ test_predicate_reduction(void) using namespace jlm::rvsdg; jlm::rvsdg::graph graph; - gamma_op::normal_form(&graph)->set_predicate_reduction(true); + GammaOperation::normal_form(&graph)->set_predicate_reduction(true); bittype bits2(2); @@ -88,7 +88,7 @@ test_invariant_reduction(void) auto vtype = jlm::tests::valuetype::Create(); jlm::rvsdg::graph graph; - gamma_op::normal_form(&graph)->set_invariant_reduction(true); + GammaOperation::normal_form(&graph)->set_invariant_reduction(true); auto pred = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), ""); auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); @@ -113,7 +113,7 @@ test_control_constant_reduction() using namespace jlm::rvsdg; jlm::rvsdg::graph graph; - gamma_op::normal_form(&graph)->set_control_constant_reduction(true); + GammaOperation::normal_form(&graph)->set_control_constant_reduction(true); auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(1), "x"); @@ -151,7 +151,7 @@ test_control_constant_reduction2() using namespace jlm::rvsdg; jlm::rvsdg::graph graph; - gamma_op::normal_form(&graph)->set_control_constant_reduction(true); + GammaOperation::normal_form(&graph)->set_control_constant_reduction(true); auto import = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), "import"); From 4a2b7d67a101f3c4bd05658dbe0b46d779bbbfd9 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 9 Sep 2024 03:57:02 +0200 Subject: [PATCH 072/170] Rename gamma_node class to GammaNode (#610) --- .../backend/rvsdg2rhls/GammaConversion.cpp | 10 ++-- .../backend/rvsdg2rhls/GammaConversion.hpp | 2 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- .../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp 
| 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 14 ++--- jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp | 2 +- .../rvsdg2rhls/remove-unused-state.cpp | 4 +- .../rvsdg2rhls/remove-unused-state.hpp | 2 +- jlm/hls/opt/cne.cpp | 2 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 4 +- .../InterProceduralGraphConversion.cpp | 4 +- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +- jlm/llvm/opt/DeadNodeElimination.hpp | 4 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 4 +- jlm/llvm/opt/InvariantValueRedirection.hpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.hpp | 2 +- .../alias-analyses/MemoryNodeProvisioning.hpp | 4 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 8 +-- .../opt/alias-analyses/MemoryStateEncoder.hpp | 6 +-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 4 +- jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 4 +- .../TopDownMemoryNodeEliminator.cpp | 14 ++--- .../TopDownMemoryNodeEliminator.hpp | 4 +- jlm/llvm/opt/cne.cpp | 2 +- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 13 +++-- jlm/llvm/opt/pull.cpp | 16 +++--- jlm/llvm/opt/pull.hpp | 8 +-- jlm/llvm/opt/push.cpp | 8 +-- jlm/llvm/opt/push.hpp | 4 +- jlm/llvm/opt/unroll.cpp | 4 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 4 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 2 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 2 +- jlm/rvsdg/gamma.cpp | 28 +++++----- jlm/rvsdg/gamma.hpp | 54 +++++++++---------- tests/TestRvsdgs.cpp | 8 +-- tests/TestRvsdgs.hpp | 8 +-- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 4 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 2 +- .../backend/llvm/r2j/test-empty-gamma.cpp | 6 +-- .../backend/llvm/r2j/test-partial-gamma.cpp | 2 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 4 +- .../opt/InvariantValueRedirectionTests.cpp | 8 +-- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 4 +- .../alias-analyses/TestMemoryStateEncoder.cpp | 4 +- tests/jlm/llvm/opt/test-cne.cpp | 2 +- tests/jlm/llvm/opt/test-inlining.cpp | 2 +- 
tests/jlm/llvm/opt/test-inversion.cpp | 4 +- tests/jlm/llvm/opt/test-pull.cpp | 8 +-- tests/jlm/llvm/opt/test-push.cpp | 2 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 2 +- tests/jlm/rvsdg/test-gamma.cpp | 18 +++---- 57 files changed, 177 insertions(+), 180 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 774f019e7..91aa41103 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -12,7 +12,7 @@ namespace jlm::hls { static void -ConvertGammaNodeWithoutSpeculation(rvsdg::gamma_node & gammaNode) +ConvertGammaNodeWithoutSpeculation(rvsdg::GammaNode & gammaNode) { rvsdg::substitution_map substitutionMap; @@ -53,7 +53,7 @@ ConvertGammaNodeWithoutSpeculation(rvsdg::gamma_node & gammaNode) } static void -ConvertGammaNodeWithSpeculation(rvsdg::gamma_node & gammaNode) +ConvertGammaNodeWithSpeculation(rvsdg::GammaNode & gammaNode) { rvsdg::substitution_map substitutionMap; @@ -91,7 +91,7 @@ ConvertGammaNodeWithSpeculation(rvsdg::gamma_node & gammaNode) } static bool -CanGammaNodeBeSpeculative(const rvsdg::gamma_node & gammaNode) +CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) { for (size_t i = 0; i < gammaNode.noutputs(); ++i) { @@ -112,7 +112,7 @@ CanGammaNodeBeSpeculative(const rvsdg::gamma_node & gammaNode) // don't allow thetas or loops since they could potentially block forever return false; } - else if (auto innerGammaNode = dynamic_cast(&node)) + else if (auto innerGammaNode = dynamic_cast(&node)) { if (!CanGammaNodeBeSpeculative(*innerGammaNode)) { @@ -141,7 +141,7 @@ ConvertGammaNodesInStructuralNode(rvsdg::structural_node & structuralNode) ConvertGammaNodesInRegion(*structuralNode.subregion(n)); } - if (auto gammaNode = dynamic_cast(&structuralNode)) + if (auto gammaNode = dynamic_cast(&structuralNode)) { if (CanGammaNodeBeSpeculative(*gammaNode)) { diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.hpp 
b/jlm/hls/backend/rvsdg2rhls/GammaConversion.hpp index 58764778e..83de46956 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.hpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.hpp @@ -13,7 +13,7 @@ namespace jlm::hls { /** - * Converts every rvsdg::gamma_node in \p rvsdgModule to its respective HLS equivalent. + * Converts every rvsdg::GammaNode in \p rvsdgModule to its respective HLS equivalent. * * @param rvsdgModule The RVSDG module the transformation is performed on. */ diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 16f6b7d3a..5e987f862 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -131,7 +131,7 @@ RemovePassthroughArgument(const rvsdg::RegionArgument & argument) } static void -RemoveUnusedStatesFromGammaNode(rvsdg::gamma_node & gammaNode) +RemoveUnusedStatesFromGammaNode(rvsdg::GammaNode & gammaNode) { for (int i = gammaNode.nentryvars() - 1; i >= 0; --i) { @@ -199,7 +199,7 @@ RemoveUnusedStatesInStructuralNode(rvsdg::structural_node & structuralNode) RemoveUnusedStatesInRegion(*structuralNode.subregion(n)); } - if (auto gammaNode = dynamic_cast(&structuralNode)) + if (auto gammaNode = dynamic_cast(&structuralNode)) { RemoveUnusedStatesFromGammaNode(*gammaNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 599947172..cbbc3f4b2 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -77,7 +77,7 @@ route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) output = route_to_region(output, region->node()->region()); - if (auto gamma = dynamic_cast(region->node())) + if (auto gamma = dynamic_cast(region->node())) { gamma->add_entryvar(output); output = region->argument(region->narguments() - 1); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp 
b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 2884e410e..16a205412 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -107,7 +107,7 @@ add_triggers(jlm::rvsdg::region * region) t->add_loopvar(trigger); add_triggers(t->subregion()); } - else if (auto gn = dynamic_cast(node)) + else if (auto gn = dynamic_cast(node)) { JLM_ASSERT(trigger != nullptr); JLM_ASSERT(get_trigger(gn->subregion(0)) == nullptr); diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index d6cfb8a2c..f8741e601 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -87,7 +87,7 @@ hls::distribute_constants(rvsdg::region * region) { distribute_constants(t->subregion()); } - else if (auto gn = dynamic_cast(node)) + else if (auto gn = dynamic_cast(node)) { for (size_t i = 0; i < gn->nsubregions(); ++i) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 1a03342a2..5d3472588 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -101,7 +101,7 @@ route_through(jlm::rvsdg::region * target, jlm::rvsdg::output * response) { auto parent_response = route_through(target->node()->region(), response); auto parrent_user = *parent_response->begin(); - if (auto gn = dynamic_cast(target->node())) + if (auto gn = dynamic_cast(target->node())) { auto ip = gn->add_entryvar(parent_response); std::vector vec; diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 8f5ae6206..172c5065e 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -22,7 +22,7 @@ merge_gamma(llvm::RvsdgModule & rm) } bool -eliminate_gamma_ctl(rvsdg::gamma_node * gamma) +eliminate_gamma_ctl(rvsdg::GammaNode * gamma) { // eliminates gammas that just 
replicate the ctl input bool changed = false; @@ -61,7 +61,7 @@ eliminate_gamma_ctl(rvsdg::gamma_node * gamma) } bool -fix_match_inversion(rvsdg::gamma_node * old_gamma) +fix_match_inversion(rvsdg::GammaNode * old_gamma) { // inverts match and swaps regions for gammas that contain swapped control constants if (old_gamma->nsubregions() != 2) @@ -118,7 +118,7 @@ fix_match_inversion(rvsdg::gamma_node * old_gamma) no->region(), op, { no->node()->input(0)->origin() })[0]; - auto new_gamma = rvsdg::gamma_node::create(new_match, match->nalternatives()); + auto new_gamma = rvsdg::GammaNode::create(new_match, match->nalternatives()); rvsdg::substitution_map rmap0; // subregion 0 of the new gamma - 1 of the old rvsdg::substitution_map rmap1; for (auto oev = old_gamma->begin_entryvar(); oev != old_gamma->end_entryvar(); oev++) @@ -149,7 +149,7 @@ fix_match_inversion(rvsdg::gamma_node * old_gamma) } bool -eliminate_gamma_eol(rvsdg::gamma_node * gamma) +eliminate_gamma_eol(rvsdg::GammaNode * gamma) { // eliminates gammas that are only active at the end of the loop and have unused outputs // seems to be mostly loop variables @@ -205,7 +205,7 @@ merge_gamma(jlm::rvsdg::region * region) { for (size_t n = 0; n < structnode->nsubregions(); n++) merge_gamma(structnode->subregion(n)); - if (auto gamma = dynamic_cast(node)) + if (auto gamma = dynamic_cast(node)) { if (fix_match_inversion(gamma) || eliminate_gamma_ctl(gamma) || eliminate_gamma_eol(gamma) || merge_gamma(gamma)) @@ -251,7 +251,7 @@ depends_on(jlm::rvsdg::output * output, jlm::rvsdg::node * node) } jlm::rvsdg::gamma_input * -get_entryvar(jlm::rvsdg::output * origin, jlm::rvsdg::gamma_node * gamma) +get_entryvar(jlm::rvsdg::output * origin, rvsdg::GammaNode * gamma) { for (auto user : *origin) { @@ -265,7 +265,7 @@ get_entryvar(jlm::rvsdg::output * origin, jlm::rvsdg::gamma_node * gamma) } bool -merge_gamma(jlm::rvsdg::gamma_node * gamma) +merge_gamma(rvsdg::GammaNode * gamma) { for (auto user : 
*gamma->predicate()->origin()) { diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp index af240a663..dd5475889 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp @@ -19,7 +19,7 @@ void merge_gamma(llvm::RvsdgModule & rm); bool -merge_gamma(jlm::rvsdg::gamma_node * gamma); +merge_gamma(rvsdg::GammaNode * gamma); } diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index c1340d0e6..e35f333d7 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -20,7 +20,7 @@ remove_unused_state(jlm::rvsdg::region * region, bool can_remove_arguments) { if (auto structnode = dynamic_cast(node)) { - if (auto gn = dynamic_cast(node)) + if (auto gn = dynamic_cast(node)) { // process subnodes first for (size_t n = 0; n < gn->nsubregions(); n++) @@ -127,7 +127,7 @@ remove_unused_state(llvm::RvsdgModule & rm) } void -remove_gamma_passthrough(jlm::rvsdg::gamma_node * gn) +remove_gamma_passthrough(rvsdg::GammaNode * gn) { // remove inputs in reverse for (int i = gn->nentryvars() - 1; i >= 0; --i) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp index 74532837c..e0a11ad54 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp @@ -27,7 +27,7 @@ void remove_region_passthrough(const rvsdg::RegionArgument * arg); void -remove_gamma_passthrough(jlm::rvsdg::gamma_node * gn); +remove_gamma_passthrough(rvsdg::GammaNode * gn); void remove_unused_state(llvm::RvsdgModule & rm); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 3d872c813..c0bb55c6b 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -508,7 +508,7 @@ static void divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) { 
JLM_ASSERT(rvsdg::is(node)); - auto gamma = static_cast(node); + auto gamma = static_cast(node); for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) { diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 30b02c54b..96e4e7a93 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -173,7 +173,7 @@ convert_simple_node(const rvsdg::node & node, context & ctx) } static void -convert_empty_gamma_node(const rvsdg::gamma_node * gamma, context & ctx) +convert_empty_gamma_node(const rvsdg::GammaNode * gamma, context & ctx) { JLM_ASSERT(gamma->nsubregions() == 2); JLM_ASSERT(gamma->subregion(0)->nnodes() == 0 && gamma->subregion(1)->nnodes() == 0); @@ -230,7 +230,7 @@ static inline void convert_gamma_node(const rvsdg::node & node, context & ctx) { JLM_ASSERT(is(&node)); - auto gamma = static_cast(&node); + auto gamma = static_cast(&node); auto nalternatives = gamma->nsubregions(); auto predicate = gamma->predicate()->origin(); auto cfg = ctx.cfg(); diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 71cd342e7..9f912547e 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -484,7 +484,7 @@ ConvertSelect( auto p = variableMap.lookup(threeAddressCode.operand(0)); auto predicate = rvsdg::simple_node::create_normalized(®ion, op, { p })[0]; - auto gamma = rvsdg::gamma_node::create(predicate, 2); + auto gamma = rvsdg::GammaNode::create(predicate, 2); auto ev1 = gamma->add_entryvar(variableMap.lookup(threeAddressCode.operand(2))); auto ev2 = gamma->add_entryvar(variableMap.lookup(threeAddressCode.operand(1))); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); @@ -701,7 +701,7 @@ Convert( JLM_ASSERT(is(sb.last()->operation())); auto predicate = 
regionalizedVariableMap.GetTopVariableMap().lookup(sb.last()->operand(0)); - auto gamma = rvsdg::gamma_node::create(predicate, branchAggregationNode.nchildren()); + auto gamma = rvsdg::GammaNode::create(predicate, branchAggregationNode.nchildren()); /* * Add gamma inputs. diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index ea857092b..0c5f2ba8c 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -344,7 +344,7 @@ DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) con { auto sweepGamma = [](auto & d, auto & n) { - d.SweepGamma(*util::AssertedCast(&n)); + d.SweepGamma(*util::AssertedCast(&n)); }; auto sweepTheta = [](auto & d, auto & n) { @@ -378,7 +378,7 @@ DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) con } void -DeadNodeElimination::SweepGamma(jlm::rvsdg::gamma_node & gammaNode) const +DeadNodeElimination::SweepGamma(rvsdg::GammaNode & gammaNode) const { // Remove dead outputs and results for (size_t n = gammaNode.noutputs() - 1; n != static_cast(-1); n--) diff --git a/jlm/llvm/opt/DeadNodeElimination.hpp b/jlm/llvm/opt/DeadNodeElimination.hpp index cd0c44c4e..1aa401744 100644 --- a/jlm/llvm/opt/DeadNodeElimination.hpp +++ b/jlm/llvm/opt/DeadNodeElimination.hpp @@ -12,7 +12,7 @@ namespace jlm::rvsdg { -class gamma_node; +class GammaNode; class theta_node; } @@ -94,7 +94,7 @@ class DeadNodeElimination final : public optimization SweepStructuralNode(jlm::rvsdg::structural_node & node) const; void - SweepGamma(rvsdg::gamma_node & gammaNode) const; + SweepGamma(rvsdg::GammaNode & gammaNode) const; void SweepTheta(rvsdg::theta_node & thetaNode) const; diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 5e9f929ea..85e72cb00 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -103,7 +103,7 @@ 
InvariantValueRedirection::RedirectInRegion(rvsdg::region & region) // it is irrelevant in which order we handle the nodes. for (auto & node : region.nodes) { - if (auto gammaNode = dynamic_cast(&node)) + if (auto gammaNode = dynamic_cast(&node)) { // Ensure we redirect invariant values of all nodes in the gamma subregions first, otherwise // we might not be able to redirect some of the gamma outputs. @@ -138,7 +138,7 @@ InvariantValueRedirection::RedirectInSubregions(rvsdg::structural_node & structu } void -InvariantValueRedirection::RedirectGammaOutputs(rvsdg::gamma_node & gammaNode) +InvariantValueRedirection::RedirectGammaOutputs(rvsdg::GammaNode & gammaNode) { for (auto it = gammaNode.begin_exitvar(); it != gammaNode.end_exitvar(); it++) { diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index a3b4a1ae1..487eea662 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -10,7 +10,7 @@ namespace jlm::rvsdg { -class gamma_node; +class GammaNode; class theta_node; } @@ -67,7 +67,7 @@ class InvariantValueRedirection final : public optimization RedirectInSubregions(rvsdg::structural_node & structuralNode); static void - RedirectGammaOutputs(rvsdg::gamma_node & gammaNode); + RedirectGammaOutputs(rvsdg::GammaNode & gammaNode); static void RedirectThetaOutputs(rvsdg::theta_node & thetaNode); diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 12a6f08a3..4d7b065a5 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -802,7 +802,7 @@ Andersen::AnalyzeStructuralNode(const rvsdg::structural_node & node) AnalyzeDelta(*deltaNode); else if (const auto phiNode = dynamic_cast(&node)) AnalyzePhi(*phiNode); - else if (const auto gammaNode = dynamic_cast(&node)) + else if (const auto gammaNode = dynamic_cast(&node)) AnalyzeGamma(*gammaNode); else if (const auto thetaNode = 
dynamic_cast(&node)) AnalyzeTheta(*thetaNode); @@ -926,7 +926,7 @@ Andersen::AnalyzePhi(const phi::node & phi) } void -Andersen::AnalyzeGamma(const rvsdg::gamma_node & gamma) +Andersen::AnalyzeGamma(const rvsdg::GammaNode & gamma) { // Handle input variables for (auto ev = gamma.begin_entryvar(); ev != gamma.end_entryvar(); ++ev) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index a83610917..e5bd942c3 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -406,7 +406,7 @@ class Andersen final : public AliasAnalysis AnalyzePhi(const phi::node & node); void - AnalyzeGamma(const rvsdg::gamma_node & node); + AnalyzeGamma(const rvsdg::GammaNode & node); void AnalyzeTheta(const rvsdg::theta_node & node); diff --git a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp index b668bda57..f3d1a1ccd 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp @@ -66,7 +66,7 @@ class MemoryNodeProvisioning } [[nodiscard]] virtual jlm::util::HashSet - GetGammaEntryNodes(const jlm::rvsdg::gamma_node & gammaNode) const + GetGammaEntryNodes(const rvsdg::GammaNode & gammaNode) const { jlm::util::HashSet allMemoryNodes; for (size_t n = 0; n < gammaNode.nsubregions(); n++) @@ -80,7 +80,7 @@ class MemoryNodeProvisioning } [[nodiscard]] virtual jlm::util::HashSet - GetGammaExitNodes(const jlm::rvsdg::gamma_node & gammaNode) const + GetGammaExitNodes(const rvsdg::GammaNode & gammaNode) const { jlm::util::HashSet allMemoryNodes; for (size_t n = 0; n < gammaNode.nsubregions(); n++) diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 8cad50d4d..497767a16 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -522,7 +522,7 @@ 
MemoryStateEncoder::EncodeStructuralNode(rvsdg::structural_node & structuralNode { EncodePhi(*phiNode); } - else if (auto gammaNode = dynamic_cast(&structuralNode)) + else if (auto gammaNode = dynamic_cast(&structuralNode)) { EncodeGamma(*gammaNode); } @@ -832,7 +832,7 @@ MemoryStateEncoder::EncodeDelta(const delta::node &) } void -MemoryStateEncoder::EncodeGamma(rvsdg::gamma_node & gammaNode) +MemoryStateEncoder::EncodeGamma(rvsdg::GammaNode & gammaNode) { for (size_t n = 0; n < gammaNode.nsubregions(); n++) Context_->GetRegionalizedStateMap().PushRegion(*gammaNode.subregion(n)); @@ -849,7 +849,7 @@ MemoryStateEncoder::EncodeGamma(rvsdg::gamma_node & gammaNode) } void -MemoryStateEncoder::EncodeGammaEntry(rvsdg::gamma_node & gammaNode) +MemoryStateEncoder::EncodeGammaEntry(rvsdg::GammaNode & gammaNode) { auto region = gammaNode.region(); auto & stateMap = Context_->GetRegionalizedStateMap(); @@ -865,7 +865,7 @@ MemoryStateEncoder::EncodeGammaEntry(rvsdg::gamma_node & gammaNode) } void -MemoryStateEncoder::EncodeGammaExit(rvsdg::gamma_node & gammaNode) +MemoryStateEncoder::EncodeGammaExit(rvsdg::GammaNode & gammaNode) { auto & stateMap = Context_->GetRegionalizedStateMap(); auto memoryNodes = Context_->GetMemoryNodeProvisioning().GetGammaExitNodes(gammaNode); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index d97828ed6..bae1266dc 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -134,13 +134,13 @@ class MemoryStateEncoder final EncodeDelta(const delta::node & deltaNode); void - EncodeGamma(rvsdg::gamma_node & gammaNode); + EncodeGamma(rvsdg::GammaNode & gammaNode); void - EncodeGammaEntry(rvsdg::gamma_node & gammaNode); + EncodeGammaEntry(rvsdg::GammaNode & gammaNode); void - EncodeGammaExit(rvsdg::gamma_node & gammaNode); + EncodeGammaExit(rvsdg::GammaNode & gammaNode); void EncodeTheta(rvsdg::theta_node & thetaNode); 
diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 314a23184..be5ed51a4 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -1612,7 +1612,7 @@ Steensgaard::AnalyzePhi(const phi::node & phi) } void -Steensgaard::AnalyzeGamma(const jlm::rvsdg::gamma_node & node) +Steensgaard::AnalyzeGamma(const rvsdg::GammaNode & node) { // Handle entry variables for (auto ev = node.begin_entryvar(); ev != node.end_entryvar(); ev++) @@ -1692,7 +1692,7 @@ Steensgaard::AnalyzeStructuralNode(const jlm::rvsdg::structural_node & node) { AnalyzeDelta(*deltaNode); } - else if (auto gammaNode = dynamic_cast(&node)) + else if (auto gammaNode = dynamic_cast(&node)) { AnalyzeGamma(*gammaNode); } diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index 893757f47..ee3456242 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -11,7 +11,7 @@ namespace jlm::rvsdg { -class gamma_node; +class GammaNode; class theta_node; } @@ -85,7 +85,7 @@ class Steensgaard final : public AliasAnalysis AnalyzePhi(const phi::node & node); void - AnalyzeGamma(const rvsdg::gamma_node & node); + AnalyzeGamma(const rvsdg::GammaNode & node); void AnalyzeTheta(const rvsdg::theta_node & node); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 20b1627af..de9bcbd0b 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -530,7 +530,7 @@ void TopDownMemoryNodeEliminator::EliminateTopDownStructuralNode( const rvsdg::structural_node & structuralNode) { - if (auto gammaNode = dynamic_cast(&structuralNode)) + if (auto gammaNode = dynamic_cast(&structuralNode)) { EliminateTopDownGamma(*gammaNode); } @@ -657,10 +657,10 @@ 
TopDownMemoryNodeEliminator::EliminateTopDownPhi(const phi::node & phiNode) } void -TopDownMemoryNodeEliminator::EliminateTopDownGamma(const rvsdg::gamma_node & gammaNode) +TopDownMemoryNodeEliminator::EliminateTopDownGamma(const rvsdg::GammaNode & gammaNode) { auto addSubregionLiveAndEntryNodes = - [](const rvsdg::gamma_node & gammaNode, TopDownMemoryNodeEliminator::Context & context) + [](const rvsdg::GammaNode & gammaNode, TopDownMemoryNodeEliminator::Context & context) { auto & gammaRegion = *gammaNode.region(); auto & seedProvisioning = context.GetSeedProvisioning(); @@ -679,7 +679,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownGamma(const rvsdg::gamma_node & gam } }; - auto eliminateTopDownForSubregions = [&](const rvsdg::gamma_node & gammaNode) + auto eliminateTopDownForSubregions = [&](const rvsdg::GammaNode & gammaNode) { for (size_t n = 0; n < gammaNode.nsubregions(); n++) { @@ -689,7 +689,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownGamma(const rvsdg::gamma_node & gam }; auto addSubregionExitNodes = - [](const rvsdg::gamma_node & gammaNode, TopDownMemoryNodeEliminator::Context & context) + [](const rvsdg::GammaNode & gammaNode, TopDownMemoryNodeEliminator::Context & context) { auto & provisioning = context.GetProvisioning(); @@ -702,7 +702,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownGamma(const rvsdg::gamma_node & gam }; auto updateGammaRegionLiveNodes = - [](const rvsdg::gamma_node & gammaNode, TopDownMemoryNodeEliminator::Context & context) + [](const rvsdg::GammaNode & gammaNode, TopDownMemoryNodeEliminator::Context & context) { auto & gammaRegion = *gammaNode.region(); auto & provisioning = context.GetProvisioning(); @@ -937,7 +937,7 @@ TopDownMemoryNodeEliminator::CheckInvariants( auto subregion = phiNode->subregion(); collectRegionsAndCalls(*subregion, regions, callNodes); } - else if (auto gammaNode = dynamic_cast(&node)) + else if (auto gammaNode = dynamic_cast(&node)) { for (size_t n = 0; n < gammaNode->nsubregions(); n++) { diff 
--git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp index 0672e76c1..7da9057df 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp @@ -27,7 +27,7 @@ class node; namespace jlm::rvsdg { -class gamma_node; +class GammaNode; class node; class region; class simple_node; @@ -149,7 +149,7 @@ class TopDownMemoryNodeEliminator final : public MemoryNodeEliminator EliminateTopDownPhi(const phi::node & phiNode); void - EliminateTopDownGamma(const rvsdg::gamma_node & gammaNode); + EliminateTopDownGamma(const rvsdg::GammaNode & gammaNode); void EliminateTopDownTheta(const rvsdg::theta_node & thetaNode); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 3ec3d247a..c5fb290aa 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -469,7 +469,7 @@ static void divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) { JLM_ASSERT(rvsdg::is(node)); - auto gamma = static_cast(node); + auto gamma = static_cast(node); for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) { diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index ad236916b..0dc8b47d7 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -71,7 +71,7 @@ route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) output = route_to_region(output, region->node()->region()); - if (auto gamma = dynamic_cast(region->node())) + if (auto gamma = dynamic_cast(region->node())) { gamma->add_entryvar(output); output = region->argument(region->narguments() - 1); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 265a27e6b..d35788bad 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -48,7 +48,7 @@ class ivtstat final : public util::Statistics } }; -static jlm::rvsdg::gamma_node * +static rvsdg::GammaNode * is_applicable(const 
jlm::rvsdg::theta_node * theta) { auto matchnode = jlm::rvsdg::node_output::node(theta->predicate()->origin()); @@ -58,7 +58,7 @@ is_applicable(const jlm::rvsdg::theta_node * theta) if (matchnode->output(0)->nusers() != 2) return nullptr; - jlm::rvsdg::gamma_node * gnode = nullptr; + rvsdg::GammaNode * gnode = nullptr; for (const auto & user : *matchnode->output(0)) { if (user == theta->predicate()) @@ -67,14 +67,14 @@ is_applicable(const jlm::rvsdg::theta_node * theta) if (!rvsdg::is(rvsdg::input::GetNode(*user))) return nullptr; - gnode = dynamic_cast(rvsdg::input::GetNode(*user)); + gnode = dynamic_cast(rvsdg::input::GetNode(*user)); } return gnode; } static void -pullin(jlm::rvsdg::gamma_node * gamma, jlm::rvsdg::theta_node * theta) +pullin(rvsdg::GammaNode * gamma, jlm::rvsdg::theta_node * theta) { pullin_bottom(gamma); for (const auto & lv : *theta) @@ -152,9 +152,8 @@ invert(jlm::rvsdg::theta_node * otheta) smap.insert(olv->argument(), olv->input()->origin()); copy_condition_nodes(otheta->region(), smap, cnodes); - auto ngamma = jlm::rvsdg::gamma_node::create( - smap.lookup(ogamma->predicate()->origin()), - ogamma->nsubregions()); + auto ngamma = + rvsdg::GammaNode::create(smap.lookup(ogamma->predicate()->origin()), ogamma->nsubregions()); /* handle subregion 0 */ jlm::rvsdg::substitution_map r0map; diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 72429b467..2b6078c3d 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -45,7 +45,7 @@ class pullstat final : public util::Statistics }; static bool -empty(const jlm::rvsdg::gamma_node * gamma) +empty(const rvsdg::GammaNode * gamma) { for (size_t n = 0; n < gamma->nsubregions(); n++) { @@ -80,7 +80,7 @@ remove(jlm::rvsdg::gamma_input * input) } static void -pullin_node(jlm::rvsdg::gamma_node * gamma, jlm::rvsdg::node * node) +pullin_node(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) { /* collect operands */ std::vector> operands(gamma->nsubregions()); @@ -111,7 +111,7 @@ 
pullin_node(jlm::rvsdg::gamma_node * gamma, jlm::rvsdg::node * node) } static void -cleanup(jlm::rvsdg::gamma_node * gamma, jlm::rvsdg::node * node) +cleanup(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) { JLM_ASSERT(single_successor(node)); @@ -125,7 +125,7 @@ cleanup(jlm::rvsdg::gamma_node * gamma, jlm::rvsdg::node * node) } void -pullin_top(jlm::rvsdg::gamma_node * gamma) +pullin_top(rvsdg::GammaNode * gamma) { /* FIXME: This is inefficient. We can do better. */ auto ev = gamma->begin_entryvar(); @@ -149,7 +149,7 @@ pullin_top(jlm::rvsdg::gamma_node * gamma) } void -pullin_bottom(jlm::rvsdg::gamma_node * gamma) +pullin_bottom(rvsdg::GammaNode * gamma) { /* collect immediate successors of the gamma node */ std::unordered_set workset; @@ -213,7 +213,7 @@ pullin_bottom(jlm::rvsdg::gamma_node * gamma) } static size_t -is_used_in_nsubregions(const jlm::rvsdg::gamma_node * gamma, const jlm::rvsdg::node * node) +is_used_in_nsubregions(const rvsdg::GammaNode * gamma, const jlm::rvsdg::node * node) { JLM_ASSERT(single_successor(node)); @@ -243,7 +243,7 @@ is_used_in_nsubregions(const jlm::rvsdg::gamma_node * gamma, const jlm::rvsdg::n } void -pull(jlm::rvsdg::gamma_node * gamma) +pull(rvsdg::GammaNode * gamma) { /* We don't want to pull anything into empty gammas with two subregions, @@ -289,7 +289,7 @@ pull(jlm::rvsdg::region * region) { if (auto structnode = dynamic_cast(node)) { - if (auto gamma = dynamic_cast(node)) + if (auto gamma = dynamic_cast(node)) pull(gamma); for (size_t n = 0; n < structnode->nsubregions(); n++) diff --git a/jlm/llvm/opt/pull.hpp b/jlm/llvm/opt/pull.hpp index 259ee05cd..fb6650366 100644 --- a/jlm/llvm/opt/pull.hpp +++ b/jlm/llvm/opt/pull.hpp @@ -11,7 +11,7 @@ namespace jlm::rvsdg { -class gamma_node; +class GammaNode; } namespace jlm::llvm @@ -32,13 +32,13 @@ class pullin final : public optimization }; void -pullin_top(jlm::rvsdg::gamma_node * gamma); +pullin_top(rvsdg::GammaNode * gamma); void -pullin_bottom(jlm::rvsdg::gamma_node * 
gamma); +pullin_bottom(rvsdg::GammaNode * gamma); void -pull(jlm::rvsdg::gamma_node * gamma); +pull(rvsdg::GammaNode * gamma); void pull(jlm::rvsdg::region * region); diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index d69b07e29..6b4e6b075 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -101,7 +101,7 @@ copy_from_gamma(jlm::rvsdg::node * node, size_t r) JLM_ASSERT(node->depth() == 0); auto target = node->region()->node()->region(); - auto gamma = static_cast(node->region()->node()); + auto gamma = static_cast(node->region()->node()); std::vector operands; for (size_t n = 0; n < node->ninputs(); n++) @@ -167,7 +167,7 @@ is_gamma_top_pushable(const jlm::rvsdg::node * node) } void -push(jlm::rvsdg::gamma_node * gamma) +push(rvsdg::GammaNode * gamma) { for (size_t r = 0; r < gamma->nsubregions(); r++) { @@ -413,10 +413,10 @@ push(jlm::rvsdg::region * region) push(strnode->subregion(n)); } - if (auto gamma = dynamic_cast(node)) + if (auto gamma = dynamic_cast(node)) push(gamma); - if (auto theta = dynamic_cast(node)) + if (auto theta = dynamic_cast(node)) push(theta); } } diff --git a/jlm/llvm/opt/push.hpp b/jlm/llvm/opt/push.hpp index 7337c5779..0820cd279 100644 --- a/jlm/llvm/opt/push.hpp +++ b/jlm/llvm/opt/push.hpp @@ -10,7 +10,7 @@ namespace jlm::rvsdg { -class gamma_node; +class GammaNode; class theta_node; } @@ -41,7 +41,7 @@ void push(jlm::rvsdg::theta_node * theta); void -push(jlm::rvsdg::gamma_node * gamma); +push(rvsdg::GammaNode * gamma); } diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 689a536e9..cbfe84367 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -399,7 +399,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) jlm::rvsdg::substitution_map smap; { auto pred = create_unrolled_gamma_predicate(ui, factor); - auto ngamma = jlm::rvsdg::gamma_node::create(pred, 2); + auto ngamma = rvsdg::GammaNode::create(pred, 2); auto ntheta = 
jlm::rvsdg::theta_node::create(ngamma->subregion(1)); jlm::rvsdg::substitution_map rmap[2]; @@ -432,7 +432,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) /* handle gamma for residual iterations */ { auto pred = create_residual_gamma_predicate(smap, ui); - auto ngamma = jlm::rvsdg::gamma_node::create(pred, 2); + auto ngamma = rvsdg::GammaNode::create(pred, 2); auto ntheta = jlm::rvsdg::theta_node::create(ngamma->subregion(1)); jlm::rvsdg::substitution_map rmap[2]; diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 34b7d5ba6..02c335f3f 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -160,7 +160,7 @@ JlmToMlirConverter::ConvertNode( { return ConvertLambda(*lambda, block); } - else if (auto gamma = dynamic_cast(&node)) + else if (auto gamma = dynamic_cast(&node)) { return ConvertGamma(*gamma, block, inputs); } @@ -427,7 +427,7 @@ JlmToMlirConverter::ConvertLambda(const llvm::lambda::node & lambdaNode, ::mlir: ::mlir::Operation * JlmToMlirConverter::ConvertGamma( - const rvsdg::gamma_node & gammaNode, + const rvsdg::GammaNode & gammaNode, ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs) { diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index 970625842..41eef98d3 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -160,7 +160,7 @@ class JlmToMlirConverter final */ ::mlir::Operation * ConvertGamma( - const rvsdg::gamma_node & gammaNode, + const rvsdg::GammaNode & gammaNode, ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs); diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index a69c741ca..d906bb028 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -319,7 +319,7 @@ MlirToJlmConverter::ConvertOperation( } else 
if (auto mlirGammaNode = ::mlir::dyn_cast<::mlir::rvsdg::GammaNode>(&mlirOperation)) { - auto rvsdgGammaNode = rvsdg::gamma_node::create( + auto rvsdgGammaNode = rvsdg::GammaNode::create( inputs[0], // predicate mlirGammaNode.getNumRegions() // nalternatives ); diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 0ed36061b..a41c01756 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -14,14 +14,14 @@ namespace jlm::rvsdg /* gamma normal form */ static bool -is_predicate_reducible(const jlm::rvsdg::gamma_node * gamma) +is_predicate_reducible(const GammaNode * gamma) { auto constant = node_output::node(gamma->predicate()->origin()); return constant && is_ctlconstant_op(constant->operation()); } static void -perform_predicate_reduction(jlm::rvsdg::gamma_node * gamma) +perform_predicate_reduction(GammaNode * gamma) { auto origin = gamma->predicate()->origin(); auto constant = static_cast(origin)->node(); @@ -41,7 +41,7 @@ perform_predicate_reduction(jlm::rvsdg::gamma_node * gamma) } static bool -perform_invariant_reduction(jlm::rvsdg::gamma_node * gamma) +perform_invariant_reduction(GammaNode * gamma) { bool was_normalized = true; for (auto it = gamma->begin_exitvar(); it != gamma->end_exitvar(); it++) @@ -70,7 +70,7 @@ perform_invariant_reduction(jlm::rvsdg::gamma_node * gamma) } static std::unordered_set -is_control_constant_reducible(jlm::rvsdg::gamma_node * gamma) +is_control_constant_reducible(GammaNode * gamma) { /* check gamma predicate */ auto match = node_output::node(gamma->predicate()->origin()); @@ -114,7 +114,7 @@ is_control_constant_reducible(jlm::rvsdg::gamma_node * gamma) static void perform_control_constant_reduction(std::unordered_set & outputs) { - auto gamma = static_cast((*outputs.begin())->node()); + auto gamma = static_cast((*outputs.begin())->node()); auto origin = static_cast(gamma->predicate()->origin()); auto match = origin->node(); auto & match_op = to_match_op(match->operation()); @@ -171,8 +171,7 @@ 
gamma_normal_form::gamma_normal_form( bool gamma_normal_form::normalize_node(jlm::rvsdg::node * node_) const { - JLM_ASSERT(dynamic_cast(node_)); - auto node = static_cast(node_); + auto node = util::AssertedCast(node_); if (!get_mutable()) return true; @@ -305,11 +304,10 @@ gamma_output::IsInvariant(rvsdg::output ** invariantOrigin) const noexcept /* gamma node */ -gamma_node::~gamma_node() -{} +GammaNode::~GammaNode() noexcept = default; -const gamma_node::entryvar_iterator & -gamma_node::entryvar_iterator::operator++() noexcept +const GammaNode::entryvar_iterator & +GammaNode::entryvar_iterator::operator++() noexcept { if (input_ == nullptr) return *this; @@ -326,8 +324,8 @@ gamma_node::entryvar_iterator::operator++() noexcept return *this; } -const gamma_node::exitvar_iterator & -gamma_node::exitvar_iterator::operator++() noexcept +const GammaNode::exitvar_iterator & +GammaNode::exitvar_iterator::operator++() noexcept { if (output_ == nullptr) return *this; @@ -344,8 +342,8 @@ gamma_node::exitvar_iterator::operator++() noexcept return *this; } -jlm::rvsdg::gamma_node * -gamma_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +GammaNode * +GammaNode::copy(rvsdg::region * region, substitution_map & smap) const { auto gamma = create(smap.lookup(predicate()->origin()), nsubregions()); diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index cfc77f8ff..f77d33159 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -109,13 +109,13 @@ class GammaOperation final : public structural_op class gamma_input; class gamma_output; -class gamma_node : public jlm::rvsdg::structural_node +class GammaNode : public structural_node { public: - virtual ~gamma_node(); + ~GammaNode() noexcept override; private: - gamma_node(jlm::rvsdg::output * predicate, size_t nalternatives); + GammaNode(rvsdg::output * predicate, size_t nalternatives); class entryvar_iterator { @@ -222,10 +222,10 @@ class gamma_node : public jlm::rvsdg::structural_node 
}; public: - static jlm::rvsdg::gamma_node * + static GammaNode * create(jlm::rvsdg::output * predicate, size_t nalternatives) { - return new jlm::rvsdg::gamma_node(predicate, nalternatives); + return new GammaNode(predicate, nalternatives); } jlm::rvsdg::gamma_input * @@ -250,7 +250,7 @@ class gamma_node : public jlm::rvsdg::structural_node jlm::rvsdg::gamma_output * exitvar(size_t index) const noexcept; - inline gamma_node::entryvar_iterator + inline GammaNode::entryvar_iterator begin_entryvar() const { if (nentryvars() == 0) @@ -259,13 +259,13 @@ class gamma_node : public jlm::rvsdg::structural_node return entryvar_iterator(entryvar(0)); } - inline gamma_node::entryvar_iterator + inline GammaNode::entryvar_iterator end_entryvar() const { return entryvar_iterator(nullptr); } - inline gamma_node::exitvar_iterator + inline GammaNode::exitvar_iterator begin_exitvar() const { if (nexitvars() == 0) @@ -274,7 +274,7 @@ class gamma_node : public jlm::rvsdg::structural_node return exitvar_iterator(exitvar(0)); } - inline gamma_node::exitvar_iterator + inline GammaNode::exitvar_iterator end_exitvar() const { return exitvar_iterator(nullptr); @@ -311,7 +311,7 @@ class gamma_node : public jlm::rvsdg::structural_node RemoveGammaOutputsWhere(match); } - virtual jlm::rvsdg::gamma_node * + virtual GammaNode * copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; }; @@ -319,24 +319,24 @@ class gamma_node : public jlm::rvsdg::structural_node class gamma_input final : public structural_input { - friend gamma_node; + friend GammaNode; public: virtual ~gamma_input() noexcept; private: inline gamma_input( - gamma_node * node, + GammaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : structural_input(node, origin, std::move(type)) {} public: - gamma_node * + GammaNode * node() const noexcept { - return static_cast(structural_input::node()); + return static_cast(structural_input::node()); } inline argument_list::iterator @@ -383,19 
+383,19 @@ class gamma_input final : public structural_input class gamma_output final : public structural_output { - friend gamma_node; + friend GammaNode; public: virtual ~gamma_output() noexcept; - inline gamma_output(gamma_node * node, std::shared_ptr type) + inline gamma_output(GammaNode * node, std::shared_ptr type) : structural_output(node, std::move(type)) {} - gamma_node * + GammaNode * node() const noexcept { - return static_cast(structural_output::node()); + return static_cast(structural_output::node()); } inline result_list::iterator @@ -453,7 +453,7 @@ class gamma_output final : public structural_output /* gamma node method definitions */ -inline gamma_node::gamma_node(jlm::rvsdg::output * predicate, size_t nalternatives) +inline GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) : structural_node(GammaOperation(nalternatives), predicate->region(), nalternatives) { node::add_input(std::unique_ptr( @@ -465,7 +465,7 @@ inline gamma_node::gamma_node(jlm::rvsdg::output * predicate, size_t nalternativ */ class GammaArgument final : public RegionArgument { - friend gamma_node; + friend GammaNode; public: ~GammaArgument() noexcept override; @@ -492,7 +492,7 @@ class GammaArgument final : public RegionArgument */ class GammaResult final : public RegionResult { - friend gamma_node; + friend GammaNode; public: ~GammaResult() noexcept override; @@ -515,25 +515,25 @@ class GammaResult final : public RegionResult }; inline jlm::rvsdg::gamma_input * -gamma_node::predicate() const noexcept +GammaNode::predicate() const noexcept { return static_cast(structural_node::input(0)); } inline jlm::rvsdg::gamma_input * -gamma_node::entryvar(size_t index) const noexcept +GammaNode::entryvar(size_t index) const noexcept { return static_cast(node::input(index + 1)); } inline jlm::rvsdg::gamma_output * -gamma_node::exitvar(size_t index) const noexcept +GammaNode::exitvar(size_t index) const noexcept { return static_cast(node::output(index)); } inline 
jlm::rvsdg::gamma_input * -gamma_node::add_entryvar(jlm::rvsdg::output * origin) +GammaNode::add_entryvar(jlm::rvsdg::output * origin) { auto input = node::add_input(std::unique_ptr(new gamma_input(this, origin, origin->Type()))); @@ -548,7 +548,7 @@ gamma_node::add_entryvar(jlm::rvsdg::output * origin) } inline jlm::rvsdg::gamma_output * -gamma_node::add_exitvar(const std::vector & values) +GammaNode::add_exitvar(const std::vector & values) { if (values.size() != nsubregions()) throw jlm::util::error("Incorrect number of values."); @@ -567,7 +567,7 @@ gamma_node::add_exitvar(const std::vector & values) template void -gamma_node::RemoveGammaOutputsWhere(const F & match) +GammaNode::RemoveGammaOutputsWhere(const F & match) { // iterate backwards to avoid the invalidation of 'n' by RemoveOutput() for (size_t n = noutputs() - 1; n != static_cast(-1); n--) diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index b467590dd..e0015f461 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -1320,7 +1320,7 @@ GammaTest::SetupRvsdg() auto biteq = jlm::rvsdg::biteq_op::create(32, fct->fctargument(0), zero); auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 2, biteq); - auto gammanode = jlm::rvsdg::gamma_node::create(predicate, 2); + auto gammanode = jlm::rvsdg::GammaNode::create(predicate, 2); auto p1ev = gammanode->add_entryvar(fct->fctargument(1)); auto p2ev = gammanode->add_entryvar(fct->fctargument(2)); auto p3ev = gammanode->add_entryvar(fct->fctargument(3)); @@ -1369,7 +1369,7 @@ GammaTest2::SetupRvsdg() rvsdg::output * zAddress, rvsdg::output * memoryState) { - auto gammaNode = rvsdg::gamma_node::create(predicate, 2); + auto gammaNode = rvsdg::GammaNode::create(predicate, 2); auto gammaInputX = gammaNode->add_entryvar(xAddress); auto gammaInputY = gammaNode->add_entryvar(yAddress); @@ -2031,7 +2031,7 @@ PhiTest1::SetupRvsdg() auto bitult = jlm::rvsdg::bitult_op::create(64, valueArgument, two); auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 
2, bitult); - auto gammaNode = jlm::rvsdg::gamma_node::create(predicate, 2); + auto gammaNode = jlm::rvsdg::GammaNode::create(predicate, 2); auto nev = gammaNode->add_entryvar(valueArgument); auto resultev = gammaNode->add_entryvar(pointerArgument); auto fibev = gammaNode->add_entryvar(ctxVarFib); @@ -3893,7 +3893,7 @@ VariadicFunctionTest2::SetupRvsdg() auto icmpResult = rvsdg::bitult_op::create(32, loadResults[0], fortyOne); auto matchResult = rvsdg::match_op::Create(*icmpResult, { { 1, 1 } }, 0, 2); - auto gammaNode = rvsdg::gamma_node::create(matchResult, 2); + auto gammaNode = rvsdg::GammaNode::create(matchResult, 2); auto gammaVaAddress = gammaNode->add_entryvar(allocaResults[0]); auto gammaLoadResult = gammaNode->add_entryvar(loadResults[0]); auto gammaMemoryState = gammaNode->add_entryvar(loadResults[1]); diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index 914b3bdf0..557f42f7b 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -950,7 +950,7 @@ class GammaTest final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::gamma_node * gamma; + rvsdg::GammaNode * gamma; }; /** \brief GammaTest2 class @@ -1020,7 +1020,7 @@ class GammaTest2 final : public RvsdgTest return *LambdaH_; } - [[nodiscard]] rvsdg::gamma_node & + [[nodiscard]] rvsdg::GammaNode & GetGamma() const noexcept { return *Gamma_; @@ -1076,7 +1076,7 @@ class GammaTest2 final : public RvsdgTest llvm::lambda::node * LambdaG_; llvm::lambda::node * LambdaH_; - rvsdg::gamma_node * Gamma_; + rvsdg::GammaNode * Gamma_; llvm::CallNode * CallFromG_; llvm::CallNode * CallFromH_; @@ -1386,7 +1386,7 @@ class PhiTest1 final : public RvsdgTest jlm::llvm::lambda::node * lambda_fib; jlm::llvm::lambda::node * lambda_test; - jlm::rvsdg::gamma_node * gamma; + rvsdg::GammaNode * gamma; jlm::llvm::phi::node * phi; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index 1f1aef3f8..c9a41f559 100644 --- 
a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -29,7 +29,7 @@ TestWithMatch() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->fctargument(0)); - auto gamma = jlm::rvsdg::gamma_node::create(match, 2); + auto gamma = jlm::rvsdg::GammaNode::create(match, 2); auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); @@ -65,7 +65,7 @@ TestWithoutMatch() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - auto gamma = jlm::rvsdg::gamma_node::create(lambda->fctargument(0), 2); + auto gamma = jlm::rvsdg::GammaNode::create(lambda->fctargument(0), 2); auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index c0a56dc5b..750d47269 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -30,7 +30,7 @@ TestGamma() auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); - auto gammaNode = jlm::rvsdg::gamma_node::create(p, 2); + auto gammaNode = jlm::rvsdg::GammaNode::create(p, 2); auto gammaInput1 = gammaNode->add_entryvar(x); auto gammaInput2 = gammaNode->add_entryvar(y); diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp index 1a58ab1fa..a02285788 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp +++ 
b/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp @@ -35,7 +35,7 @@ test_with_match() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->fctargument(0)); - auto gamma = jlm::rvsdg::gamma_node::create(match, 2); + auto gamma = jlm::rvsdg::GammaNode::create(match, 2); auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); @@ -77,7 +77,7 @@ test_without_match() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - auto gamma = jlm::rvsdg::gamma_node::create(lambda->fctargument(0), 2); + auto gamma = jlm::rvsdg::GammaNode::create(lambda->fctargument(0), 2); auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); @@ -122,7 +122,7 @@ test_gamma3() auto match = jlm::rvsdg::match(32, { { 0, 0 }, { 1, 1 } }, 2, 3, lambda->fctargument(0)); - auto gamma = jlm::rvsdg::gamma_node::create(match, 3); + auto gamma = jlm::rvsdg::GammaNode::create(match, 3); auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); auto ex = gamma->add_exitvar({ ev1->argument(0), ev1->argument(1), ev2->argument(2) }); diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp index dddc66e81..4698c9d5e 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp @@ -32,7 +32,7 @@ test() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->fctargument(0)); - auto gamma = jlm::rvsdg::gamma_node::create(match, 2); + auto gamma = 
jlm::rvsdg::GammaNode::create(match, 2); auto ev = gamma->add_entryvar(lambda->fctargument(1)); auto output = jlm::tests::create_testop(gamma->subregion(1), { ev->argument(1) }, { vt })[0]; auto ex = gamma->add_exitvar({ ev->argument(0), output }); diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index b8d5cd9bc..20c041d99 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -197,7 +197,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto itf = innerTheta->add_loopvar(otf->argument()); auto predicate = jlm::rvsdg::control_false(innerTheta->subregion()); - auto gamma = jlm::rvsdg::gamma_node::create(predicate, 2); + auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); auto ev = gamma->add_entryvar(itf->argument()); auto xv = gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); @@ -402,7 +402,7 @@ TestCallTypeClassifierRecursiveDirectCall() auto bitult = jlm::rvsdg::bitult_op::create(64, valueArgument, two); auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 2, bitult); - auto gammaNode = jlm::rvsdg::gamma_node::create(predicate, 2); + auto gammaNode = jlm::rvsdg::GammaNode::create(predicate, 2); auto nev = gammaNode->add_entryvar(valueArgument); auto resultev = gammaNode->add_entryvar(pointerArgument); auto fibev = gammaNode->add_entryvar(ctxVarFib); diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index c1b34f4ad..e6e1c0406 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -50,12 +50,12 @@ TestGamma() auto x = lambdaNode->fctargument(1); auto y = lambdaNode->fctargument(2); - auto gammaNode1 = jlm::rvsdg::gamma_node::create(c, 2); + auto gammaNode1 = jlm::rvsdg::GammaNode::create(c, 2); auto gammaInput1 = gammaNode1->add_entryvar(c); auto gammaInput2 = gammaNode1->add_entryvar(x); auto gammaInput3 = 
gammaNode1->add_entryvar(y); - auto gammaNode2 = jlm::rvsdg::gamma_node::create(gammaInput1->argument(0), 2); + auto gammaNode2 = jlm::rvsdg::GammaNode::create(gammaInput1->argument(0), 2); auto gammaInput4 = gammaNode2->add_entryvar(gammaInput2->argument(0)); auto gammaInput5 = gammaNode2->add_entryvar(gammaInput3->argument(0)); gammaNode2->add_exitvar({ gammaInput4->argument(0), gammaInput4->argument(1) }); @@ -162,7 +162,7 @@ TestCall() auto ioStateArgument = lambdaNode->fctargument(3); auto memoryStateArgument = lambdaNode->fctargument(4); - auto gammaNode = jlm::rvsdg::gamma_node::create(controlArgument, 2); + auto gammaNode = jlm::rvsdg::GammaNode::create(controlArgument, 2); auto gammaInputX = gammaNode->add_entryvar(xArgument); auto gammaInputY = gammaNode->add_entryvar(yArgument); auto gammaInputIOState = gammaNode->add_entryvar(ioStateArgument); @@ -251,7 +251,7 @@ TestCallWithMemoryStateNodes() auto lambdaEntrySplitResults = LambdaEntryMemoryStateSplitOperation::Create(*memoryStateArgument, 2); - auto gammaNode = jlm::rvsdg::gamma_node::create(controlArgument, 2); + auto gammaNode = jlm::rvsdg::GammaNode::create(controlArgument, 2); auto gammaInputX = gammaNode->add_entryvar(xArgument); auto gammaInputMemoryState1 = gammaNode->add_entryvar(lambdaEntrySplitResults[0]); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 4cf003ed9..94a8d1e6c 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -59,7 +59,7 @@ TestGamma() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto gamma = jlm::rvsdg::gamma_node::create(c, 2); + auto gamma = jlm::rvsdg::GammaNode::create(c, 2); auto ev1 = gamma->add_entryvar(x); auto ev2 = gamma->add_entryvar(y); auto ev3 = gamma->add_entryvar(x); @@ -97,7 +97,7 @@ TestGamma2() auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); 
auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto gamma = jlm::rvsdg::gamma_node::create(c, 2); + auto gamma = jlm::rvsdg::GammaNode::create(c, 2); gamma->add_entryvar(x); auto n1 = jlm::tests::create_testop(gamma->subregion(0), {}, { vt })[0]; diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 72e5f0740..09ea2519e 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -1892,7 +1892,7 @@ ValidatePhiTestSteensgaardAgnosticTopDown(const jlm::tests::PhiTest1 & test) assert(is(*lambdaExitMerge, 4, 1)); const StoreNonVolatileNode * storeNode = nullptr; - const jlm::rvsdg::gamma_node * gammaNode = nullptr; + const jlm::rvsdg::GammaNode * gammaNode = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto node = jlm::rvsdg::node_output::node(lambdaExitMerge->input(n)->origin()); @@ -1900,7 +1900,7 @@ ValidatePhiTestSteensgaardAgnosticTopDown(const jlm::tests::PhiTest1 & test) { storeNode = castedStoreNode; } - else if (auto castedGammaNode = dynamic_cast(node)) + else if (auto castedGammaNode = dynamic_cast(node)) { gammaNode = castedGammaNode; } diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 2cc7debf6..1e4ae15c6 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -84,7 +84,7 @@ test_gamma() auto u1 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; auto u2 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; - auto gamma = jlm::rvsdg::gamma_node::create(c, 2); + auto gamma = jlm::rvsdg::GammaNode::create(c, 2); auto ev1 = gamma->add_entryvar(u1); auto ev2 = gamma->add_entryvar(u2); diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index b5e27d5ba..4403c7a03 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ 
b/tests/jlm/llvm/opt/test-inlining.cpp @@ -62,7 +62,7 @@ test1() auto iOStateArgument = lambda->fctargument(2); auto memoryStateArgument = lambda->fctargument(3); - auto gamma = jlm::rvsdg::gamma_node::create(controlArgument, 2); + auto gamma = jlm::rvsdg::GammaNode::create(controlArgument, 2); auto gammaInputF1 = gamma->add_entryvar(d); auto gammaInputValue = gamma->add_entryvar(valueArgument); auto gammaInputIoState = gamma->add_entryvar(iOStateArgument); diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index dc09d320f..05dc778e9 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -41,7 +41,7 @@ test1() { jlm::rvsdg::bittype::Create(1) })[0]; auto predicate = jlm::rvsdg::match(1, { { 1, 0 } }, 1, 2, a); - auto gamma = jlm::rvsdg::gamma_node::create(predicate, 2); + auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); auto evx = gamma->add_entryvar(lvx->argument()); auto evy = gamma->add_entryvar(lvy->argument()); @@ -96,7 +96,7 @@ test2() auto n2 = jlm::tests::create_testop(theta->subregion(), { lv1->argument() }, { vt })[0]; auto predicate = jlm::rvsdg::match(1, { { 1, 0 } }, 1, 2, n1); - auto gamma = jlm::rvsdg::gamma_node::create(predicate, 2); + auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); auto ev1 = gamma->add_entryvar(n1); auto ev2 = gamma->add_entryvar(lv1->argument()); diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index 2c2434ccc..ad2fd468d 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -39,7 +39,7 @@ test_pullin_top() auto n4 = jlm::tests::create_testop(graph.root(), { c, n1 }, { ct })[0]; auto n5 = jlm::tests::create_testop(graph.root(), { n1, n3 }, { vt })[0]; - auto gamma = jlm::rvsdg::gamma_node::create(n4, 2); + auto gamma = jlm::rvsdg::GammaNode::create(n4, 2); gamma->add_entryvar(n4); auto ev = gamma->add_entryvar(n5); @@ -66,7 +66,7 @@ test_pullin_bottom() auto c = 
&jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto gamma = jlm::rvsdg::gamma_node::create(c, 2); + auto gamma = jlm::rvsdg::GammaNode::create(c, 2); auto ev = gamma->add_entryvar(x); gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); @@ -98,14 +98,14 @@ test_pull() auto croot = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; /* outer gamma */ - auto gamma1 = jlm::rvsdg::gamma_node::create(p, 2); + auto gamma1 = jlm::rvsdg::GammaNode::create(p, 2); auto ev1 = gamma1->add_entryvar(p); auto ev2 = gamma1->add_entryvar(croot); auto cg1 = jlm::tests::create_testop(gamma1->subregion(0), {}, { vt })[0]; /* inner gamma */ - auto gamma2 = jlm::rvsdg::gamma_node::create(ev1->argument(1), 2); + auto gamma2 = jlm::rvsdg::GammaNode::create(ev1->argument(1), 2); auto ev3 = gamma2->add_entryvar(ev2->argument(1)); auto cg2 = jlm::tests::create_testop(gamma2->subregion(0), {}, { vt })[0]; auto un = jlm::tests::create_testop(gamma2->subregion(1), { ev3->argument(1) }, { vt })[0]; diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 38dda2c74..5b3280e44 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -34,7 +34,7 @@ test_gamma() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto s = &jlm::tests::GraphImport::Create(graph, st, "s"); - auto gamma = jlm::rvsdg::gamma_node::create(c, 2); + auto gamma = jlm::rvsdg::GammaNode::create(c, 2); auto evx = gamma->add_entryvar(x); auto evs = gamma->add_entryvar(s); diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 33301f7f4..27413f03e 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -503,7 +503,7 @@ TestGamma() auto CtrlConstant = jlm::rvsdg::control_constant(graph->root(), 3, 1); auto entryvar1 = jlm::rvsdg::create_bitconstant(graph->root(), 
32, 5); auto entryvar2 = jlm::rvsdg::create_bitconstant(graph->root(), 32, 6); - jlm::rvsdg::gamma_node * rvsdgGammaNode = jlm::rvsdg::gamma_node::create( + auto rvsdgGammaNode = jlm::rvsdg::GammaNode::create( CtrlConstant, // predicate 3 // nalternatives ); diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index a8b274e90..f99ffbc4a 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -25,7 +25,7 @@ test_gamma(void) auto pred = match(2, { { 0, 0 }, { 1, 1 } }, 2, 3, cmp); - auto gamma = gamma_node::create(pred, 3); + auto gamma = GammaNode::create(pred, 3); auto ev0 = gamma->add_entryvar(v0); auto ev1 = gamma->add_entryvar(v1); auto ev2 = gamma->add_entryvar(v2); @@ -43,7 +43,7 @@ test_gamma(void) /* test entry and exit variable iterators */ - auto gamma3 = gamma_node::create(v3, 2); + auto gamma3 = GammaNode::create(v3, 2); assert(gamma3->begin_entryvar() == gamma3->end_entryvar()); assert(gamma3->begin_exitvar() == gamma3->end_exitvar()); } @@ -64,7 +64,7 @@ test_predicate_reduction(void) auto pred = jlm::rvsdg::control_constant(graph.root(), 3, 1); - auto gamma = gamma_node::create(pred, 3); + auto gamma = GammaNode::create(pred, 3); auto ev0 = gamma->add_entryvar(v0); auto ev1 = gamma->add_entryvar(v1); auto ev2 = gamma->add_entryvar(v2); @@ -93,7 +93,7 @@ test_invariant_reduction(void) auto pred = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), ""); auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); - auto gamma = jlm::rvsdg::gamma_node::create(pred, 2); + auto gamma = GammaNode::create(pred, 2); auto ev = gamma->add_entryvar(v); gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); @@ -119,7 +119,7 @@ test_control_constant_reduction() auto c = match(1, { { 0, 0 } }, 1, 2, x); - auto gamma = gamma_node::create(c, 2); + auto gamma = GammaNode::create(c, 2); auto t = jlm::rvsdg::control_true(gamma->subregion(0)); auto f = jlm::rvsdg::control_false(gamma->subregion(1)); @@ -157,7 
+157,7 @@ test_control_constant_reduction2() auto c = match(2, { { 3, 2 }, { 2, 1 }, { 1, 0 } }, 3, 4, import); - auto gamma = gamma_node::create(c, 4); + auto gamma = GammaNode::create(c, 4); auto t1 = jlm::rvsdg::control_true(gamma->subregion(0)); auto t2 = jlm::rvsdg::control_true(gamma->subregion(1)); @@ -192,7 +192,7 @@ TestRemoveGammaOutputsWhere() auto v2 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v3 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); - auto gammaNode = gamma_node::create(predicate, 2); + auto gammaNode = GammaNode::create(predicate, 2); auto gammaInput0 = gammaNode->add_entryvar(v0); auto gammaInput1 = gammaNode->add_entryvar(v1); auto gammaInput2 = gammaNode->add_entryvar(v2); @@ -254,7 +254,7 @@ TestPruneOutputs() auto v2 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v3 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); - auto gammaNode = gamma_node::create(predicate, 2); + auto gammaNode = GammaNode::create(predicate, 2); auto gammaInput0 = gammaNode->add_entryvar(v0); auto gammaInput1 = gammaNode->add_entryvar(v1); auto gammaInput2 = gammaNode->add_entryvar(v2); @@ -301,7 +301,7 @@ TestIsInvariant() auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); - auto gammaNode = gamma_node::create(predicate, 2); + auto gammaNode = GammaNode::create(predicate, 2); auto gammaInput0 = gammaNode->add_entryvar(v0); auto gammaInput1 = gammaNode->add_entryvar(v1); auto gammaInput2 = gammaNode->add_entryvar(v1); From 0d7b194775469d987ec325894119afa04d3e9ae3 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 9 Sep 2024 07:03:06 +0200 Subject: [PATCH 073/170] Rename gamma_input class to GammaInput (#613) --- .../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 6 +-- .../InterProceduralGraphConversion.cpp | 2 +- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 4 
+- jlm/llvm/opt/pull.cpp | 9 ++-- jlm/rvsdg/gamma.cpp | 7 ++- jlm/rvsdg/gamma.hpp | 49 +++++++++---------- 9 files changed, 39 insertions(+), 44 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index f8741e601..fcc51bded 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -45,7 +45,7 @@ distribute_constant(const rvsdg::simple_op & op, rvsdg::simple_output * out) break; } } - if (auto gi = dynamic_cast(user)) + if (auto gi = dynamic_cast(user)) { if (gi->node()->predicate() == gi) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 5d3472588..0bb5bd742 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -193,7 +193,7 @@ trace_edge( // end of region reached return res; } - else if (auto gi = dynamic_cast(user)) + else if (auto gi = dynamic_cast(user)) { auto gn = gi->node(); auto ip = gn->add_entryvar(new_edge); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 172c5065e..19d18374c 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -250,12 +250,12 @@ depends_on(jlm::rvsdg::output * output, jlm::rvsdg::node * node) return false; } -jlm::rvsdg::gamma_input * +rvsdg::GammaInput * get_entryvar(jlm::rvsdg::output * origin, rvsdg::GammaNode * gamma) { for (auto user : *origin) { - auto gi = dynamic_cast(user); + auto gi = dynamic_cast(user); if (gi && gi->node() == gamma) { return gi; @@ -269,7 +269,7 @@ merge_gamma(rvsdg::GammaNode * gamma) { for (auto user : *gamma->predicate()->origin()) { - auto gi = dynamic_cast(user); + auto gi = dynamic_cast(user); if (gi && gi != gamma->predicate()) { // other gamma depending on same predicate diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp 
b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 9f912547e..ca1fdc574 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -707,7 +707,7 @@ Convert( * Add gamma inputs. */ auto & demandSet = demandMap.Lookup(branchAggregationNode); - std::unordered_map gammaInputMap; + std::unordered_map gammaInputMap; for (auto & v : demandSet.InputVariables().Variables()) gammaInputMap[&v] = gamma->add_entryvar(regionalizedVariableMap.GetTopVariableMap().lookup(&v)); diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index e367611b7..f0c1eba7e 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -308,7 +308,7 @@ node::ComputeCallSummary() const continue; } - if (auto gamma_input = dynamic_cast(input)) + if (auto gamma_input = dynamic_cast(input)) { for (auto & argument : *gamma_input) worklist.insert(worklist.end(), argument.begin(), argument.end()); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index d35788bad..29eada91b 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -205,7 +205,7 @@ invert(jlm::rvsdg::theta_node * otheta) } for (size_t n = 1; n < ogamma->ninputs(); n++) { - auto oev = static_cast(ogamma->input(n)); + auto oev = util::AssertedCast(ogamma->input(n)); if (auto argument = to_argument(oev->origin())) { r1map.insert(oev->argument(1), nlvs[argument->input()]->argument()); @@ -244,7 +244,7 @@ invert(jlm::rvsdg::theta_node * otheta) } for (size_t n = 1; n < ogamma->ninputs(); n++) { - auto oev = static_cast(ogamma->input(n)); + auto oev = util::AssertedCast(ogamma->input(n)); if (auto argument = to_argument(oev->origin())) { r1map.insert(oev->argument(0), nlvs[argument->input()]); diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 2b6078c3d..7d514ae83 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -70,7 +70,7 @@ single_successor(const 
jlm::rvsdg::node * node) } static void -remove(jlm::rvsdg::gamma_input * input) +remove(rvsdg::GammaInput * input) { auto gamma = input->node(); @@ -119,7 +119,7 @@ cleanup(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) for (size_t n = 0; n < node->noutputs(); n++) { while (node->output(n)->nusers() != 0) - remove(static_cast(*node->output(n)->begin())); + remove(util::AssertedCast(*node->output(n)->begin())); } remove(node); } @@ -218,13 +218,12 @@ is_used_in_nsubregions(const rvsdg::GammaNode * gamma, const jlm::rvsdg::node * JLM_ASSERT(single_successor(node)); /* collect all gamma inputs */ - std::unordered_set inputs; + std::unordered_set inputs; for (size_t n = 0; n < node->noutputs(); n++) { for (const auto & user : *(node->output(n))) { - JLM_ASSERT(is(*user)); - inputs.insert(static_cast(user)); + inputs.insert(util::AssertedCast(user)); } } diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index a41c01756..70d595167 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -267,8 +267,7 @@ GammaOperation::operator==(const operation & other) const noexcept /* gamma input */ -gamma_input::~gamma_input() noexcept -{} +GammaInput::~GammaInput() noexcept = default; /* gamma output */ @@ -320,7 +319,7 @@ GammaNode::entryvar_iterator::operator++() noexcept return *this; } - input_ = static_cast(node->input(++index)); + input_ = static_cast(node->input(++index)); return *this; } @@ -378,7 +377,7 @@ GammaArgument::~GammaArgument() noexcept = default; GammaArgument & GammaArgument::Copy(rvsdg::region & region, structural_input * input) { - auto gammaInput = util::AssertedCast(input); + auto gammaInput = util::AssertedCast(input); return Create(region, *gammaInput); } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index f77d33159..332307693 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -106,7 +106,7 @@ class GammaOperation final : public structural_op /* gamma node */ -class gamma_input; +class GammaInput; class gamma_output; 
class GammaNode : public structural_node @@ -120,11 +120,11 @@ class GammaNode : public structural_node class entryvar_iterator { public: - inline constexpr entryvar_iterator(jlm::rvsdg::gamma_input * input) noexcept + constexpr entryvar_iterator(GammaInput * input) noexcept : input_(input) {} - inline jlm::rvsdg::gamma_input * + GammaInput * input() const noexcept { return input_; @@ -153,20 +153,20 @@ class GammaNode : public structural_node return !(*this == other); } - inline jlm::rvsdg::gamma_input & + GammaInput & operator*() noexcept { return *input_; } - inline jlm::rvsdg::gamma_input * + GammaInput * operator->() noexcept { return input_; } private: - jlm::rvsdg::gamma_input * input_; + GammaInput * input_; }; class exitvar_iterator @@ -228,7 +228,7 @@ class GammaNode : public structural_node return new GammaNode(predicate, nalternatives); } - jlm::rvsdg::gamma_input * + inline GammaInput * predicate() const noexcept; inline size_t @@ -244,7 +244,7 @@ class GammaNode : public structural_node return node::noutputs(); } - jlm::rvsdg::gamma_input * + inline GammaInput * entryvar(size_t index) const noexcept; jlm::rvsdg::gamma_output * @@ -280,7 +280,7 @@ class GammaNode : public structural_node return exitvar_iterator(nullptr); } - jlm::rvsdg::gamma_input * + inline GammaInput * add_entryvar(jlm::rvsdg::output * origin); jlm::rvsdg::gamma_output * @@ -317,18 +317,15 @@ class GammaNode : public structural_node /* gamma input */ -class gamma_input final : public structural_input +class GammaInput final : public structural_input { friend GammaNode; public: - virtual ~gamma_input() noexcept; + ~GammaInput() noexcept override; private: - inline gamma_input( - GammaNode * node, - jlm::rvsdg::output * origin, - std::shared_ptr type) + GammaInput(GammaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : structural_input(node, origin, std::move(type)) {} @@ -456,8 +453,8 @@ class gamma_output final : public structural_output inline 
GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) : structural_node(GammaOperation(nalternatives), predicate->region(), nalternatives) { - node::add_input(std::unique_ptr( - new gamma_input(this, predicate, ctltype::Create(nalternatives)))); + node::add_input( + std::unique_ptr(new GammaInput(this, predicate, ctltype::Create(nalternatives)))); } /** @@ -474,12 +471,12 @@ class GammaArgument final : public RegionArgument Copy(rvsdg::region & region, structural_input * input) override; private: - GammaArgument(rvsdg::region & region, gamma_input & input) + GammaArgument(rvsdg::region & region, GammaInput & input) : RegionArgument(®ion, &input, input.Type()) {} static GammaArgument & - Create(rvsdg::region & region, gamma_input & input) + Create(rvsdg::region & region, GammaInput & input) { auto gammaArgument = new GammaArgument(region, input); region.append_argument(gammaArgument); @@ -514,16 +511,16 @@ class GammaResult final : public RegionResult } }; -inline jlm::rvsdg::gamma_input * +inline GammaInput * GammaNode::predicate() const noexcept { - return static_cast(structural_node::input(0)); + return util::AssertedCast(structural_node::input(0)); } -inline jlm::rvsdg::gamma_input * +inline GammaInput * GammaNode::entryvar(size_t index) const noexcept { - return static_cast(node::input(index + 1)); + return util::AssertedCast(node::input(index + 1)); } inline jlm::rvsdg::gamma_output * @@ -532,12 +529,12 @@ GammaNode::exitvar(size_t index) const noexcept return static_cast(node::output(index)); } -inline jlm::rvsdg::gamma_input * +inline GammaInput * GammaNode::add_entryvar(jlm::rvsdg::output * origin) { auto input = - node::add_input(std::unique_ptr(new gamma_input(this, origin, origin->Type()))); - auto gammaInput = static_cast(input); + node::add_input(std::unique_ptr(new GammaInput(this, origin, origin->Type()))); + auto gammaInput = util::AssertedCast(input); for (size_t n = 0; n < nsubregions(); n++) { From 
3aa8f056cdc0df88c3f5ac5115d8a92b7153dea7 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 10 Sep 2024 05:58:35 +0200 Subject: [PATCH 074/170] Ensure deterministic basic block name in LLVM output (#614) 1. Cleans up the basic block conversion in the LLVM backend 2. Assigns name "bb[0...]" to the basic block in the output This PR is required for #586 in order to better follow the output. It is also part of the effort in #529. Example output: ``` ; Function Attrs: noinline nounwind optnone uwtable define i64 @fac(i64 noundef %0) #0 { bb0: %1 = alloca i64, align 8 %2 = alloca i64, align 8 store i64 %0, ptr %2, align 8 store i64 1, ptr %1, align 8 br label %bb1 bb1: ; preds = %bb4, %bb0 %3 = load i64, ptr %1, align 8 %4 = load i64, ptr %2, align 8 %5 = icmp ugt i64 %4, 1 br i1 %5, label %bb3, label %bb2 bb2: ; preds = %bb1 br label %bb4 bb3: ; preds = %bb1 %6 = mul i64 %3, %4 store i64 %6, ptr %1, align 8 %7 = load i64, ptr %2, align 8 %8 = add i64 %7, -1 store i64 %8, ptr %2, align 8 br label %bb4 bb4: ; preds = %bb3, %bb2 %9 = phi i1 [ false, %bb2 ], [ true, %bb3 ] br i1 %9, label %bb1, label %bb5 bb5: ; preds = %bb4 %10 = load i64, ptr %1, align 8 ret i64 %10 } ``` --- jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 35 ++++++++++++++++++-------- 1 file changed, 25 insertions(+), 10 deletions(-) diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 630cfbdc0..8301b3f32 100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -353,6 +353,30 @@ convert_attributes(const function_node & f, context & ctx) return ::llvm::AttributeList::get(llvmctx, fctset, retset, argsets); } +static std::vector +ConvertBasicBlocks( + const llvm::cfg & controlFlowGraph, + ::llvm::Function & function, + jlm2llvm::context & context) +{ + auto nodes = breadth_first(controlFlowGraph); + + uint64_t basicBlockCounter = 0; + for (const auto & node : nodes) + { + if (node == controlFlowGraph.entry()) + 
continue; + if (node == controlFlowGraph.exit()) + continue; + + auto name = util::strfmt("bb", basicBlockCounter++); + auto * basicBlock = ::llvm::BasicBlock::Create(function.getContext(), name, &function); + context.insert(node, basicBlock); + } + + return nodes; +} + static inline void convert_cfg(llvm::cfg & cfg, ::llvm::Function & f, context & ctx) { @@ -369,17 +393,8 @@ convert_cfg(llvm::cfg & cfg, ::llvm::Function & f, context & ctx) }; straighten(cfg); - auto nodes = breadth_first(cfg); - - /* create basic blocks */ - for (const auto & node : nodes) - { - if (node == cfg.entry() || node == cfg.exit()) - continue; - auto bb = ::llvm::BasicBlock::Create(f.getContext(), util::strfmt("bb", &node), &f); - ctx.insert(node, bb); - } + auto nodes = ConvertBasicBlocks(cfg, f, ctx); add_arguments(cfg, f, ctx); From dbfcef09a3e9cba3632486f3e1553824f36aa360 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 10 Sep 2024 20:45:03 +0200 Subject: [PATCH 075/170] Rename gamma_output class to GammaOutput (#615) --- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- jlm/llvm/ir/operators/call.cpp | 6 ++-- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/rvsdg/gamma.cpp | 7 ++-- jlm/rvsdg/gamma.hpp | 40 ++++++++++----------- tests/jlm/rvsdg/test-gamma.cpp | 4 +-- 7 files changed, 31 insertions(+), 32 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 19d18374c..963996b10 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -299,7 +299,7 @@ merge_gamma(rvsdg::GammaNode * gamma) auto ev = gamma->entryvar(i); if (is_output_of(ev->origin(), other_gamma)) { - auto go = dynamic_cast(ev->origin()); + auto go = dynamic_cast(ev->origin()); for (size_t j = 0; j < gamma->nsubregions(); ++j) { rmap[j].insert(ev->argument(j), go->result(j)->origin()); diff --git a/jlm/llvm/ir/operators/call.cpp 
b/jlm/llvm/ir/operators/call.cpp index b8ba9bb90..ee353ef4f 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -21,7 +21,7 @@ static rvsdg::input * invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutputs); static rvsdg::structural_input * -invariantInput(const rvsdg::gamma_output & output, InvariantOutputMap & invariantOutputs) +invariantInput(const rvsdg::GammaOutput & output, InvariantOutputMap & invariantOutputs) { size_t n; rvsdg::structural_input * input = nullptr; @@ -106,7 +106,7 @@ invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutpu return invariantInput(*thetaInput->output(), invariantOutputs); } - if (auto gammaOutput = dynamic_cast(&output)) + if (auto gammaOutput = dynamic_cast(&output)) return invariantInput(*gammaOutput, invariantOutputs); return nullptr; @@ -181,7 +181,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) continue; } - if (auto gammaOutput = dynamic_cast(origin)) + if (auto gammaOutput = dynamic_cast(origin)) { if (auto input = invariantInput(*gammaOutput)) { diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 0c5f2ba8c..e0eca37d6 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -198,7 +198,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto gammaOutput = dynamic_cast(&output)) + if (auto gammaOutput = dynamic_cast(&output)) { MarkOutput(*gammaOutput->node()->predicate()->origin()); for (const auto & result : gammaOutput->results) diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index be5ed51a4..21db7c486 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -245,7 +245,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":out", index); } - if (is(Output_)) + if 
(is(Output_)) { auto dbgstr = jlm::rvsdg::node_output::node(Output_)->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 70d595167..dd27dea69 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -271,11 +271,10 @@ GammaInput::~GammaInput() noexcept = default; /* gamma output */ -gamma_output::~gamma_output() noexcept -{} +GammaOutput::~GammaOutput() noexcept = default; bool -gamma_output::IsInvariant(rvsdg::output ** invariantOrigin) const noexcept +GammaOutput::IsInvariant(rvsdg::output ** invariantOrigin) const noexcept { auto argument = dynamic_cast(result(0)->origin()); if (!argument) @@ -386,7 +385,7 @@ GammaResult::~GammaResult() noexcept = default; GammaResult & GammaResult::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) { - auto gammaOutput = util::AssertedCast(output); + auto gammaOutput = util::AssertedCast(output); return GammaResult::Create(*origin.region(), origin, *gammaOutput); } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 332307693..937a68769 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -107,7 +107,7 @@ class GammaOperation final : public structural_op /* gamma node */ class GammaInput; -class gamma_output; +class GammaOutput; class GammaNode : public structural_node { @@ -172,11 +172,11 @@ class GammaNode : public structural_node class exitvar_iterator { public: - inline constexpr exitvar_iterator(jlm::rvsdg::gamma_output * output) noexcept + constexpr explicit exitvar_iterator(GammaOutput * output) noexcept : output_(output) {} - inline jlm::rvsdg::gamma_output * + [[nodiscard]] GammaOutput * output() const noexcept { return output_; @@ -205,20 +205,20 @@ class GammaNode : public structural_node return !(*this == other); } - inline gamma_output & + GammaOutput & operator*() noexcept { return *output_; } - inline gamma_output * + GammaOutput * operator->() noexcept { return output_; } private: - 
jlm::rvsdg::gamma_output * output_; + GammaOutput * output_; }; public: @@ -247,7 +247,7 @@ class GammaNode : public structural_node inline GammaInput * entryvar(size_t index) const noexcept; - jlm::rvsdg::gamma_output * + [[nodiscard]] inline GammaOutput * exitvar(size_t index) const noexcept; inline GammaNode::entryvar_iterator @@ -283,14 +283,14 @@ class GammaNode : public structural_node inline GammaInput * add_entryvar(jlm::rvsdg::output * origin); - jlm::rvsdg::gamma_output * + inline GammaOutput * add_exitvar(const std::vector & values); /** * Removes all gamma outputs and their respective results. The outputs must have no users and * match the condition specified by \p match. * - * @tparam F A type that supports the function call operator: bool operator(const gamma_output&) + * @tparam F A type that supports the function call operator: bool operator(const GammaOutput&) * @param match Defines the condition of the elements to remove. */ template @@ -303,7 +303,7 @@ class GammaNode : public structural_node void PruneOutputs() { - auto match = [](const gamma_output &) + auto match = [](const GammaOutput &) { return true; }; @@ -378,14 +378,14 @@ class GammaInput final : public structural_input /* gamma output */ -class gamma_output final : public structural_output +class GammaOutput final : public structural_output { friend GammaNode; public: - virtual ~gamma_output() noexcept; + ~GammaOutput() noexcept override; - inline gamma_output(GammaNode * node, std::shared_ptr type) + GammaOutput(GammaNode * node, std::shared_ptr type) : structural_output(node, std::move(type)) {} @@ -495,7 +495,7 @@ class GammaResult final : public RegionResult ~GammaResult() noexcept override; private: - GammaResult(rvsdg::region & region, rvsdg::output & origin, gamma_output & gammaOutput) + GammaResult(rvsdg::region & region, rvsdg::output & origin, GammaOutput & gammaOutput) : RegionResult(®ion, &origin, &gammaOutput, origin.Type()) {} @@ -503,7 +503,7 @@ class GammaResult final : 
public RegionResult Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; static GammaResult & - Create(rvsdg::region & region, rvsdg::output & origin, gamma_output & gammaOutput) + Create(rvsdg::region & region, rvsdg::output & origin, GammaOutput & gammaOutput) { auto gammaResult = new GammaResult(region, origin, gammaOutput); origin.region()->append_result(gammaResult); @@ -523,10 +523,10 @@ GammaNode::entryvar(size_t index) const noexcept return util::AssertedCast(node::input(index + 1)); } -inline jlm::rvsdg::gamma_output * +inline GammaOutput * GammaNode::exitvar(size_t index) const noexcept { - return static_cast(node::output(index)); + return static_cast(node::output(index)); } inline GammaInput * @@ -544,14 +544,14 @@ GammaNode::add_entryvar(jlm::rvsdg::output * origin) return gammaInput; } -inline jlm::rvsdg::gamma_output * +inline GammaOutput * GammaNode::add_exitvar(const std::vector & values) { if (values.size() != nsubregions()) throw jlm::util::error("Incorrect number of values."); const auto & type = values[0]->Type(); - node::add_output(std::make_unique(this, type)); + node::add_output(std::make_unique(this, type)); auto output = exitvar(nexitvars() - 1); for (size_t n = 0; n < nsubregions(); n++) @@ -569,7 +569,7 @@ GammaNode::RemoveGammaOutputsWhere(const F & match) // iterate backwards to avoid the invalidation of 'n' by RemoveOutput() for (size_t n = noutputs() - 1; n != static_cast(-1); n--) { - auto & gammaOutput = *util::AssertedCast(output(n)); + auto & gammaOutput = *util::AssertedCast(output(n)); if (gammaOutput.nusers() == 0 && match(gammaOutput)) { for (size_t r = 0; r < nsubregions(); r++) diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index f99ffbc4a..2f58ccf9c 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -215,7 +215,7 @@ TestRemoveGammaOutputsWhere() // Remove gammaOutput1 gammaNode->RemoveGammaOutputsWhere( - [&](const gamma_output & 
output) + [&](const GammaOutput & output) { return output.index() == gammaOutput1->index(); }); @@ -227,7 +227,7 @@ TestRemoveGammaOutputsWhere() // Try to remove gammaOutput2. This should result in no change as gammaOutput2 still has users. gammaNode->RemoveGammaOutputsWhere( - [&](const gamma_output & output) + [&](const GammaOutput & output) { return output.index() == gammaOutput2->index(); }); From 3716d623f50458e92ed36d34d4791541d2590375 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Wed, 11 Sep 2024 15:51:28 +0200 Subject: [PATCH 076/170] Add dot output to jlm-opt, using the GraphWriter API (#611) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds the command line option `--output-format=dot`, which causes the final RVSDG to be dumped as the GraphViz dot format. One non-standard thing about the output is that each region becomes a separate graph, but with globally unique ids. This is done to make it easier to focus on parts of the RVSDG at a time, but it requires a renderer that expects multiple graphs in a single file. I have made a webapp that takes concatenated graphs as input, and creates a tree based on which graphs are referenced as subgraphs in other graphs. Selecting a node, edge or port will show all its properties, and highlight other nodes/edges they reference. The viewer should also work with any other GraphViz you may have. See the webapp [here](https://dot-tree-viz.vercel.app/). Repo for webapp [here](https://github.com/haved/dot-tree-viz). The webapp needs more polish to make it pleasant to work with complex graphs (resizing views, rendering subgraphs inside graphs, rendering \n properly).
Example output (long) ``` digraph graph0 { node[shape=box style=filled fillcolor=white]; penwidth=6; label="Type graph" node0 [label=ptr tooltip="5e0edd496b28" ]; node1 [label=iostate tooltip="5e0edd496b38" ]; node2 [label=mem tooltip="5e0edd496b40" ]; node3 [label=bit32 tooltip="5e0edd496e80" ]; node4 [label="ctl(2)" tooltip="5e0edd499200" ]; node5 [label="ctl(3)" tooltip="5e0edd499210" ]; node6 [label=bit1 tooltip="5e0edd496c90" ]; } digraph graph1 { node[shape=box style=filled fillcolor=white]; penwidth=6; label="RVSDG root graph" tooltip="5e0f07188b80" node7 [shape=plain style=solid label=<
LAMBDA[lzma_decode]
graph2
> tooltip="5e0f0718e890" ]; { rank=sink; r13 [label=r13 tooltip="5e0f0719eec0" ]; } node7:o0:s -> r13[id=edge48 ]; } digraph graph2 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f07184ac0" { rank=source; a0 [label=a0 type=node1 tooltip="5e0f071a0b00" ]; a1 [label=a1 type=node2 tooltip="5e0f071a0c80" ]; a0 -> a1[style=invis]; } node8 [shape=plain style=solid label=<
BITS32(2)
> tooltip="5e0f071a70c0" ]; node9 [shape=plain style=solid label=<
undef
> tooltip="5e0f071a20e0" ]; node10 [shape=plain style=solid label=<
BITS32(1)
> tooltip="5e0f0718f7d0" ]; node11 [shape=plain style=solid label=<
BITS32(1)
> tooltip="5e0f0718e420" ]; node12 [shape=plain style=solid label=<
undef
> tooltip="5e0f0718e9c0" ]; node13 [shape=plain style=solid label=<
ALLOCA[bit32]
> tooltip="5e0f0718f710" ]; node14 [shape=plain style=solid label=<
MemoryStateMerge
> tooltip="5e0f0718e760" ]; node15 [shape=plain style=solid label=<
Store
> tooltip="5e0f07192640" ]; node16 [shape=plain style=solid label=<
Load
> tooltip="5e0f071926d0" ]; node17 [shape=plain style=solid label=<
MATCH[1 -> 1, 0 -> 0, 2]
> tooltip="5e0f071a1da0" ]; node18 [shape=plain style=solid label=<
GAMMA
graph3 graph4 graph5
> tooltip="5e0f0718e590" ]; node22 [shape=plain style=solid label=<
GAMMA
graph6 graph7
> tooltip="5e0f071924c0" ]; { rank=sink; r10 [label=r10 tooltip="5e0f071a72d0" ]; r11 [label=r11 tooltip="5e0f071a7320" ]; r10 -> r11[style=invis]; r12 [label=r12 tooltip="5e0f071a7370" ]; r11 -> r12[style=invis]; } node11:o4:s -> node13:i0:n[id=edge32 ]; node13:o7:s -> node14:i1:n[id=edge33 color="#FF0000" ]; a1 -> node14:i2:n[id=edge34 color="#FF0000" ]; node13:o6:s -> node15:i3:n[id=edge35 ]; node10:o3:s -> node15:i4:n[id=edge36 ]; node14:o8:s -> node15:i5:n[id=edge37 color="#FF0000" ]; node13:o6:s -> node16:i6:n[id=edge38 ]; node15:o9:s -> node16:i7:n[id=edge39 color="#FF0000" ]; node16:o10:s -> node17:i8:n[id=edge40 ]; node17:o12:s -> node18:i9:n[id=edge41 ]; node18:o13:s -> node22:i10:n[id=edge42 ]; node13:o6:s -> node22:i11:n[id=edge43 ]; node16:o11:s -> node22:i12:n[id=edge44 color="#FF0000" ]; node8:o1:s -> r10[id=edge45 ]; a0 -> r11[id=edge46 color="#00FF00" ]; node22:o17:s -> r12[id=edge47 color="#FF0000" ]; } digraph graph3 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a2260" node19 [shape=plain style=solid label=<
CTL(1)
> tooltip="5e0f071a25c0" ]; { rank=sink; r0 [label=r0 tooltip="5e0f071a2c80" ]; } node19:o14:s -> r0[id=edge0 ]; } digraph graph4 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a2320" node20 [shape=plain style=solid label=<
CTL(1)
> tooltip="5e0f071a28a0" ]; { rank=sink; r1 [label=r1 tooltip="5e0f071a2cd0" ]; } node20:o15:s -> r1[id=edge1 ]; } digraph graph5 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a23e0" node21 [shape=plain style=solid label=<
CTL(0)
> tooltip="5e0f071a2a50" ]; { rank=sink; r2 [label=r2 tooltip="5e0f071a2dd0" ]; } node21:o16:s -> r2[id=edge2 ]; } digraph graph6 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a2ef0" { rank=source; a2 [label=a2 type=node0 tooltip="5e0f0719c1c0" ]; a3 [label=a3 type=node2 tooltip="5e0f0719aac0" ]; a2 -> a3[style=invis]; } { rank=sink; r3 [label=r3 tooltip="5e0f071a35f0" ]; } a3 -> r3[id=edge3 color="#FF0000" ]; } digraph graph7 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a2fb0" { rank=source; a4 [label=a4 type=node0 tooltip="5e0f0719c2b0" ]; a5 [label=a5 type=node2 tooltip="5e0f0719ae60" ]; a4 -> a5[style=invis]; } node23 [shape=plain style=solid label=<
THETA
graph8
> tooltip="5e0f071a33b0" ]; { rank=sink; r9 [label=r9 tooltip="5e0f071a7070" ]; } a4 -> node23:i13:n[id=edge29 ]; a5 -> node23:i14:n[id=edge30 color="#FF0000" ]; node23:o19:s -> r9[id=edge31 color="#FF0000" ]; } digraph graph8 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a3480" { rank=source; a6 [label=a6 type=node0 tooltip="5e0f07195020" ]; a7 [label=a7 type=node2 tooltip="5e0f0719ab40" ]; a6 -> a7[style=invis]; } node24 [shape=plain style=solid label=<
Load
> tooltip="5e0f071a3ee0" ]; node25 [shape=plain style=solid label=<
BITS32(5)
> tooltip="5e0f071a6690" ]; node26 [shape=plain style=solid label=<
BITS32(10)
> tooltip="5e0f071a4240" ]; node27 [shape=plain style=solid label=<
BitSlt32
> tooltip="5e0f071a4550" ]; node28 [shape=plain style=solid label=<
MATCH[1 -> 1, 0]
> tooltip="5e0f071a48e0" ]; node29 [shape=plain style=solid label=<
GAMMA
graph9 graph10
> tooltip="5e0f071a4be0" ]; node34 [shape=plain style=solid label=<
Load
> tooltip="5e0f071a64c0" ]; node35 [shape=plain style=solid label=<
BitSlt32
> tooltip="5e0f071a6880" ]; node36 [shape=plain style=solid label=<
MATCH[1 -> 1, 0]
> tooltip="5e0f071a6c10" ]; { rank=sink; r6 [label=r6 tooltip="5e0f071a3710" ]; r7 [label=r7 tooltip="5e0f071a39f0" ]; r6 -> r7[style=invis]; r8 [label=r8 tooltip="5e0f071a3d90" ]; r7 -> r8[style=invis]; } a6 -> node24:i15:n[id=edge13 ]; a7 -> node24:i16:n[id=edge14 color="#FF0000" ]; node24:o20:s -> node27:i17:n[id=edge15 ]; node26:o23:s -> node27:i18:n[id=edge16 ]; node27:o24:s -> node28:i19:n[id=edge17 ]; node28:o25:s -> node29:i20:n[id=edge18 ]; a6 -> node29:i21:n[id=edge19 ]; node24:o21:s -> node29:i22:n[id=edge20 color="#FF0000" ]; a6 -> node34:i30:n[id=edge21 ]; node29:o26:s -> node34:i31:n[id=edge22 color="#FF0000" ]; node34:o32:s -> node35:i32:n[id=edge23 ]; node25:o22:s -> node35:i33:n[id=edge24 ]; node35:o34:s -> node36:i34:n[id=edge25 ]; node36:o35:s -> r6[id=edge26 ]; a6 -> r7[id=edge27 ]; node34:o33:s -> r8[id=edge28 color="#FF0000" ]; } digraph graph9 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a4c90" { rank=source; a8 [label=a8 type=node0 tooltip="5e0f0719c140" ]; a9 [label=a9 type=node2 tooltip="5e0f0719b190" ]; a8 -> a9[style=invis]; } { rank=sink; r4 [label=r4 tooltip="5e0f071a63b0" ]; } a9 -> r4[id=edge4 color="#FF0000" ]; } digraph graph10 { node[shape=box style=filled fillcolor=white]; penwidth=6; tooltip="5e0f071a4d30" { rank=source; a10 [label=a10 type=node0 tooltip="5e0f0719af20" ]; a11 [label=a11 type=node2 tooltip="5e0f0719c4a0" ]; a10 -> a11[style=invis]; } node30 [shape=plain style=solid label=<
Load
> tooltip="5e0f071a53b0" ]; node31 [shape=plain style=solid label=<
BITS32(1)
> tooltip="5e0f071a57c0" ]; node32 [shape=plain style=solid label=<
BitAdd32
> tooltip="5e0f071a5b60" ]; node33 [shape=plain style=solid label=<
Store
> tooltip="5e0f071a5f20" ]; { rank=sink; r5 [label=r5 tooltip="5e0f071a6400" ]; } a10 -> node30:i23:n[id=edge5 ]; a11 -> node30:i24:n[id=edge6 color="#FF0000" ]; node30:o27:s -> node32:i25:n[id=edge7 ]; node31:o29:s -> node32:i26:n[id=edge8 ]; a10 -> node33:i27:n[id=edge9 ]; node32:o30:s -> node33:i28:n[id=edge10 ]; node30:o28:s -> node33:i29:n[id=edge11 color="#FF0000" ]; node33:o31:s -> r5[id=edge12 color="#FF0000" ]; } ```
--------- Co-authored-by: Håvard Krogstie --- jlm/llvm/Makefile.sub | 3 + jlm/llvm/backend/dot/DotWriter.cpp | 206 ++++++++++++++++++ jlm/llvm/backend/dot/DotWriter.hpp | 29 +++ jlm/tooling/Command.cpp | 29 +++ jlm/tooling/Command.hpp | 6 + jlm/tooling/CommandLine.cpp | 9 +- jlm/tooling/CommandLine.hpp | 1 + jlm/util/GraphWriter.cpp | 133 ++++++----- jlm/util/GraphWriter.hpp | 56 +++-- tests/jlm/llvm/backend/dot/DotWriterTests.cpp | 105 +++++++++ .../alias-analyses/TestPointerObjectSet.cpp | 10 +- tests/jlm/util/TestGraphWriter.cpp | 9 + 12 files changed, 519 insertions(+), 77 deletions(-) create mode 100644 jlm/llvm/backend/dot/DotWriter.cpp create mode 100644 jlm/llvm/backend/dot/DotWriter.hpp create mode 100644 tests/jlm/llvm/backend/dot/DotWriterTests.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 69b660e9e..e22999ad6 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -5,6 +5,7 @@ libllvm_SOURCES = \ jlm/llvm/backend/jlm2llvm/instruction.cpp \ jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp \ jlm/llvm/backend/jlm2llvm/type.cpp \ + jlm/llvm/backend/dot/DotWriter.cpp \ jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp \ \ jlm/llvm/frontend/ControlFlowRestructuring.cpp \ @@ -123,6 +124,7 @@ libllvm_HEADERS = \ jlm/llvm/ir/operators/sext.hpp \ jlm/llvm/ir/operators/lambda.hpp \ jlm/llvm/ir/aggregation.hpp \ + jlm/llvm/backend/dot/DotWriter.hpp \ jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.hpp \ jlm/llvm/backend/rvsdg2jlm/context.hpp \ jlm/llvm/backend/jlm2llvm/jlm2llvm.hpp \ @@ -131,6 +133,7 @@ libllvm_HEADERS = \ jlm/llvm/backend/jlm2llvm/context.hpp \ libllvm_TESTS += \ + tests/jlm/llvm/backend/dot/DotWriterTests \ tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma \ tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma \ tests/jlm/llvm/backend/llvm/r2j/test-recursive-data \ diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp new file mode 100644 index 000000000..66c65bd23 --- /dev/null +++ 
b/jlm/llvm/backend/dot/DotWriter.cpp @@ -0,0 +1,206 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. + */ + +#include + +#include +#include +#include +#include +#include +#include +#include + +namespace jlm::llvm::dot +{ +/** + * Creates a node in the \p typeGraph representing the given \p type, + * or returns such a node if it has already been created. + * The function is recursive, and will create nodes for subtypes of aggregate types. + */ +static util::Node & +GetOrCreateTypeGraphNode(const rvsdg::type & type, util::Graph & typeGraph) +{ + // If the type already has a corresponding node, return it + if (auto * graphElement = typeGraph.GetElementFromProgramObject(type)) + { + auto * node = reinterpret_cast(graphElement); + JLM_ASSERT(node); + return *node; + } + + auto & node = typeGraph.CreateNode(); + node.SetProgramObject(type); + node.SetLabel(type.debug_string()); + + // Some types get special handling, such as adding incoming edges from aggregate types + if (rvsdg::is(type) || rvsdg::is(type) + || rvsdg::is(type) || rvsdg::is(type) || rvsdg::is(type)) + { + // No need to provide any information beyond the debug string + } + else if (auto arrayType = dynamic_cast(&type)) + { + auto & elementTypeNode = GetOrCreateTypeGraphNode(arrayType->element_type(), typeGraph); + typeGraph.CreateDirectedEdge(elementTypeNode, node); + } + else if (auto structType = dynamic_cast(&type)) + { + auto & structDeclaration = structType->GetDeclaration(); + for (size_t n = 0; n < structDeclaration.NumElements(); n++) + { + auto & elementTypeNode = GetOrCreateTypeGraphNode(structDeclaration.GetElement(n), typeGraph); + typeGraph.CreateDirectedEdge(elementTypeNode, node); + } + } + else if (auto vectorType = dynamic_cast(&type)) + { + auto & elementTypeNode = GetOrCreateTypeGraphNode(vectorType->type(), typeGraph); + typeGraph.CreateDirectedEdge(elementTypeNode, node); + } + else if (auto functionType = dynamic_cast(&type)) + { + for (size_t n 
= 0; n < functionType->NumArguments(); n++) + { + auto & argumentTypeNode = GetOrCreateTypeGraphNode(functionType->ArgumentType(n), typeGraph); + auto & edge = typeGraph.CreateDirectedEdge(argumentTypeNode, node); + edge.SetAttribute("Argument#", util::strfmt(n)); + } + for (size_t n = 0; n < functionType->NumResults(); n++) + { + auto & resultTypeNode = GetOrCreateTypeGraphNode(functionType->ResultType(n), typeGraph); + auto & edge = typeGraph.CreateDirectedEdge(resultTypeNode, node); + edge.SetAttribute("Result#", util::strfmt(n)); + } + } + else + { + JLM_UNREACHABLE("Unknown type"); + } + + return node; +} + +/** + * Attaches the given GraphWriter port to the input in the RVSDG it represents. + * Also adds an edge to the input port, from the node representing the input's origin. + * @param inputPort the GraphWriter port representing the input + * @param rvsdgInput the RVSDG input + */ +static void +AttachNodeInput(util::Port & inputPort, const rvsdg::input & rvsdgInput) +{ + auto & graph = inputPort.GetGraph(); + inputPort.SetProgramObject(rvsdgInput); + + // nodes are visited in topological order, so if the origin is an output, it will already exist + if (auto originPort = + reinterpret_cast(graph.GetElementFromProgramObject(*rvsdgInput.origin()))) + { + auto & edge = graph.CreateDirectedEdge(*originPort, inputPort); + if (rvsdg::is(rvsdgInput.type())) + edge.SetAttribute("color", util::Colors::Red); + if (rvsdg::is(rvsdgInput.type())) + edge.SetAttribute("color", util::Colors::Green); + } +} + +/** + * Attaches the given GraphWriter port to the output in RVSDG it represents. + * Also adds information to the output about its type, using a reference to the type graph. 
+ * @param outputPort the GraphWriter port representing the output + * @param rvsdgOutput the RVSDG output + * @param typeGraph the type graph, or nullptr if the output's type should not be included + */ +static void +AttachNodeOutput( + util::Port & outputPort, + const rvsdg::output & rvsdgOutput, + util::Graph * typeGraph) +{ + outputPort.SetProgramObject(rvsdgOutput); + if (typeGraph) + outputPort.SetAttributeGraphElement( + "type", + GetOrCreateTypeGraphNode(rvsdgOutput.type(), *typeGraph)); +} + +/** + * Fill the given \p graph with nodes corresponding to the nodes of the given \p region. + * If \p typeGraph is not nullptr, all rvsdg outputs get a type reference to the type graph. + * If the type does not already exist in the type graph, it is created. + */ +static void +CreateGraphNodes(util::Graph & graph, rvsdg::region & region, util::Graph * typeGraph) +{ + graph.SetProgramObject(region); + + // Start by creating nodes for all the region arguments, and attaching them to the RVSDG outputs. + for (size_t n = 0; n < region.narguments(); n++) + { + auto & node = graph.CreateArgumentNode(); + auto & argument = *region.argument(n); + AttachNodeOutput(node, argument, typeGraph); + + // If this argument corresponds to one of the structural node's inputs, reference it + if (argument.input()) + node.SetAttributeObject("input", *argument.input()); + } + + // Create a node for each node in the region in topological order. + // Inputs expect the node representing their origin to exist before being visited. 
+ rvsdg::topdown_traverser traverser(®ion); + for (const auto rvsdgNode : traverser) + { + auto & node = graph.CreateInOutNode(rvsdgNode->ninputs(), rvsdgNode->noutputs()); + node.SetLabel(rvsdgNode->operation().debug_string()); + node.SetProgramObject(*rvsdgNode); + + for (size_t i = 0; i < rvsdgNode->ninputs(); i++) + AttachNodeInput(node.GetInputPort(i), *rvsdgNode->input(i)); + + for (size_t i = 0; i < rvsdgNode->noutputs(); i++) + AttachNodeOutput(node.GetOutputPort(i), *rvsdgNode->output(i), typeGraph); + + // Structural nodes also have subgraphs + if (auto structuralNode = dynamic_cast(rvsdgNode)) + { + for (size_t i = 0; i < structuralNode->nsubregions(); i++) + { + auto & subGraph = node.CreateSubgraph(); + CreateGraphNodes(subGraph, *structuralNode->subregion(i), typeGraph); + } + } + } + + // Create result nodes for the region's results, and attach them to their origins + for (size_t n = 0; n < region.nresults(); n++) + { + auto & node = graph.CreateResultNode(); + auto & result = *region.result(n); + AttachNodeInput(node, result); + + // If this result corresponds to one of the structural node's outputs, reference it + if (result.output()) + node.SetAttributeObject("output", *result.output()); + } +} + +util::Graph & +WriteGraphs(util::GraphWriter & writer, rvsdg::region & region, bool emitTypeGraph) +{ + util::Graph * typeGraph = nullptr; + if (emitTypeGraph) + { + typeGraph = &writer.CreateGraph(); + typeGraph->SetLabel("Type graph"); + } + util::Graph & rootGraph = writer.CreateGraph(); + rootGraph.SetLabel("RVSDG root graph"); + CreateGraphNodes(rootGraph, region, typeGraph); + + return rootGraph; +} +} diff --git a/jlm/llvm/backend/dot/DotWriter.hpp b/jlm/llvm/backend/dot/DotWriter.hpp new file mode 100644 index 000000000..021b07bd1 --- /dev/null +++ b/jlm/llvm/backend/dot/DotWriter.hpp @@ -0,0 +1,29 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. 
+ */ + +#ifndef JLM_LLVM_BACKEND_DOT_DOTWRITER_HPP +#define JLM_LLVM_BACKEND_DOT_DOTWRITER_HPP + +#include +#include + +namespace jlm::llvm::dot +{ +/** + * Recursively converts a region and all sub-regions into graphs and sub-graphs. + * All nodes in each region become InOutNodes, with edges showing data and state dependencies. + * Arguments and results are represented using ArgumentNode and ResultNode, respectively. + * All created nodes, inputs, and outputs, get associated to the rvsdg nodes, inputs and outputs. + * + * @param writer the GraphWriter to use + * @param region the RVSDG region to recursively traverse + * @param emitTypeGraph if true, an additional graph containing nodes for all types is emitted + * @return a reference to the top-level graph corresponding to the region + */ +util::Graph & +WriteGraphs(util::GraphWriter & writer, rvsdg::region & region, bool emitTypeGraph); +} + +#endif // JLM_LLVM_BACKEND_DOT_DOTWRITER_HPP diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index 664a628a7..ecdb612ce 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. 
*/ +#include #include #include #include @@ -576,6 +577,30 @@ JlmOptCommand::PrintAsRvsdgTree( } } +void +JlmOptCommand::PrintAsDot( + const llvm::RvsdgModule & rvsdgModule, + const util::filepath & outputFile, + util::StatisticsCollector &) +{ + auto & rootRegion = *rvsdgModule.Rvsdg().root(); + + util::GraphWriter writer; + jlm::llvm::dot::WriteGraphs(writer, rootRegion, true); + + if (outputFile == "") + { + writer.OutputAllGraphs(std::cout, util::GraphOutputFormat::Dot); + } + else + { + std::ofstream fs; + fs.open(outputFile.to_str()); + writer.OutputAllGraphs(fs, util::GraphOutputFormat::Dot); + fs.close(); + } +} + void JlmOptCommand::PrintRvsdgModule( llvm::RvsdgModule & rvsdgModule, @@ -603,6 +628,10 @@ JlmOptCommand::PrintRvsdgModule( { PrintAsRvsdgTree(rvsdgModule, outputFile, statisticsCollector); } + else if (outputFormat == tooling::JlmOptCommandLineOptions::OutputFormat::Dot) + { + PrintAsDot(rvsdgModule, outputFile, statisticsCollector); + } else { JLM_UNREACHABLE("Unhandled output format."); diff --git a/jlm/tooling/Command.hpp b/jlm/tooling/Command.hpp index 720fa4fba..f54b41a52 100644 --- a/jlm/tooling/Command.hpp +++ b/jlm/tooling/Command.hpp @@ -418,6 +418,12 @@ class JlmOptCommand final : public Command const util::filepath & outputFile, util::StatisticsCollector & statisticsCollector); + static void + PrintAsDot( + const llvm::RvsdgModule & rvsdgModule, + const util::filepath & outputFile, + util::StatisticsCollector & statisticsCollector); + [[nodiscard]] std::vector GetOptimizations() const; diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 46185e868..07cf67f09 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -253,11 +253,9 @@ const std::unordered_map mapping = { - { OutputFormat::Ascii, "ascii" }, - { OutputFormat::Llvm, "llvm" }, - { OutputFormat::Mlir, "mlir" }, - { OutputFormat::Tree, "tree" }, - { OutputFormat::Xml, "xml" } + { OutputFormat::Ascii, "ascii" }, { OutputFormat::Dot, 
"dot" }, + { OutputFormat::Llvm, "llvm" }, { OutputFormat::Mlir, "mlir" }, + { OutputFormat::Tree, "tree" }, { OutputFormat::Xml, "xml" } }; auto firstIndex = static_cast(OutputFormat::FirstEnumValue); @@ -793,6 +791,7 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const cl::desc("Select output format:"), cl::values( CreateOutputFormatOption(JlmOptCommandLineOptions::OutputFormat::Ascii, "Output Ascii"), + CreateOutputFormatOption(JlmOptCommandLineOptions::OutputFormat::Dot, "Output Dot"), CreateOutputFormatOption( JlmOptCommandLineOptions::OutputFormat::Llvm, "Output LLVM IR [default]"), diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index 2c4292026..cf01d645a 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -53,6 +53,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions FirstEnumValue, // must always be the first enum value, used for iteration Ascii, + Dot, Llvm, Mlir, Tree, diff --git a/jlm/util/GraphWriter.cpp b/jlm/util/GraphWriter.cpp index c0d8ab272..a00209994 100644 --- a/jlm/util/GraphWriter.cpp +++ b/jlm/util/GraphWriter.cpp @@ -269,28 +269,57 @@ GraphElement::HasAttribute(const std::string & attribute) const return AttributeMap_.find(attribute) != AttributeMap_.end(); } -std::string_view -GraphElement::GetAttribute(const std::string & attribute) +std::optional +GraphElement::GetAttributeString(const std::string & attribute) const { - auto it = AttributeMap_.find(attribute); - if (it == AttributeMap_.end()) - throw jlm::util::error(strfmt("No attribute '", attribute, "' found")); - if (auto stringValue = std::get_if(&it->second)) - return *stringValue; - // Attributes that hold GraphElements or pointers to program objects become question marks - return "?"; + if (auto it = AttributeMap_.find(attribute); it != AttributeMap_.end()) + { + if (auto stringValue = std::get_if(&it->second)) + { + return *stringValue; + } + } + return std::nullopt; } 
-std::string_view -GraphElement::GetAttributeOr(const std::string & attribute, std::string_view otherwise) +std::optional +GraphElement::GetAttributeObject(const std::string & attribute) const { - auto it = AttributeMap_.find(attribute); - if (it == AttributeMap_.end()) - return otherwise; - if (auto stringValue = std::get_if(&it->second)) - return *stringValue; - // Attributes that hold GraphElements or pointers to program objects become question marks - return "?"; + if (auto it = AttributeMap_.find(attribute); it != AttributeMap_.end()) + { + if (auto uintptrValue = std::get_if(&it->second)) + { + return *uintptrValue; + } + } + return std::nullopt; +} + +const GraphElement * +GraphElement::GetAttributeGraphElement(const std::string & attribute) const +{ + if (auto it = AttributeMap_.find(attribute); it != AttributeMap_.end()) + { + if (auto graphElementValue = std::get_if(&it->second)) + { + return *graphElementValue; + } + + // Otherwise, check if this attribute holds a program object that is represented by a + // GraphElement in this graph, or in any graph in the GraphWriter. 
+ if (auto ptr = std::get_if(&it->second)) + { + if (auto gElement = GetGraph().GetElementFromProgramObject(*ptr)) + { + return gElement; + } + if (auto gwElement = GetGraph().GetGraphWriter().GetElementFromProgramObject(*ptr)) + { + return gwElement; + } + } + } + return nullptr; } bool @@ -318,59 +347,57 @@ GraphElement::IsFinalized() const void GraphElement::OutputAttributes(std::ostream & out, AttributeOutputFormat format) const { - auto OutputAttribute = [&](std::string_view name, const AttributeValue & value) + auto FormatAttribute = [&](std::string_view name, std::string_view value) { if (format == AttributeOutputFormat::SpaceSeparatedList) + { PrintIdentifierSafe(out, name); + out << "="; + PrintIdentifierSafe(out, value); + } else if (format == AttributeOutputFormat::HTMLAttributes) + { PrintStringAsHtmlAttributeName(out, name); + out << "=\""; // HTML attributes must be quoted + PrintStringAsHtmlText(out, value, false); + out << '"'; // Closing quote + } else + { JLM_UNREACHABLE("Unknown AttributeOutputFormat"); + } - out << "="; - if (format == AttributeOutputFormat::HTMLAttributes) - out << '"'; // HTML attributes must be quoted + out << " "; // Attributes are space separated in both formats + }; - if (auto string = std::get_if(&value)) + auto OutputAttribute = [&](const std::string & name) + { + if (auto string = GetAttributeString(name)) { - if (format == AttributeOutputFormat::SpaceSeparatedList) - PrintIdentifierSafe(out, *string); - else - PrintStringAsHtmlText(out, *string, false); + FormatAttribute(name, *string); } - else if (auto graphElement = std::get_if(&value)) + else if (auto graphElement = GetAttributeGraphElement(name)) { - out << (*graphElement)->GetFullId(); + FormatAttribute(name, graphElement->GetFullId()); } - else if (auto ptr = std::get_if(&value)) + else if (auto object = GetAttributeObject(name)) { - // Check if some GraphElement in this graph, or in any graph, is mapped to this pointer - if (auto gElement = 
GetGraph().GetElementFromProgramObject(*ptr)) - { - out << gElement->GetFullId(); - } - else if (auto gwElement = GetGraph().GetGraphWriter().GetElementFromProgramObject(*ptr)) - { - out << gwElement->GetFullId(); - } - else - { - out << "ptr" << strfmt(std::hex, ptr); - } + FormatAttribute(name, strfmt("ptr", std::hex, *object)); + } + else + { + JLM_UNREACHABLE("Unknown attribute type"); } - if (format == AttributeOutputFormat::HTMLAttributes) - out << '"'; // Closing quote - out << " "; // Attributes are space separated in both formats }; - for (const auto & [name, value] : AttributeMap_) + for (const auto & [name, _] : AttributeMap_) { - OutputAttribute(name, value); + OutputAttribute(name); } // If no other tooltip is set, print the address of the associated program object if (HasProgramObject() && !HasAttribute(TOOLTIP_ATTRIBUTE)) - OutputAttribute(TOOLTIP_ATTRIBUTE, strfmt(std::hex, GetProgramObject())); + FormatAttribute(TOOLTIP_ATTRIBUTE, strfmt(std::hex, GetProgramObject())); } Port::Port() @@ -1440,11 +1467,17 @@ GraphWriter::GetNextUniqueIdStubSuffix(const char * idStub) } void -GraphWriter::OutputAllGraphs(std::ostream & out, GraphOutputFormat format) +GraphWriter::Finalize() { for (auto & graph : Graphs_) if (!graph->IsSubgraph()) graph->Finalize(); +} + +void +GraphWriter::OutputAllGraphs(std::ostream & out, GraphOutputFormat format) +{ + Finalize(); for (auto & graph : Graphs_) if (!graph->IsSubgraph()) diff --git a/jlm/util/GraphWriter.hpp b/jlm/util/GraphWriter.hpp index 183d4c46f..c872681b4 100644 --- a/jlm/util/GraphWriter.hpp +++ b/jlm/util/GraphWriter.hpp @@ -7,12 +7,9 @@ #define JLM_UTIL_GRAPHWRITER_HPP #include -#include -#include #include #include -#include #include #include #include @@ -204,23 +201,28 @@ class GraphElement HasAttribute(const std::string & attribute) const; /** - * Retrieves the value of the given \p attribute, as a string. - * If the attribute holds a program object or another graph element, the string "?" is returned. 
- * @return the attribute's string value or "?" - * @throws jlm::util::error if the attribute doesn't exist - * @see GetAttributeOr to provide a fallback value + * Retrieves the value of the given \p attribute, if it exists and is assigned a string. + * @return the attribute's string value, or std::nullopt if it does not exist. */ - [[nodiscard]] std::string_view - GetAttribute(const std::string & attribute); + [[nodiscard]] std::optional + GetAttributeString(const std::string & attribute) const; /** - * Retrieves the value of the given \p attribute, as a string. - * If the attribute doesn't exist, the string \p otherwise is returned. - * If the attribute holds a program object or another graph element, the string "?" is returned. - * @return the attribute's string value, "?", or otherwise + * Retrieves the value of the given \p attribute, if it is assigned a program object. + * If the attribute does not exist, or is not holding a program object, std::nullopt is returned. + * @return the object assigned to the attribute, or std::nullopt if it does not exist. */ - [[nodiscard]] std::string_view - GetAttributeOr(const std::string & attribute, std::string_view otherwise); + [[nodiscard]] std::optional + GetAttributeObject(const std::string & attribute) const; + + /** + * Retrieves the value of the given \p attribute, if it is assigned a graph element. + * Otherwise, if the attribute is assigned a program object, + * and there exists a GraphElement representing that program object, that is returned. + * @return pointer to the GraphElement held in the attribute, or nullptr if it does not exist. + */ + [[nodiscard]] const GraphElement * + GetAttributeGraphElement(const std::string & attribute) const; /** * Removes the attribute with the given name \p attribute, if it exists. @@ -932,6 +934,9 @@ class Graph : public GraphElement [[nodiscard]] GraphElement * GetElementFromProgramObject(const T & object) const { + // Check that object is not a reference to a pointer. 
+ // If the user truly wants to use the address of a pointer, they can cast it to uintptr_t. + static_assert(!std::is_pointer_v); return GetElementFromProgramObject(reinterpret_cast(&object)); } @@ -947,7 +952,7 @@ class Graph : public GraphElement GetFromProgramObject(const ProgramObject & object) const { static_assert(std::is_base_of_v); - GraphElement * element = GetElementFromProgramObject(reinterpret_cast(&object)); + GraphElement * element = GetElementFromProgramObject(object); auto result = dynamic_cast(element); JLM_ASSERT(result); return *result; @@ -1059,6 +1064,23 @@ class GraphWriter [[nodiscard]] GraphElement * GetElementFromProgramObject(uintptr_t object) const; + template + [[nodiscard]] GraphElement * + GetElementFromProgramObject(const T & object) const + { + // Check that object is not a reference to a pointer. + // If the user truly wants to use the address of a pointer, they can cast it to uintptr_t. + static_assert(!std::is_pointer_v); + return GetElementFromProgramObject(reinterpret_cast(&object)); + } + + /** + * Ensures that all graphs added to the graph writer so far are finalized. + * Recursively finalizes the GraphElements of each graph. + */ + void + Finalize(); + /** * Finalizes and prints all graphs created in this GraphWriter. * @param out the output stream to write graphs to diff --git a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp new file mode 100644 index 000000000..2ec459639 --- /dev/null +++ b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp @@ -0,0 +1,105 @@ +/* + * Copyright 2024 Håvard Krogstie + * See COPYING for terms of redistribution. 
+ */ + +#include +#include +#include + +#include +#include + +#include + +static int +TestWriteGraphs() +{ + using namespace jlm::llvm; + using namespace jlm::util; + + // Arrange + jlm::tests::GammaTest gammaTest; + + // Act + GraphWriter writer; + dot::WriteGraphs(writer, *gammaTest.graph().root(), false); + + writer.OutputAllGraphs(std::cout, GraphOutputFormat::Dot); + + // Assert + auto & rootGraph = writer.GetGraph(0); + assert(rootGraph.GetProgramObject() == reinterpret_cast(gammaTest.graph().root())); + assert(rootGraph.NumNodes() == 1); // Only the lambda node for "f" + assert(rootGraph.NumResultNodes() == 1); // Exporting the function "f" + auto & lambdaNode = *AssertedCast(&rootGraph.GetNode(0)); + + // The lambda only has one output, and a single subgraph + assert(lambdaNode.GetLabel() == gammaTest.lambda->operation().debug_string()); + assert(lambdaNode.NumInputPorts() == 0); + assert(lambdaNode.NumOutputPorts() == 1); + assert(lambdaNode.NumSubgraphs() == 1); + + auto & fctBody = lambdaNode.GetSubgraph(0); + assert(fctBody.NumArgumentNodes() == 6); + assert(fctBody.NumResultNodes() == 2); + + // Argument a1 leads to the gamma node + auto & connections = fctBody.GetArgumentNode(1).GetConnections(); + assert(connections.size() == 1); + auto & gammaNode = *AssertedCast(&connections[0]->GetTo().GetNode()); + assert(gammaNode.GetLabel() == gammaTest.gamma->operation().debug_string()); + assert(gammaNode.NumInputPorts() == 5); + assert(gammaNode.NumOutputPorts() == 2); + assert(gammaNode.NumSubgraphs() == 2); + + // The first argument of the first region of the gamma references the second gamma input + auto & argument = gammaNode.GetSubgraph(0).GetArgumentNode(0); + auto & input = gammaNode.GetInputPort(1); + assert(argument.GetAttributeGraphElement("input") == &input); + + // Check that the last argument is colored red to represent the memory state type + auto & stateConnections = fctBody.GetArgumentNode(5).GetConnections(); + assert(stateConnections.size() 
== 1); + assert(stateConnections[0]->GetAttributeString("color") == "#FF0000"); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/dot/DotWriterTests-TestWriteGraphs", TestWriteGraphs) + +static int +TestTypeGraph() +{ + using namespace jlm::llvm; + using namespace jlm::util; + + // Arrange + jlm::tests::GammaTest gammaTest; + auto ptrType = PointerType::Create(); + auto bit32Type = jlm::rvsdg::bittype::Create(32); + auto memType = MemoryStateType::Create(); + + // Act + GraphWriter writer; + dot::WriteGraphs(writer, *gammaTest.graph().root(), true); + + writer.Finalize(); + writer.OutputAllGraphs(std::cout, GraphOutputFormat::Dot); + + // Assert + auto & typeGraph = writer.GetGraph(0); + assert(typeGraph.GetProgramObject() == 0); + + // Check that nodes exist for the given types + [[maybe_unused]] auto & ptrNode = typeGraph.GetFromProgramObject(*ptrType); + [[maybe_unused]] auto & bit32Node = typeGraph.GetFromProgramObject(*ptrType); + auto & memNode = typeGraph.GetFromProgramObject(*memType); + + // Check that the rightmost argument of the function references the memNode type + auto & fGraph = writer.GetGraph(2); + assert(writer.GetElementFromProgramObject(*gammaTest.lambda->subregion()) == &fGraph); + assert(fGraph.GetArgumentNode(5).GetAttributeGraphElement("type") == &memNode); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/dot/DotWriterTests-TestTypeGraph", TestTypeGraph) diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 6894e1d1f..56ff1fadf 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -661,21 +661,21 @@ TestDrawSubsetGraph() auto * supersetEdge = graph.GetEdgeBetween(graph.GetNode(allocaReg0), graph.GetNode(import0)); assert(supersetEdge); assert(supersetEdge->IsDirected()); - assert(supersetEdge->GetAttributeOr("style", "solid") == "solid"); + 
assert(supersetEdge->GetAttributeString("style").value_or("solid") == "solid"); // Check that a store edge connects storeValue to storePointer auto * storeEdge = graph.GetEdgeBetween(graph.GetNode(storeValue), graph.GetNode(storePointer)); assert(storeEdge); assert(storeEdge->IsDirected()); - assert(storeEdge->GetAttributeOr("style", Edge::Style::Dashed) == Edge::Style::Dashed); - assert(StringContains(storeEdge->GetAttribute("arrowhead"), "dot")); + assert(storeEdge->GetAttributeString("style") == Edge::Style::Dashed); + assert(StringContains(storeEdge->GetAttributeString("arrowhead").value(), "dot")); // Check that a load edge connects loadPointer to loadValue auto * loadEdge = graph.GetEdgeBetween(graph.GetNode(loadPointer), graph.GetNode(loadValue)); assert(loadEdge); assert(loadEdge->IsDirected()); - assert(loadEdge->GetAttributeOr("style", Edge::Style::Dashed) == Edge::Style::Dashed); - assert(StringContains(loadEdge->GetAttribute("arrowtail"), "dot")); + assert(loadEdge->GetAttributeString("style") == Edge::Style::Dashed); + assert(StringContains(loadEdge->GetAttributeString("arrowtail").value(), "dot")); // Check that the function contains the word "function0" auto & functionNode = graph.GetNode(function0); diff --git a/tests/jlm/util/TestGraphWriter.cpp b/tests/jlm/util/TestGraphWriter.cpp index 2bf954d63..61ab303f6 100644 --- a/tests/jlm/util/TestGraphWriter.cpp +++ b/tests/jlm/util/TestGraphWriter.cpp @@ -49,7 +49,16 @@ TestGraphElement() graph.SetAttributeGraphElement("graph", graph); graph.SetAttributeObject("another graph", myInt); + // Check getting attributes assert(graph.HasAttribute("taste")); + assert(graph.GetAttributeString("taste") == "sweet"); + assert(!graph.GetAttributeString("not-an-attribute")); + assert(graph.GetAttributeGraphElement("graph") == &graph); + assert(graph.GetAttributeObject("another graph") == reinterpret_cast(&myInt)); + // Also check that one can get GraphElements based on the program object they represent + 
assert(graph.GetAttributeGraphElement("another graph") == &graph);
+
+  // Test removing attributes
   assert(graph.RemoveAttribute("taste"));
   assert(!graph.HasAttribute("taste"));
   // Removing the attribute again returns false

From 3fe3d3e7bea94821654bcaac9fcebd3809d82cf5 Mon Sep 17 00:00:00 2001
From: caleridas <36173465+caleridas@users.noreply.github.com>
Date: Thu, 12 Sep 2024 20:38:26 +0200
Subject: [PATCH 077/170] Header consistency checking (#619)

make target for header consistency checking

Provide a "check-headers" target that verifies whether all header files
are correctly declared for the build system. This avoids cases when a
header is used, but not declared in any library build rule.

In principle, the build system should always be aware of every single
file used during build. Otherwise, this can cause problems, e.g. when a
library is built but the "install" rule misses headers that it should
include, hence publishing a broken package.

The present checking does not verify if the header is "assigned" to the
library that "uses" it (or some dependent library) -- that is difficult
to express in "make", but this check should be sufficient for the
purposes of manually rectifying any oversight.

Fix missing headers detected by check-headers target

Add all headers detected missing by the build target to declarations in
the makefiles.
--- Makefile.rules | 3 +++ README.md | 7 +++++ jlm/hls/Makefile.sub | 1 + jlm/llvm/Makefile.sub | 7 +++++ scripts/check_headers.sh | 56 ++++++++++++++++++++++++++++++++++++++++ tests/Makefile.sub | 4 +++ 6 files changed, 78 insertions(+) create mode 100755 scripts/check_headers.sh diff --git a/Makefile.rules b/Makefile.rules index e27edb933..3c3151e7e 100644 --- a/Makefile.rules +++ b/Makefile.rules @@ -113,6 +113,9 @@ endif echo-depends: echo $(DEPEND) +check-headers: depend + @scripts/check_headers.sh $(DEPEND) -h $(HEADERS) -s $(SOURCES) + ################################################################################ # Unit testing rules diff --git a/README.md b/README.md index 4910cd0c4..bcf3c3d40 100644 --- a/README.md +++ b/README.md @@ -72,6 +72,13 @@ make coverage ``` The report will be available in build/coverage/coverage.html. +To ensure that all build information in the makefiles are correct, it is also +advisable to check that all headers used are declared (this is quite easy +to forget). To do this, you can run: +``` +make check-headers +``` + ## High-level synthesis (HLS) backend The HLS backend uses the MLIR FIRRTL dialect from CIRCT to convert llvm IR to FIRRTL code. 
diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index 3c9895dcd..4bb0e3d22 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -65,6 +65,7 @@ libhls_HEADERS = \ jlm/hls/backend/rvsdg2rhls/mem-queue.hpp \ jlm/hls/backend/rvsdg2rhls/mem-sep.hpp \ jlm/hls/backend/rvsdg2rhls/memstate-conv.hpp \ + jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp \ jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.hpp \ jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp \ jlm/hls/backend/rvsdg2rhls/rhls-dne.hpp \ diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index e22999ad6..054f58218 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -71,13 +71,19 @@ libllvm_HEADERS = \ jlm/llvm/opt/inlining.hpp \ jlm/llvm/opt/cne.hpp \ jlm/llvm/opt/push.hpp \ + jlm/llvm/opt/alias-analyses/Andersen.hpp \ jlm/llvm/opt/alias-analyses/DifferencePropagation.hpp \ + jlm/llvm/opt/alias-analyses/EliminatedMemoryNodeProvider.hpp \ jlm/llvm/opt/alias-analyses/LazyCycleDetection.hpp \ jlm/llvm/opt/alias-analyses/MemoryNodeProvider.hpp \ jlm/llvm/opt/alias-analyses/OnlineCycleDetection.hpp \ jlm/llvm/opt/alias-analyses/Optimization.hpp \ + jlm/llvm/opt/alias-analyses/MemoryNodeEliminator.hpp \ + jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp \ jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp \ jlm/llvm/opt/alias-analyses/Steensgaard.hpp \ + jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp \ + jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp \ jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.hpp \ jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp \ jlm/llvm/opt/alias-analyses/PointsToGraph.hpp \ @@ -87,6 +93,7 @@ libllvm_HEADERS = \ jlm/llvm/opt/reduction.hpp \ jlm/llvm/opt/InvariantValueRedirection.hpp \ jlm/llvm/opt/inversion.hpp \ + jlm/llvm/opt/OptimizationSequence.hpp \ jlm/llvm/opt/RvsdgTreePrinter.hpp \ jlm/llvm/frontend/LlvmModuleConversion.hpp \ jlm/llvm/frontend/LlvmTypeConversion.hpp \ diff --git a/scripts/check_headers.sh 
b/scripts/check_headers.sh new file mode 100755 index 000000000..14fc9b4f7 --- /dev/null +++ b/scripts/check_headers.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +function headers_from_deps() { + cat $* | \ + sed -E -e "s/ /\\n/g" | \ + sed -E \ + -e "/^.*:/d" \ + -e "s/\\\\\$//" \ + -e "/^jlm/p" \ + -e "/^test/p" \ + -e "d" | \ + sort -u +} + +declare DEPFILES=() +while [[ "$#" -ge 1 ]] ; do + if [ "$1" == "-h" ] ; then + shift + break + fi + DEPFILES+=("$1") + shift +done + +declare HEADERS=() +while [[ "$#" -ge 1 ]] ; do + if [ "$1" == "-s" ] ; then + shift + break + fi + HEADERS+=("$1") + shift +done + +declare SOURCES=() +while [[ "$#" -ge 1 ]] ; do + SOURCES+=("$1") + shift +done + +TMPDIR=`mktemp -d` +trap 'rm -rf "${TMPDIR}"' EXIT + +headers_from_deps "${DEPFILES[*]}" > "${TMPDIR}/headers_used" +(IFS=' +' ; echo "${HEADERS[*]}" ; echo "${SOURCES[*]}" ) | sort -u > "${TMPDIR}/headers_declared" + +if grep -f "${TMPDIR}/headers_declared" -v "${TMPDIR}/headers_used" > "${TMPDIR}/headers_undeclared" ; then + echo "*** The following headers are used but not declared in build rules: ***" + cat "${TMPDIR}/headers_undeclared" + echo "Hint: the list may be inaccurate if dependence information is stale". + echo "If you think this is the case, please try running 'make depclean ; make depend'." 
+ exit 1 +else + exit 0 +fi diff --git a/tests/Makefile.sub b/tests/Makefile.sub index 1acf54e01..ff4193c2d 100644 --- a/tests/Makefile.sub +++ b/tests/Makefile.sub @@ -7,6 +7,10 @@ libjlmtest_SOURCES = \ libjlmtest_HEADERS = \ tests/test-operation.hpp \ + tests/test-registry.hpp \ + tests/test-types.hpp \ + tests/test-util.hpp \ + tests/TestRvsdgs.hpp \ $(eval $(call common_library,libjlmtest)) From a7dbf184cb807601bfe4e36c0b1846f8cdd51349 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 13 Sep 2024 08:33:15 +0200 Subject: [PATCH 078/170] Add header check to CI (#620) Closes #617 --- .github/workflows/CheckHeaders.yml | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 .github/workflows/CheckHeaders.yml diff --git a/.github/workflows/CheckHeaders.yml b/.github/workflows/CheckHeaders.yml new file mode 100644 index 000000000..1b954b286 --- /dev/null +++ b/.github/workflows/CheckHeaders.yml @@ -0,0 +1,19 @@ +name: CheckHeaders + +on: + pull_request: + branches: [ master ] + +jobs: + CheckHeaders: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + - name: "Install LLVM" + uses: ./.github/actions/InstallPackages + with: + install-llvm: true # Needed to configure jlm + - name: "Configure jlm with HLS and MLIR enabled" + run: ./configure.sh --enable-mlir --enable-hls + - name: "Check headers" + run: make check-headers From 011ec4791cea125aa8d8fa5e7cb55fe58c8a5cc2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 14 Sep 2024 17:45:48 +0200 Subject: [PATCH 079/170] Add NumMemoryStateInputsOutputs annotation to RvsdgTreePrinter (#618) 1. Extends the structural test node with an interface for adding inputs/outputs/arguments/results 2. Add support for printing of memory state typed inputs/outputs to RvsdgTreePrinter class 3. 
Adds support for this annotation to jlm-opt --- jlm/llvm/opt/RvsdgTreePrinter.cpp | 75 +++++++++++ jlm/llvm/opt/RvsdgTreePrinter.hpp | 20 +++ jlm/tooling/CommandLine.cpp | 4 + tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp | 92 +++++++++++-- tests/test-operation.cpp | 66 +++++++++ tests/test-operation.hpp | 134 +++++++++++++++++++ 6 files changed, 377 insertions(+), 14 deletions(-) diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 82245d3c5..d57e16b7c 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -74,6 +74,9 @@ RvsdgTreePrinter::ComputeAnnotationMap(const rvsdg::graph & rvsdg) const case Configuration::Annotation::NumRvsdgNodes: AnnotateNumRvsdgNodes(rvsdg, annotationMap); break; + case Configuration::Annotation::NumMemoryStateInputsOutputs: + AnnotateNumMemoryStateInputsOutputs(rvsdg, annotationMap); + break; default: JLM_UNREACHABLE("Unhandled RVSDG tree annotation."); } @@ -115,6 +118,78 @@ RvsdgTreePrinter::AnnotateNumRvsdgNodes( annotateRegion(*rvsdg.root()); } +void +RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( + const rvsdg::graph & rvsdg, + util::AnnotationMap & annotationMap) +{ + std::string_view argumentLabel("NumMemoryStateTypeArguments"); + std::string_view resultLabel("NumMemoryStateTypeResults"); + std::string_view inputLabel("NumMemoryStateTypeInputs"); + std::string_view outputLabel("NumMemoryStateTypeOutputs"); + + std::function annotateRegion = [&](const rvsdg::region & region) + { + size_t numMemoryStateArguments = 0; + for (size_t n = 0; n < region.narguments(); n++) + { + auto argument = region.argument(n); + if (rvsdg::is(argument->type())) + { + numMemoryStateArguments++; + } + } + annotationMap.AddAnnotation(®ion, { argumentLabel, numMemoryStateArguments }); + + size_t numMemoryStateResults = 0; + for (size_t n = 0; n < region.nresults(); n++) + { + auto result = region.result(n); + if (rvsdg::is(result->type())) + { + numMemoryStateResults++; + } + } 
+ annotationMap.AddAnnotation(®ion, { resultLabel, numMemoryStateResults }); + + for (auto & node : region.nodes) + { + if (auto structuralNode = dynamic_cast(&node)) + { + size_t numMemoryStateInputs = 0; + for (size_t n = 0; n < structuralNode->ninputs(); n++) + { + auto input = structuralNode->input(n); + if (rvsdg::is(input->type())) + { + numMemoryStateInputs++; + } + } + annotationMap.AddAnnotation(structuralNode, { inputLabel, numMemoryStateInputs }); + + size_t numMemoryStateOutputs = 0; + for (size_t n = 0; n < structuralNode->noutputs(); n++) + { + auto output = structuralNode->output(n); + if (rvsdg::is(output->type())) + { + numMemoryStateOutputs++; + } + } + annotationMap.AddAnnotation(structuralNode, { outputLabel, numMemoryStateOutputs }); + + for (size_t n = 0; n < structuralNode->nsubregions(); n++) + { + auto subregion = structuralNode->subregion(n); + annotateRegion(*subregion); + } + } + } + }; + + annotateRegion(*rvsdg.root()); +} + void RvsdgTreePrinter::WriteTreeToFile(const RvsdgModule & rvsdgModule, const std::string & tree) const { diff --git a/jlm/llvm/opt/RvsdgTreePrinter.hpp b/jlm/llvm/opt/RvsdgTreePrinter.hpp index bb55b0593..ddeaf5c94 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.hpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.hpp @@ -53,6 +53,12 @@ class RvsdgTreePrinter final : public optimization */ NumRvsdgNodes, + /** + * Annotate region and structural nodes with the number of inputs/outputs of type + * MemoryStateType. + */ + NumMemoryStateInputsOutputs, + /** * Must always be the last enum value. Used for iteration. */ @@ -137,6 +143,20 @@ class RvsdgTreePrinter final : public optimization static void AnnotateNumRvsdgNodes(const rvsdg::graph & rvsdg, util::AnnotationMap & annotationMap); + /** + * Adds an annotation to \p annotationMap that indicates the number of inputs/outputs of type + * MemoryStateType. + * + * @param rvsdg The RVSDG for which to compute the annotation. 
+ * @param annotationMap The annotation map in which the annotation is inserted. + * + * @see NumMemoryStateInputsOutputs + */ + static void + AnnotateNumMemoryStateInputsOutputs( + const rvsdg::graph & rvsdg, + util::AnnotationMap & annotationMap); + void WriteTreeToFile(const RvsdgModule & rvsdgModule, const std::string & tree) const; diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index 07cf67f09..c38ae8aa9 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -887,6 +887,10 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const llvm::RvsdgTreePrinter::Configuration::Annotation::NumRvsdgNodes, "NumRvsdgNodes", "Annotate number of RVSDG nodes")), + cl::values(::clEnumValN( + llvm::RvsdgTreePrinter::Configuration::Annotation::NumMemoryStateInputsOutputs, + "NumMemoryStateInputsOutputs", + "Annotate number of inputs/outputs with memory state type")), cl::CommaSeparated, cl::desc("Comma separated list of RVSDG tree printer annotations")); diff --git a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp index 5d7704183..87b2d6e01 100644 --- a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp +++ b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp @@ -8,11 +8,20 @@ #include #include -#include #include #include +static std::string +ReadFile(const std::string & outputFilePath) +{ + std::ifstream file(outputFilePath); + std::stringstream buffer; + buffer << file.rdbuf(); + + return buffer.str(); +} + static int PrintRvsdgTree() { @@ -41,13 +50,14 @@ PrintRvsdgTree() printer.run(*rvsdgModule); // Assert - auto outputFilePath = tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"; + auto tree = ReadFile(tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"); + std::cout << tree; - std::ifstream file(outputFilePath); - std::stringstream buffer; - buffer << file.rdbuf(); + auto expectedTree = "RootRegion\n" + "-LAMBDA[f]\n" + "--Region[0]\n\n"; - 
assert(buffer.str() == "RootRegion\n-LAMBDA[f]\n--Region[0]\n\n"); + assert(tree == expectedTree); return 0; } @@ -81,16 +91,15 @@ PrintNumRvsdgNodesAnnotation() printer.run(*rvsdgModule); // Assert - auto outputFilePath = tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"; + auto tree = ReadFile(tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"); + std::cout << tree; - std::ifstream file(outputFilePath); - std::stringstream buffer; - buffer << file.rdbuf(); + auto expectedTree = "RootRegion NumRvsdgNodes:2\n" + "-STRUCTURAL_TEST_NODE NumRvsdgNodes:2\n" + "--Region[0] NumRvsdgNodes:1\n" + "--Region[1] NumRvsdgNodes:1\n\n"; - assert( - buffer.str() - == "RootRegion NumRvsdgNodes:2\n-STRUCTURAL_TEST_NODE NumRvsdgNodes:2\n--Region[0] " - "NumRvsdgNodes:1\n--Region[1] NumRvsdgNodes:1\n\n"); + assert(tree == expectedTree); return 0; } @@ -98,3 +107,58 @@ PrintNumRvsdgNodesAnnotation() JLM_UNIT_TEST_REGISTER( "jlm/llvm/opt/RvsdgTreePrinterTests-PrintNumRvsdgNodesAnnotation", PrintNumRvsdgNodesAnnotation) + +static int +PrintNumMemoryStateInputsOutputsAnnotation() +{ + using namespace jlm::llvm; + using namespace jlm::util; + + // Arrange + auto memoryStateType = MemoryStateType::Create(); + auto valueType = jlm::tests::valuetype::Create(); + + std::string fileName = "PrintNumMemoryStateInputsOutputsAnnotationTest"; + auto rvsdgModule = RvsdgModule::Create({ fileName }, "", ""); + auto & rvsdg = rvsdgModule->Rvsdg(); + + auto & x = jlm::tests::GraphImport::Create(rvsdg, memoryStateType, "x"); + auto & y = jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); + + auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 2); + auto & ix = structuralNode->AddInputWithArguments(x); + auto & iy = structuralNode->AddInputWithArguments(y); + + auto & ox = structuralNode->AddOutputWithResults({ &ix.Argument(0), &ix.Argument(1) }); + auto & oy = structuralNode->AddOutputWithResults({ &iy.Argument(0), &iy.Argument(1) }); + + 
jlm::tests::GraphExport::Create(ox, "x"); + jlm::tests::GraphExport::Create(oy, "y"); + + auto tempDirectory = std::filesystem::temp_directory_path(); + RvsdgTreePrinter::Configuration configuration( + { tempDirectory }, + { RvsdgTreePrinter::Configuration::Annotation::NumMemoryStateInputsOutputs }); + RvsdgTreePrinter printer(configuration); + + // Act + printer.run(*rvsdgModule); + + // Assert + auto tree = ReadFile(tempDirectory.string() + "/" + fileName + "-rvsdgTree-0"); + std::cout << tree; + + auto expectedTree = + "RootRegion NumMemoryStateTypeArguments:1 NumMemoryStateTypeResults:1\n" + "-STRUCTURAL_TEST_NODE NumMemoryStateTypeInputs:1 NumMemoryStateTypeOutputs:1\n" + "--Region[0] NumMemoryStateTypeArguments:1 NumMemoryStateTypeResults:1\n" + "--Region[1] NumMemoryStateTypeArguments:1 NumMemoryStateTypeResults:1\n\n"; + + assert(tree == expectedTree); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/RvsdgTreePrinterTests-PrintNumMemoryStateInputsOutputsAnnotation", + PrintNumMemoryStateInputsOutputsAnnotation) diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 98b204110..7be44298e 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -194,4 +194,70 @@ structural_node::copy(rvsdg::region * parent, rvsdg::substitution_map & smap) co return node; } +StructuralNodeInput & +structural_node::AddInput(rvsdg::output & origin) +{ + auto input = + std::unique_ptr(new StructuralNodeInput(*this, origin, origin.Type())); + return *util::AssertedCast(add_input(std::move(input))); +} + +StructuralNodeInput & +structural_node::AddInputWithArguments(rvsdg::output & origin) +{ + auto & input = AddInput(origin); + for (size_t n = 0; n < nsubregions(); n++) + { + StructuralNodeArgument::Create(*subregion(n), input); + } + + return input; +} + +StructuralNodeOutput & +structural_node::AddOutput(std::shared_ptr type) +{ + auto output = + std::unique_ptr(new StructuralNodeOutput(*this, std::move(type))); + return 
*util::AssertedCast(add_output(std::move(output))); +} + +StructuralNodeOutput & +structural_node::AddOutputWithResults(const std::vector & origins) +{ + if (origins.size() != nsubregions()) + throw util::error("Insufficient number of origins."); + + auto & output = AddOutput(origins[0]->Type()); + for (size_t n = 0; n < nsubregions(); n++) + { + StructuralNodeResult::Create(*origins[n], output); + } + + return output; +} + +StructuralNodeInput::~StructuralNodeInput() noexcept = default; + +StructuralNodeOutput::~StructuralNodeOutput() noexcept = default; + +StructuralNodeArgument::~StructuralNodeArgument() noexcept = default; + +StructuralNodeArgument & +StructuralNodeArgument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +{ + auto structuralNodeInput = util::AssertedCast(input); + return structuralNodeInput != nullptr ? Create(region, *structuralNodeInput) + : Create(region, Type()); +} + +StructuralNodeResult::~StructuralNodeResult() noexcept = default; + +StructuralNodeResult & +StructuralNodeResult::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +{ + auto structuralNodeOutput = util::AssertedCast(output); + return structuralNodeOutput != nullptr ? 
Create(origin, *structuralNodeOutput) : Create(origin); +} + } diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 92f23f77b..a32939238 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -205,6 +205,10 @@ class structural_op final : public rvsdg::structural_op copy() const override; }; +class StructuralNodeArgument; +class StructuralNodeInput; +class StructuralNodeOutput; + class structural_node final : public rvsdg::structural_node { public: @@ -216,6 +220,18 @@ class structural_node final : public rvsdg::structural_node {} public: + StructuralNodeInput & + AddInput(rvsdg::output & origin); + + StructuralNodeInput & + AddInputWithArguments(rvsdg::output & origin); + + StructuralNodeOutput & + AddOutput(std::shared_ptr type); + + StructuralNodeOutput & + AddOutputWithResults(const std::vector & origins); + static structural_node * create(rvsdg::region * parent, size_t nsubregions) { @@ -226,6 +242,124 @@ class structural_node final : public rvsdg::structural_node copy(rvsdg::region * region, rvsdg::substitution_map & smap) const override; }; +class StructuralNodeInput final : public rvsdg::structural_input +{ + friend structural_node; + +public: + ~StructuralNodeInput() noexcept override; + +private: + StructuralNodeInput( + structural_node & node, + rvsdg::output & origin, + std::shared_ptr type) + : rvsdg::structural_input(&node, &origin, std::move(type)) + {} + +public: + [[nodiscard]] size_t + NumArguments() const noexcept + { + return arguments.size(); + } + + [[nodiscard]] StructuralNodeArgument & + Argument(size_t n) noexcept + { + JLM_ASSERT(n < NumArguments()); + // FIXME: I did not find a better way of doing it. The arguments attribute should be replaced + // by a std::vector<> to enable efficient access. 
+ for (auto & argument : arguments) + { + if (argument.region()->index() == n) + return *util::AssertedCast(&argument); + } + + JLM_UNREACHABLE("Unknown argument"); + } +}; + +class StructuralNodeOutput final : public rvsdg::structural_output +{ + friend structural_node; + +public: + ~StructuralNodeOutput() noexcept override; + +private: + StructuralNodeOutput(structural_node & node, std::shared_ptr type) + : rvsdg::structural_output(&node, std::move(type)) + {} +}; + +class StructuralNodeArgument final : public rvsdg::RegionArgument +{ + friend structural_node; + +public: + ~StructuralNodeArgument() noexcept override; + + StructuralNodeArgument & + Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + +private: + StructuralNodeArgument( + rvsdg::region & region, + StructuralNodeInput * input, + std::shared_ptr type) + : rvsdg::RegionArgument(®ion, input, std::move(type)) + {} + + static StructuralNodeArgument & + Create(rvsdg::region & region, StructuralNodeInput & input) + { + auto argument = new StructuralNodeArgument(region, &input, input.Type()); + region.append_argument(argument); + return *argument; + } + + static StructuralNodeArgument & + Create(rvsdg::region & region, std::shared_ptr type) + { + auto argument = new StructuralNodeArgument(region, nullptr, std::move(type)); + region.append_argument(argument); + return *argument; + } +}; + +class StructuralNodeResult final : public rvsdg::RegionResult +{ + friend structural_node; + +public: + ~StructuralNodeResult() noexcept override; + + StructuralNodeResult & + Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + +private: + StructuralNodeResult(rvsdg::output & origin, StructuralNodeOutput * output) + : rvsdg::RegionResult(origin.region(), &origin, output, origin.Type()) + {} + + static StructuralNodeResult & + Create(rvsdg::output & origin) + { + auto result = new StructuralNodeResult(origin, nullptr); + origin.region()->append_result(result); + return *result; + 
} + + static StructuralNodeResult & + Create(rvsdg::output & origin, StructuralNodeOutput & output) + { + auto result = new StructuralNodeResult(origin, &output); + origin.region()->append_result(result); + return *result; + } +}; + class test_op final : public rvsdg::simple_op { public: From fdcd52c544cacf4eaa4de206aa44fa8968770a91 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 14 Sep 2024 18:08:42 +0200 Subject: [PATCH 080/170] Rename theta_op class to ThetaOperation (#622) --- jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp | 2 +- jlm/hls/opt/cne.cpp | 11 +++++------ jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 6 +++--- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 4 ++-- .../alias-analyses/TopDownMemoryNodeEliminator.cpp | 2 +- jlm/llvm/opt/cne.cpp | 11 +++++------ jlm/llvm/opt/inversion.cpp | 2 +- jlm/llvm/opt/push.cpp | 10 +++++----- jlm/llvm/opt/unroll.cpp | 4 ++-- jlm/llvm/opt/unroll.hpp | 2 +- jlm/rvsdg/node.cpp | 2 +- jlm/rvsdg/theta.cpp | 11 +++++------ jlm/rvsdg/theta.hpp | 13 ++++++------- jlm/rvsdg/view.cpp | 2 +- tests/jlm/llvm/opt/test-push.cpp | 4 ++-- tests/jlm/llvm/opt/test-unroll.cpp | 2 +- 17 files changed, 43 insertions(+), 47 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 91aa41103..9d91958fd 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -107,7 +107,7 @@ CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) { for (auto & node : gammaNode.subregion(i)->nodes) { - if (rvsdg::is(&node) || rvsdg::is(&node)) + if (rvsdg::is(&node) || rvsdg::is(&node)) { // don't allow thetas or loops since they could potentially block forever return false; diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index c0bb55c6b..b7179ac2a 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -200,8 +200,7 @@ congruent(jlm::rvsdg::output * o1, 
jlm::rvsdg::output * o2, vset & vs, cnectx & auto n1 = jlm::rvsdg::node_output::node(o1); auto n2 = jlm::rvsdg::node_output::node(o2); - if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) - && n1 == n2) + if (is(n1) && is(n2) && n1 == n2) { auto so1 = static_cast(o1); auto so2 = static_cast(o2); @@ -319,7 +318,7 @@ mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) static void mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node)); + JLM_ASSERT(is(node)); auto theta = static_cast(node); /* mark loop variables */ @@ -414,7 +413,7 @@ mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) static std:: unordered_map map({ { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, - { std::type_index(typeid(jlm::rvsdg::theta_op)), mark_theta }, + { std::type_index(typeid(ThetaOperation)), mark_theta }, { std::type_index(typeid(jlm::hls::loop_op)), mark_loop }, { typeid(llvm::lambda::operation), mark_lambda }, { typeid(llvm::phi::operation), mark_phi }, @@ -525,7 +524,7 @@ divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) static void divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node)); + JLM_ASSERT(is(node)); auto theta = static_cast(node); auto subregion = node->subregion(0); @@ -576,7 +575,7 @@ divert(jlm::rvsdg::structural_node * node, cnectx & ctx) { static std::unordered_map map( { { std::type_index(typeid(rvsdg::GammaOperation)), divert_gamma }, - { std::type_index(typeid(jlm::rvsdg::theta_op)), divert_theta }, + { std::type_index(typeid(ThetaOperation)), divert_theta }, { std::type_index(typeid(jlm::hls::loop_op)), divert_loop }, { typeid(llvm::lambda::operation), divert_lambda }, { typeid(llvm::phi::operation), divert_phi }, diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 96e4e7a93..37496553a 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ 
b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -329,7 +329,7 @@ static inline bool phi_needed(const rvsdg::input * i, const llvm::variable * v) { auto node = rvsdg::input::GetNode(*i); - JLM_ASSERT(is(node)); + JLM_ASSERT(is(node)); auto theta = static_cast(node); auto input = static_cast(i); auto output = theta->output(input->index()); @@ -350,7 +350,7 @@ phi_needed(const rvsdg::input * i, const llvm::variable * v) static inline void convert_theta_node(const rvsdg::node & node, context & ctx) { - JLM_ASSERT(is(&node)); + JLM_ASSERT(is(&node)); auto subregion = static_cast(&node)->subregion(0); auto predicate = subregion->result(0)->origin(); @@ -520,7 +520,7 @@ convert_node(const rvsdg::node & node, context & ctx) unordered_map> map({ { typeid(lambda::operation), convert_lambda_node }, { std::type_index(typeid(rvsdg::GammaOperation)), convert_gamma_node }, - { std::type_index(typeid(rvsdg::theta_op)), convert_theta_node }, + { std::type_index(typeid(rvsdg::ThetaOperation)), convert_theta_node }, { typeid(phi::operation), convert_phi_node }, { typeid(delta::operation), convert_delta_node } }); diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index e0eca37d6..d7054ec0b 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -367,7 +367,7 @@ DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) con std::type_index, std::function> map({ { typeid(rvsdg::GammaOperation), sweepGamma }, - { typeid(jlm::rvsdg::theta_op), sweepTheta }, + { typeid(rvsdg::ThetaOperation), sweepTheta }, { typeid(lambda::operation), sweepLambda }, { typeid(phi::operation), sweepPhi }, { typeid(delta::operation), sweepDelta } }); diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 85e72cb00..63439ab3e 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -95,7 +95,7 @@ void 
InvariantValueRedirection::RedirectInRegion(rvsdg::region & region) { auto isGammaNode = is(region.node()); - auto isThetaNode = is(region.node()); + auto isThetaNode = is(region.node()); auto isLambdaNode = is(region.node()); JLM_ASSERT(isGammaNode || isThetaNode || isLambdaNode); @@ -128,7 +128,7 @@ void InvariantValueRedirection::RedirectInSubregions(rvsdg::structural_node & structuralNode) { auto isGammaNode = is(&structuralNode); - auto isThetaNode = is(&structuralNode); + auto isThetaNode = is(&structuralNode); JLM_ASSERT(isGammaNode || isThetaNode); for (size_t n = 0; n < structuralNode.nsubregions(); n++) diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index de9bcbd0b..b08e21f37 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -501,7 +501,7 @@ void TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::region & region) { auto isLambdaSubregion = rvsdg::is(region.node()); - auto isThetaSubregion = rvsdg::is(region.node()); + auto isThetaSubregion = rvsdg::is(region.node()); auto isGammaSubregion = rvsdg::is(region.node()); JLM_ASSERT(isLambdaSubregion || isThetaSubregion || isGammaSubregion); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index c5fb290aa..2f14333d7 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -197,8 +197,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & auto n1 = jlm::rvsdg::node_output::node(o1); auto n2 = jlm::rvsdg::node_output::node(o2); - if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) - && n1 == n2) + if (is(n1) && is(n2) && n1 == n2) { auto so1 = static_cast(o1); auto so2 = static_cast(o2); @@ -303,7 +302,7 @@ mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) static void mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node)); + 
JLM_ASSERT(is(node)); auto theta = static_cast(node); /* mark loop variables */ @@ -376,7 +375,7 @@ mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) static std:: unordered_map map({ { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, - { std::type_index(typeid(jlm::rvsdg::theta_op)), mark_theta }, + { std::type_index(typeid(rvsdg::ThetaOperation)), mark_theta }, { typeid(lambda::operation), mark_lambda }, { typeid(phi::operation), mark_phi }, { typeid(delta::operation), mark_delta } }); @@ -486,7 +485,7 @@ divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) static void divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) { - JLM_ASSERT(jlm::rvsdg::is(node)); + JLM_ASSERT(is(node)); auto theta = static_cast(node); auto subregion = node->subregion(0); @@ -529,7 +528,7 @@ divert(jlm::rvsdg::structural_node * node, cnectx & ctx) { static std::unordered_map map( { { std::type_index(typeid(rvsdg::GammaOperation)), divert_gamma }, - { std::type_index(typeid(jlm::rvsdg::theta_op)), divert_theta }, + { std::type_index(typeid(rvsdg::ThetaOperation)), divert_theta }, { typeid(lambda::operation), divert_lambda }, { typeid(phi::operation), divert_phi }, { typeid(delta::operation), divert_delta } }); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 29eada91b..cf78dc82e 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -93,7 +93,7 @@ pullin(rvsdg::GammaNode * gamma, jlm::rvsdg::theta_node * theta) static std::vector> collect_condition_nodes(jlm::rvsdg::structural_node * tnode, jlm::rvsdg::structural_node * gnode) { - JLM_ASSERT(jlm::rvsdg::is(tnode)); + JLM_ASSERT(is(tnode)); JLM_ASSERT(rvsdg::is(gnode)); JLM_ASSERT(gnode->region()->node() == tnode); diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 6b4e6b075..463d01c01 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -126,7 +126,7 @@ copy_from_gamma(jlm::rvsdg::node * node, size_t r) static std::vector 
copy_from_theta(jlm::rvsdg::node * node) { - JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); + JLM_ASSERT(is(node->region()->node())); JLM_ASSERT(node->depth() == 0); auto target = node->region()->node()->region(); @@ -222,7 +222,7 @@ is_theta_invariant( const jlm::rvsdg::node * node, const std::unordered_set & invariants) { - JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); + JLM_ASSERT(is(node->region()->node())); JLM_ASSERT(node->depth() == 0); for (size_t n = 0; n < node->ninputs(); n++) @@ -297,14 +297,14 @@ push_top(jlm::rvsdg::theta_node * theta) static bool is_invariant(const rvsdg::RegionArgument * argument) { - JLM_ASSERT(jlm::rvsdg::is(argument->region()->node())); + JLM_ASSERT(is(argument->region()->node())); return argument->region()->result(argument->index() + 1)->origin() == argument; } static bool is_movable_store(jlm::rvsdg::node * node) { - JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); + JLM_ASSERT(is(node->region()->node())); JLM_ASSERT(jlm::rvsdg::is(node)); auto address = dynamic_cast(node->input(0)->origin()); @@ -334,7 +334,7 @@ is_movable_store(jlm::rvsdg::node * node) static void pushout_store(jlm::rvsdg::node * storenode) { - JLM_ASSERT(jlm::rvsdg::is(storenode->region()->node())); + JLM_ASSERT(is(storenode->region()->node())); JLM_ASSERT(jlm::rvsdg::is(storenode) && is_movable_store(storenode)); auto theta = static_cast(storenode->region()->node()); auto storeop = static_cast(&storenode->operation()); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index cbfe84367..5e2e87242 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -60,7 +60,7 @@ is_eqcmp(const jlm::rvsdg::operation & op) static bool is_theta_invariant(const jlm::rvsdg::output * output) { - JLM_ASSERT(jlm::rvsdg::is(output->region()->node())); + JLM_ASSERT(is(output->region()->node())); if (jlm::rvsdg::is(jlm::rvsdg::node_output::node(output))) return true; @@ -81,7 +81,7 @@ push_from_theta(jlm::rvsdg::output * output) auto tmp = 
jlm::rvsdg::node_output::node(output); JLM_ASSERT(jlm::rvsdg::is(tmp)); - JLM_ASSERT(jlm::rvsdg::is(tmp->region()->node())); + JLM_ASSERT(is(tmp->region()->node())); auto theta = static_cast(tmp->region()->node()); auto node = tmp->copy(theta->region(), {}); diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index 0ab02fad6..c561d9670 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -78,7 +78,7 @@ class unrollinfo final theta() const noexcept { auto node = idv()->region()->node(); - JLM_ASSERT(jlm::rvsdg::is(node)); + JLM_ASSERT(is(node)); return static_cast(node); } diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 04c2d4164..272710c40 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -311,7 +311,7 @@ producer(const jlm::rvsdg::output * output) noexcept if (!argument->input()) return nullptr; - if (is(argument->region()->node()) + if (is(argument->region()->node()) && (argument->region()->result(argument->index() + 1)->origin() != argument)) return nullptr; diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index fd46c6268..0459b956f 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -12,23 +12,22 @@ namespace jlm::rvsdg /* theta operation */ -theta_op::~theta_op() noexcept -{} +ThetaOperation::~ThetaOperation() noexcept = default; std::string -theta_op::debug_string() const +ThetaOperation::debug_string() const { return "THETA"; } std::unique_ptr -theta_op::copy() const +ThetaOperation::copy() const { - return std::unique_ptr(new theta_op(*this)); + return std::unique_ptr(new ThetaOperation(*this)); } theta_node::theta_node(rvsdg::region & parent) - : structural_node(rvsdg::theta_op(), &parent, 1) + : structural_node(ThetaOperation(), &parent, 1) { auto predicate = control_false(subregion()); ThetaPredicateResult::Create(*predicate); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 581e84efa..c179714c1 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -15,12 +15,11 @@ 
namespace jlm::rvsdg { -/* theta operation */ - -class theta_op final : public structural_op +class ThetaOperation final : public structural_op { public: - virtual ~theta_op() noexcept; + ~ThetaOperation() noexcept override; + virtual std::string debug_string() const override; @@ -361,7 +360,7 @@ class ThetaArgument final : public RegionArgument ThetaArgument(rvsdg::region & region, theta_input & input) : RegionArgument(®ion, &input, input.Type()) { - JLM_ASSERT(is(region.node())); + JLM_ASSERT(is(region.node())); } static ThetaArgument & @@ -390,7 +389,7 @@ class ThetaResult final : public RegionResult ThetaResult(rvsdg::output & origin, theta_output & thetaOutput) : RegionResult(origin.region(), &origin, &thetaOutput, origin.Type()) { - JLM_ASSERT(is(origin.region()->node())); + JLM_ASSERT(is(origin.region()->node())); } static ThetaResult & @@ -419,7 +418,7 @@ class ThetaPredicateResult final : public RegionResult explicit ThetaPredicateResult(rvsdg::output & origin) : RegionResult(origin.region(), &origin, nullptr, ctltype::Create(2)) { - JLM_ASSERT(is(origin.region()->node())); + JLM_ASSERT(is(origin.region()->node())); } static ThetaPredicateResult & diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index dbae3553c..99ae10c77 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -269,7 +269,7 @@ type(const jlm::rvsdg::node * n) if (dynamic_cast(&n->operation())) return "gamma"; - if (dynamic_cast(&n->operation())) + if (dynamic_cast(&n->operation())) return "theta"; return ""; diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 5b3280e44..a9cda2d75 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -139,9 +139,9 @@ test_push_theta_bottom() auto storenode = jlm::rvsdg::node_output::node(ex.origin()); assert(jlm::rvsdg::is(storenode)); assert(storenode->input(0)->origin() == a); - assert(jlm::rvsdg::is( + assert(jlm::rvsdg::is( 
jlm::rvsdg::node_output::node(storenode->input(1)->origin()))); - assert(jlm::rvsdg::is( + assert(jlm::rvsdg::is( jlm::rvsdg::node_output::node(storenode->input(2)->origin()))); } diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 9b7faa50f..827ba2180 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -26,7 +26,7 @@ nthetas(jlm::rvsdg::region * region) size_t n = 0; for (const auto & node : region->nodes) { - if (jlm::rvsdg::is(&node)) + if (jlm::rvsdg::is(&node)) n++; } From 0f0586a1326847ab9e5962222429fa9bba320e2b Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 14 Sep 2024 23:59:11 +0200 Subject: [PATCH 081/170] Rename theta_node class to ThetaNode (#623) --- .../backend/rvsdg2rhls/ThetaConversion.cpp | 4 +- .../backend/rvsdg2rhls/ThetaConversion.hpp | 2 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- .../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- jlm/hls/opt/cne.cpp | 4 +- .../InterProceduralGraphConversion.cpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +- jlm/llvm/opt/DeadNodeElimination.hpp | 4 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 4 +- jlm/llvm/opt/InvariantValueRedirection.hpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.hpp | 2 +- .../alias-analyses/MemoryNodeProvisioning.hpp | 2 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 8 ++-- .../opt/alias-analyses/MemoryStateEncoder.hpp | 6 +-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 4 +- jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 4 +- .../TopDownMemoryNodeEliminator.cpp | 6 +-- .../TopDownMemoryNodeEliminator.hpp | 4 +- jlm/llvm/opt/cne.cpp | 4 +- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 10 ++-- jlm/llvm/opt/push.cpp | 12 ++--- 
jlm/llvm/opt/push.hpp | 8 ++-- jlm/llvm/opt/unroll.cpp | 18 +++---- jlm/llvm/opt/unroll.hpp | 8 ++-- jlm/rvsdg/theta.cpp | 19 ++++---- jlm/rvsdg/theta.hpp | 48 +++++++++---------- tests/TestRvsdgs.cpp | 2 +- tests/TestRvsdgs.hpp | 2 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 2 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 2 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 8 ++-- .../opt/InvariantValueRedirectionTests.cpp | 4 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 8 ++-- tests/jlm/llvm/opt/test-cne.cpp | 12 ++--- tests/jlm/llvm/opt/test-inversion.cpp | 4 +- tests/jlm/llvm/opt/test-push.cpp | 4 +- tests/jlm/llvm/opt/test-unroll.cpp | 20 ++++---- tests/jlm/rvsdg/test-theta.cpp | 12 ++--- 44 files changed, 144 insertions(+), 147 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index 5a7c56d34..07617323e 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -11,7 +11,7 @@ namespace jlm::hls { static void -ConvertThetaNode(jlm::rvsdg::theta_node & theta) +ConvertThetaNode(rvsdg::ThetaNode & theta) { jlm::rvsdg::substitution_map smap; @@ -54,7 +54,7 @@ ConvertThetaNodesInStructuralNode(jlm::rvsdg::structural_node & structuralNode) ConvertThetaNodesInRegion(*structuralNode.subregion(n)); } - if (auto thetaNode = dynamic_cast(&structuralNode)) + if (auto thetaNode = dynamic_cast(&structuralNode)) { ConvertThetaNode(*thetaNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.hpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.hpp index 0243cc490..61709faa9 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.hpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.hpp @@ -13,7 +13,7 @@ namespace jlm::hls { /** - * Converts every rvsdg::theta_node in \p rvsdgModule to an hls::loop_node. + * Converts every rvsdg::ThetaNode in \p rvsdgModule to an hls::loop_node. 
* * @param rvsdgModule The RVSDG module the transformation is performed on. */ diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 5e987f862..af6bfa25d 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -174,7 +174,7 @@ RemoveUnusedStatesFromGammaNode(rvsdg::GammaNode & gammaNode) } static void -RemoveUnusedStatesFromThetaNode(rvsdg::theta_node & thetaNode) +RemoveUnusedStatesFromThetaNode(rvsdg::ThetaNode & thetaNode) { auto thetaSubregion = thetaNode.subregion(); for (int i = thetaSubregion->narguments() - 1; i >= 0; --i) @@ -203,7 +203,7 @@ RemoveUnusedStatesInStructuralNode(rvsdg::structural_node & structuralNode) { RemoveUnusedStatesFromGammaNode(*gammaNode); } - else if (auto thetaNode = dynamic_cast(&structuralNode)) + else if (auto thetaNode = dynamic_cast(&structuralNode)) { RemoveUnusedStatesFromThetaNode(*thetaNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index cbbc3f4b2..20b9164a6 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -82,7 +82,7 @@ route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) gamma->add_entryvar(output); output = region->argument(region->narguments() - 1); } - else if (auto theta = dynamic_cast(region->node())) + else if (auto theta = dynamic_cast(region->node())) { output = theta->add_loopvar(output)->argument(); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 16a205412..1e04418f3 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -100,7 +100,7 @@ add_triggers(jlm::rvsdg::region * region) add_triggers(new_lambda->subregion()); } } - else if (auto t = dynamic_cast(node)) + else if (auto t = dynamic_cast(node)) { JLM_ASSERT(trigger != 
nullptr); JLM_ASSERT(get_trigger(t->subregion()) == nullptr); diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index fcc51bded..f9f295d93 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -83,7 +83,7 @@ hls::distribute_constants(rvsdg::region * region) { distribute_constants(ln->subregion()); } - else if (auto t = dynamic_cast(node)) + else if (auto t = dynamic_cast(node)) { distribute_constants(t->subregion()); } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 0bb5bd742..1358d17ea 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -119,7 +119,7 @@ route_through(jlm::rvsdg::region * target, jlm::rvsdg::output * response) } JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN"); } - else if (auto tn = dynamic_cast(target->node())) + else if (auto tn = dynamic_cast(target->node())) { auto lv = tn->add_loopvar(parent_response); parrent_user->divert_to(lv); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 963996b10..1fa750bfc 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -153,7 +153,7 @@ eliminate_gamma_eol(rvsdg::GammaNode * gamma) { // eliminates gammas that are only active at the end of the loop and have unused outputs // seems to be mostly loop variables - auto theta = dynamic_cast(gamma->region()->node()); + auto theta = dynamic_cast(gamma->region()->node()); if (!theta || theta->predicate()->origin() != gamma->predicate()->origin()) { return false; diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index b7179ac2a..d07b8f1b4 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -319,7 +319,7 @@ static void mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) { JLM_ASSERT(is(node)); - auto 
theta = static_cast(node); + auto theta = static_cast(node); /* mark loop variables */ for (size_t i1 = 0; i1 < theta->ninputs(); i1++) @@ -525,7 +525,7 @@ static void divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) { JLM_ASSERT(is(node)); - auto theta = static_cast(node); + auto theta = static_cast(node); auto subregion = node->subregion(0); for (const auto & lv : *theta) diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index ca1fdc574..9e36b8e1f 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -752,7 +752,7 @@ Convert( { auto & parentRegion = regionalizedVariableMap.GetTopRegion(); - auto theta = rvsdg::theta_node::create(&parentRegion); + auto theta = rvsdg::ThetaNode::create(&parentRegion); regionalizedVariableMap.PushRegion(*theta->subregion()); auto & thetaVariableMap = regionalizedVariableMap.GetTopVariableMap(); diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index d7054ec0b..859ce850c 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -348,7 +348,7 @@ DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) con }; auto sweepTheta = [](auto & d, auto & n) { - d.SweepTheta(*util::AssertedCast(&n)); + d.SweepTheta(*util::AssertedCast(&n)); }; auto sweepLambda = [](auto & d, auto & n) { @@ -427,7 +427,7 @@ DeadNodeElimination::SweepGamma(rvsdg::GammaNode & gammaNode) const } void -DeadNodeElimination::SweepTheta(jlm::rvsdg::theta_node & thetaNode) const +DeadNodeElimination::SweepTheta(rvsdg::ThetaNode & thetaNode) const { auto & thetaSubregion = *thetaNode.subregion(); diff --git a/jlm/llvm/opt/DeadNodeElimination.hpp b/jlm/llvm/opt/DeadNodeElimination.hpp index 1aa401744..998cb3c27 100644 --- a/jlm/llvm/opt/DeadNodeElimination.hpp +++ b/jlm/llvm/opt/DeadNodeElimination.hpp @@ -13,7 +13,7 @@ 
namespace jlm::rvsdg { class GammaNode; -class theta_node; +class ThetaNode; } namespace jlm::llvm @@ -97,7 +97,7 @@ class DeadNodeElimination final : public optimization SweepGamma(rvsdg::GammaNode & gammaNode) const; void - SweepTheta(rvsdg::theta_node & thetaNode) const; + SweepTheta(rvsdg::ThetaNode & thetaNode) const; void SweepLambda(lambda::node & lambdaNode) const; diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 63439ab3e..312cbb725 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -110,7 +110,7 @@ InvariantValueRedirection::RedirectInRegion(rvsdg::region & region) RedirectInSubregions(*gammaNode); RedirectGammaOutputs(*gammaNode); } - else if (auto thetaNode = dynamic_cast(&node)) + else if (auto thetaNode = dynamic_cast(&node)) { // Ensure we redirect invariant values of all nodes in the theta subregion first, otherwise we // might not be able to redirect some of the theta outputs. 
@@ -153,7 +153,7 @@ InvariantValueRedirection::RedirectGammaOutputs(rvsdg::GammaNode & gammaNode) } void -InvariantValueRedirection::RedirectThetaOutputs(rvsdg::theta_node & thetaNode) +InvariantValueRedirection::RedirectThetaOutputs(rvsdg::ThetaNode & thetaNode) { for (const auto & thetaOutput : thetaNode) { diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index 487eea662..72b144841 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -11,7 +11,7 @@ namespace jlm::rvsdg { class GammaNode; -class theta_node; +class ThetaNode; } namespace jlm::llvm @@ -70,7 +70,7 @@ class InvariantValueRedirection final : public optimization RedirectGammaOutputs(rvsdg::GammaNode & gammaNode); static void - RedirectThetaOutputs(rvsdg::theta_node & thetaNode); + RedirectThetaOutputs(rvsdg::ThetaNode & thetaNode); static void RedirectCallOutputs(CallNode & callNode); diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 4d7b065a5..6888c38ee 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -804,7 +804,7 @@ Andersen::AnalyzeStructuralNode(const rvsdg::structural_node & node) AnalyzePhi(*phiNode); else if (const auto gammaNode = dynamic_cast(&node)) AnalyzeGamma(*gammaNode); - else if (const auto thetaNode = dynamic_cast(&node)) + else if (const auto thetaNode = dynamic_cast(&node)) AnalyzeTheta(*thetaNode); else JLM_UNREACHABLE("Unknown structural node operation"); @@ -963,7 +963,7 @@ Andersen::AnalyzeGamma(const rvsdg::GammaNode & gamma) } void -Andersen::AnalyzeTheta(const rvsdg::theta_node & theta) +Andersen::AnalyzeTheta(const rvsdg::ThetaNode & theta) { // Create a PointerObject for each argument in the inner region // And make it point to a superset of the corresponding input register diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp 
b/jlm/llvm/opt/alias-analyses/Andersen.hpp index e5bd942c3..7c01a329d 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -409,7 +409,7 @@ class Andersen final : public AliasAnalysis AnalyzeGamma(const rvsdg::GammaNode & node); void - AnalyzeTheta(const rvsdg::theta_node & node); + AnalyzeTheta(const rvsdg::ThetaNode & node); void AnalyzeRvsdg(const rvsdg::graph & graph); diff --git a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp index f3d1a1ccd..c67eab4fa 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp @@ -57,7 +57,7 @@ class MemoryNodeProvisioning } [[nodiscard]] virtual const jlm::util::HashSet & - GetThetaEntryExitNodes(const jlm::rvsdg::theta_node & thetaNode) const + GetThetaEntryExitNodes(const rvsdg::ThetaNode & thetaNode) const { auto & entryNodes = GetRegionEntryNodes(*thetaNode.subregion()); auto & exitNodes = GetRegionExitNodes(*thetaNode.subregion()); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 497767a16..5a40ea4fa 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -526,7 +526,7 @@ MemoryStateEncoder::EncodeStructuralNode(rvsdg::structural_node & structuralNode { EncodeGamma(*gammaNode); } - else if (auto thetaNode = dynamic_cast(&structuralNode)) + else if (auto thetaNode = dynamic_cast(&structuralNode)) { EncodeTheta(*thetaNode); } @@ -888,7 +888,7 @@ MemoryStateEncoder::EncodeGammaExit(rvsdg::GammaNode & gammaNode) } void -MemoryStateEncoder::EncodeTheta(rvsdg::theta_node & thetaNode) +MemoryStateEncoder::EncodeTheta(rvsdg::ThetaNode & thetaNode) { Context_->GetRegionalizedStateMap().PushRegion(*thetaNode.subregion()); @@ -900,7 +900,7 @@ MemoryStateEncoder::EncodeTheta(rvsdg::theta_node & thetaNode) } 
std::vector -MemoryStateEncoder::EncodeThetaEntry(rvsdg::theta_node & thetaNode) +MemoryStateEncoder::EncodeThetaEntry(rvsdg::ThetaNode & thetaNode) { auto region = thetaNode.region(); auto & stateMap = Context_->GetRegionalizedStateMap(); @@ -920,7 +920,7 @@ MemoryStateEncoder::EncodeThetaEntry(rvsdg::theta_node & thetaNode) void MemoryStateEncoder::EncodeThetaExit( - rvsdg::theta_node & thetaNode, + rvsdg::ThetaNode & thetaNode, const std::vector & thetaStateOutputs) { auto subregion = thetaNode.subregion(); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index bae1266dc..7db504307 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -143,14 +143,14 @@ class MemoryStateEncoder final EncodeGammaExit(rvsdg::GammaNode & gammaNode); void - EncodeTheta(rvsdg::theta_node & thetaNode); + EncodeTheta(rvsdg::ThetaNode & thetaNode); std::vector - EncodeThetaEntry(rvsdg::theta_node & thetaNode); + EncodeThetaEntry(rvsdg::ThetaNode & thetaNode); void EncodeThetaExit( - rvsdg::theta_node & thetaNode, + rvsdg::ThetaNode & thetaNode, const std::vector & thetaStateOutputs); /** diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 21db7c486..10ae6dd22 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -1652,7 +1652,7 @@ Steensgaard::AnalyzeGamma(const rvsdg::GammaNode & node) } void -Steensgaard::AnalyzeTheta(const jlm::rvsdg::theta_node & theta) +Steensgaard::AnalyzeTheta(const rvsdg::ThetaNode & theta) { for (auto thetaOutput : theta) { @@ -1696,7 +1696,7 @@ Steensgaard::AnalyzeStructuralNode(const jlm::rvsdg::structural_node & node) { AnalyzeGamma(*gammaNode); } - else if (auto thetaNode = dynamic_cast(&node)) + else if (auto thetaNode = dynamic_cast(&node)) { AnalyzeTheta(*thetaNode); } diff --git 
a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index ee3456242..ce5aa6829 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -12,7 +12,7 @@ namespace jlm::rvsdg { class GammaNode; -class theta_node; +class ThetaNode; } namespace jlm::llvm::aa @@ -88,7 +88,7 @@ class Steensgaard final : public AliasAnalysis AnalyzeGamma(const rvsdg::GammaNode & node); void - AnalyzeTheta(const rvsdg::theta_node & node); + AnalyzeTheta(const rvsdg::ThetaNode & node); void AnalyzeSimpleNode(const rvsdg::simple_node & node); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index b08e21f37..9ce802a63 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -534,7 +534,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownStructuralNode( { EliminateTopDownGamma(*gammaNode); } - else if (auto thetaNode = dynamic_cast(&structuralNode)) + else if (auto thetaNode = dynamic_cast(&structuralNode)) { EliminateTopDownTheta(*thetaNode); } @@ -722,7 +722,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownGamma(const rvsdg::GammaNode & gamm } void -TopDownMemoryNodeEliminator::EliminateTopDownTheta(const rvsdg::theta_node & thetaNode) +TopDownMemoryNodeEliminator::EliminateTopDownTheta(const rvsdg::ThetaNode & thetaNode) { auto & thetaRegion = *thetaNode.region(); auto & thetaSubregion = *thetaNode.subregion(); @@ -946,7 +946,7 @@ TopDownMemoryNodeEliminator::CheckInvariants( collectRegionsAndCalls(*subregion, regions, callNodes); } } - else if (auto thetaNode = dynamic_cast(&node)) + else if (auto thetaNode = dynamic_cast(&node)) { auto subregion = thetaNode->subregion(); regions.push_back(subregion); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp 
b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp index 7da9057df..05f3a5dd5 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp @@ -32,7 +32,7 @@ class node; class region; class simple_node; class structural_node; -class theta_node; +class ThetaNode; } namespace jlm::llvm::aa @@ -152,7 +152,7 @@ class TopDownMemoryNodeEliminator final : public MemoryNodeEliminator EliminateTopDownGamma(const rvsdg::GammaNode & gammaNode); void - EliminateTopDownTheta(const rvsdg::theta_node & thetaNode); + EliminateTopDownTheta(const rvsdg::ThetaNode & thetaNode); void EliminateTopDownSimpleNode(const rvsdg::simple_node & simpleNode); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 2f14333d7..030fc2cb8 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -303,7 +303,7 @@ static void mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) { JLM_ASSERT(is(node)); - auto theta = static_cast(node); + auto theta = static_cast(node); /* mark loop variables */ for (size_t i1 = 0; i1 < theta->ninputs(); i1++) @@ -486,7 +486,7 @@ static void divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) { JLM_ASSERT(is(node)); - auto theta = static_cast(node); + auto theta = static_cast(node); auto subregion = node->subregion(0); for (const auto & lv : *theta) diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 0dc8b47d7..80c16308a 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -76,7 +76,7 @@ route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) gamma->add_entryvar(output); output = region->argument(region->narguments() - 1); } - else if (auto theta = dynamic_cast(region->node())) + else if (auto theta = dynamic_cast(region->node())) { output = theta->add_loopvar(output)->argument(); } diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index cf78dc82e..9ffa21a3d 100644 
--- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -49,7 +49,7 @@ class ivtstat final : public util::Statistics }; static rvsdg::GammaNode * -is_applicable(const jlm::rvsdg::theta_node * theta) +is_applicable(const rvsdg::ThetaNode * theta) { auto matchnode = jlm::rvsdg::node_output::node(theta->predicate()->origin()); if (!jlm::rvsdg::is(matchnode)) @@ -74,7 +74,7 @@ is_applicable(const jlm::rvsdg::theta_node * theta) } static void -pullin(rvsdg::GammaNode * gamma, jlm::rvsdg::theta_node * theta) +pullin(rvsdg::GammaNode * gamma, rvsdg::ThetaNode * theta) { pullin_bottom(gamma); for (const auto & lv : *theta) @@ -137,7 +137,7 @@ to_structural_output(jlm::rvsdg::output * output) } static void -invert(jlm::rvsdg::theta_node * otheta) +invert(rvsdg::ThetaNode * otheta) { auto ogamma = is_applicable(otheta); if (!ogamma) @@ -190,7 +190,7 @@ invert(jlm::rvsdg::theta_node * otheta) /* handle subregion 1 */ jlm::rvsdg::substitution_map r1map; { - auto ntheta = jlm::rvsdg::theta_node::create(ngamma->subregion(1)); + auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); /* add loop variables to new theta node and setup substitution map */ auto osubregion0 = ogamma->subregion(0); @@ -296,7 +296,7 @@ invert(jlm::rvsdg::region * region) for (size_t r = 0; r < structnode->nsubregions(); r++) invert(structnode->subregion(r)); - if (auto theta = dynamic_cast(structnode)) + if (auto theta = dynamic_cast(structnode)) invert(theta); } } diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 463d01c01..b82f36478 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -130,7 +130,7 @@ copy_from_theta(jlm::rvsdg::node * node) JLM_ASSERT(node->depth() == 0); auto target = node->region()->node()->region(); - auto theta = static_cast(node->region()->node()); + auto theta = static_cast(node->region()->node()); std::vector operands; for (size_t n = 0; n < node->ninputs(); n++) @@ -237,7 +237,7 @@ is_theta_invariant( } void 
-push_top(jlm::rvsdg::theta_node * theta) +push_top(rvsdg::ThetaNode * theta) { auto subregion = theta->subregion(); @@ -336,7 +336,7 @@ pushout_store(jlm::rvsdg::node * storenode) { JLM_ASSERT(is(storenode->region()->node())); JLM_ASSERT(jlm::rvsdg::is(storenode) && is_movable_store(storenode)); - auto theta = static_cast(storenode->region()->node()); + auto theta = static_cast(storenode->region()->node()); auto storeop = static_cast(&storenode->operation()); auto oaddress = static_cast(storenode->input(0)->origin()); auto ovalue = storenode->input(1)->origin(); @@ -375,7 +375,7 @@ pushout_store(jlm::rvsdg::node * storenode) } void -push_bottom(jlm::rvsdg::theta_node * theta) +push_bottom(rvsdg::ThetaNode * theta) { for (const auto & lv : *theta) { @@ -389,7 +389,7 @@ push_bottom(jlm::rvsdg::theta_node * theta) } void -push(jlm::rvsdg::theta_node * theta) +push(rvsdg::ThetaNode * theta) { bool done = false; while (!done) @@ -416,7 +416,7 @@ push(jlm::rvsdg::region * region) if (auto gamma = dynamic_cast(node)) push(gamma); - if (auto theta = dynamic_cast(node)) + if (auto theta = dynamic_cast(node)) push(theta); } } diff --git a/jlm/llvm/opt/push.hpp b/jlm/llvm/opt/push.hpp index 0820cd279..b25aac191 100644 --- a/jlm/llvm/opt/push.hpp +++ b/jlm/llvm/opt/push.hpp @@ -11,7 +11,7 @@ namespace jlm::rvsdg { class GammaNode; -class theta_node; +class ThetaNode; } namespace jlm::llvm @@ -32,13 +32,13 @@ class pushout final : public optimization }; void -push_top(jlm::rvsdg::theta_node * theta); +push_top(rvsdg::ThetaNode * theta); void -push_bottom(jlm::rvsdg::theta_node * theta); +push_bottom(rvsdg::ThetaNode * theta); void -push(jlm::rvsdg::theta_node * theta); +push(rvsdg::ThetaNode * theta); void push(rvsdg::GammaNode * gamma); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 5e2e87242..e352f1dba 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -82,7 +82,7 @@ push_from_theta(jlm::rvsdg::output * output) auto tmp = 
jlm::rvsdg::node_output::node(output); JLM_ASSERT(jlm::rvsdg::is(tmp)); JLM_ASSERT(is(tmp->region()->node())); - auto theta = static_cast(tmp->region()->node()); + auto theta = static_cast(tmp->region()->node()); auto node = tmp->copy(theta->region(), {}); auto lv = theta->add_loopvar(node->output(0)); @@ -131,7 +131,7 @@ unrollinfo::niterations() const noexcept } std::unique_ptr -unrollinfo::create(jlm::rvsdg::theta_node * theta) +unrollinfo::create(rvsdg::ThetaNode * theta) { using namespace jlm::rvsdg; @@ -175,7 +175,7 @@ unrollinfo::create(jlm::rvsdg::theta_node * theta) static void unroll_body( - const jlm::rvsdg::theta_node * theta, + const rvsdg::ThetaNode * theta, jlm::rvsdg::region * target, jlm::rvsdg::substitution_map & smap, size_t factor) @@ -197,7 +197,7 @@ unroll_body( The theta itself is not deleted. */ static void -copy_body_and_unroll(const jlm::rvsdg::theta_node * theta, size_t factor) +copy_body_and_unroll(const rvsdg::ThetaNode * theta, size_t factor) { jlm::rvsdg::substitution_map smap; for (const auto & olv : *theta) @@ -217,7 +217,7 @@ unroll_theta(const unrollinfo & ui, jlm::rvsdg::substitution_map & smap, size_t { auto theta = ui.theta(); auto remainder = ui.remainder(factor); - auto unrolled_theta = jlm::rvsdg::theta_node::create(theta->region()); + auto unrolled_theta = rvsdg::ThetaNode::create(theta->region()); for (const auto & olv : *theta) { @@ -400,7 +400,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) { auto pred = create_unrolled_gamma_predicate(ui, factor); auto ngamma = rvsdg::GammaNode::create(pred, 2); - auto ntheta = jlm::rvsdg::theta_node::create(ngamma->subregion(1)); + auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); jlm::rvsdg::substitution_map rmap[2]; for (const auto & olv : *otheta) @@ -433,7 +433,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) { auto pred = create_residual_gamma_predicate(smap, ui); auto ngamma = rvsdg::GammaNode::create(pred, 2); - auto ntheta = 
jlm::rvsdg::theta_node::create(ngamma->subregion(1)); + auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); jlm::rvsdg::substitution_map rmap[2]; for (const auto & olv : *otheta) @@ -461,7 +461,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) } void -unroll(jlm::rvsdg::theta_node * otheta, size_t factor) +unroll(rvsdg::ThetaNode * otheta, size_t factor) { if (factor < 2) return; @@ -495,7 +495,7 @@ unroll(jlm::rvsdg::region * region, size_t factor) /* Try to unroll if an inner loop hasn't already been found */ if (!unrolled) { - if (auto theta = dynamic_cast(node)) + if (auto theta = dynamic_cast(node)) { unroll(theta, factor); unrolled = true; diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index c561d9670..677dc7a98 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -74,12 +74,12 @@ class unrollinfo final unrollinfo & operator=(unrollinfo &&) = delete; - inline jlm::rvsdg::theta_node * + inline rvsdg::ThetaNode * theta() const noexcept { auto node = idv()->region()->node(); JLM_ASSERT(is(node)); - return static_cast(node); + return static_cast(node); } inline bool @@ -201,7 +201,7 @@ class unrollinfo final } static std::unique_ptr - create(jlm::rvsdg::theta_node * theta); + create(rvsdg::ThetaNode * theta); private: inline bool @@ -240,7 +240,7 @@ class unrollinfo final * body is duplicated in the unrolled loop. 
*/ void -unroll(jlm::rvsdg::theta_node * node, size_t factor); +unroll(rvsdg::ThetaNode * node, size_t factor); } diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 0459b956f..4cf029dff 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -26,7 +26,7 @@ ThetaOperation::copy() const return std::unique_ptr(new ThetaOperation(*this)); } -theta_node::theta_node(rvsdg::region & parent) +ThetaNode::ThetaNode(rvsdg::region & parent) : structural_node(ThetaOperation(), &parent, 1) { auto predicate = control_false(subregion()); @@ -78,11 +78,10 @@ ThetaPredicateResult::Copy(rvsdg::output & origin, structural_output * output) /* theta node */ -theta_node::~theta_node() -{} +ThetaNode::~ThetaNode() noexcept = default; -const theta_node::loopvar_iterator & -theta_node::loopvar_iterator::operator++() noexcept +const ThetaNode::loopvar_iterator & +ThetaNode::loopvar_iterator::operator++() noexcept { if (output_ == nullptr) return *this; @@ -101,13 +100,13 @@ theta_node::loopvar_iterator::operator++() noexcept } jlm::rvsdg::theta_output * -theta_node::add_loopvar(jlm::rvsdg::output * origin) +ThetaNode::add_loopvar(jlm::rvsdg::output * origin) { node::add_input(std::make_unique(this, origin, origin->Type())); node::add_output(std::make_unique(this, origin->Type())); - auto input = theta_node::input(ninputs() - 1); - auto output = theta_node::output(noutputs() - 1); + auto input = ThetaNode::input(ninputs() - 1); + auto output = ThetaNode::output(noutputs() - 1); input->output_ = output; output->input_ = input; @@ -116,8 +115,8 @@ theta_node::add_loopvar(jlm::rvsdg::output * origin) return output; } -jlm::rvsdg::theta_node * -theta_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +ThetaNode * +ThetaNode::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const { auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp 
index c179714c1..3e1b65382 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -27,12 +27,10 @@ class ThetaOperation final : public structural_op copy() const override; }; -/* theta node */ - class theta_input; class theta_output; -class theta_node final : public structural_node +class ThetaNode final : public structural_node { public: class loopvar_iterator @@ -87,16 +85,16 @@ class theta_node final : public structural_node jlm::rvsdg::theta_output * output_; }; - virtual ~theta_node(); + ~ThetaNode() noexcept override; private: - explicit theta_node(rvsdg::region & parent); + explicit ThetaNode(rvsdg::region & parent); public: - static jlm::rvsdg::theta_node * + static ThetaNode * create(jlm::rvsdg::region * parent) { - return new theta_node(*parent); + return new ThetaNode(*parent); } inline jlm::rvsdg::region * @@ -130,7 +128,7 @@ class theta_node final : public structural_node return ninputs(); } - inline theta_node::loopvar_iterator + inline ThetaNode::loopvar_iterator begin() const { if (ninputs() == 0) @@ -139,7 +137,7 @@ class theta_node final : public structural_node return loopvar_iterator(output(0)); } - inline theta_node::loopvar_iterator + inline ThetaNode::loopvar_iterator end() const { return loopvar_iterator(nullptr); @@ -245,7 +243,7 @@ class theta_node final : public structural_node jlm::rvsdg::theta_output * add_loopvar(jlm::rvsdg::output * origin); - virtual jlm::rvsdg::theta_node * + virtual ThetaNode * copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; }; @@ -253,24 +251,24 @@ class theta_node final : public structural_node class theta_input final : public structural_input { - friend theta_node; + friend ThetaNode; friend theta_output; public: virtual ~theta_input() noexcept; inline theta_input( - theta_node * node, + ThetaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : structural_input(node, origin, std::move(type)), output_(nullptr) {} - theta_node * + ThetaNode * node() const 
noexcept { - return static_cast(structural_input::node()); + return static_cast(structural_input::node()); } inline jlm::rvsdg::theta_output * @@ -303,21 +301,21 @@ is_invariant(const jlm::rvsdg::theta_input * input) noexcept class theta_output final : public structural_output { - friend theta_node; + friend ThetaNode; friend theta_input; public: virtual ~theta_output() noexcept; - inline theta_output(theta_node * node, const std::shared_ptr type) + inline theta_output(ThetaNode * node, const std::shared_ptr type) : structural_output(node, std::move(type)), input_(nullptr) {} - theta_node * + ThetaNode * node() const noexcept { - return static_cast(structural_output::node()); + return static_cast(structural_output::node()); } inline jlm::rvsdg::theta_input * @@ -348,7 +346,7 @@ class theta_output final : public structural_output */ class ThetaArgument final : public RegionArgument { - friend theta_node; + friend ThetaNode; public: ~ThetaArgument() noexcept override; @@ -377,7 +375,7 @@ class ThetaArgument final : public RegionArgument */ class ThetaResult final : public RegionResult { - friend theta_node; + friend ThetaNode; public: ~ThetaResult() noexcept override; @@ -406,7 +404,7 @@ class ThetaResult final : public RegionResult */ class ThetaPredicateResult final : public RegionResult { - friend theta_node; + friend ThetaNode; public: ~ThetaPredicateResult() noexcept override; @@ -439,20 +437,20 @@ is_invariant(const jlm::rvsdg::theta_output * output) noexcept /* theta node method definitions */ inline jlm::rvsdg::theta_input * -theta_node::input(size_t index) const noexcept +ThetaNode::input(size_t index) const noexcept { return static_cast(node::input(index)); } inline jlm::rvsdg::theta_output * -theta_node::output(size_t index) const noexcept +ThetaNode::output(size_t index) const noexcept { return static_cast(node::output(index)); } template util::HashSet -theta_node::RemoveThetaOutputsWhere(const F & match) +ThetaNode::RemoveThetaOutputsWhere(const F & 
match) { util::HashSet deadInputs; @@ -475,7 +473,7 @@ theta_node::RemoveThetaOutputsWhere(const F & match) template util::HashSet -theta_node::RemoveThetaInputsWhere(const F & match) +ThetaNode::RemoveThetaInputsWhere(const F & match) { util::HashSet deadOutputs; diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index e0015f461..29c769335 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -1559,7 +1559,7 @@ ThetaTest::SetupRvsdg() auto zero = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 0); - auto thetanode = jlm::rvsdg::theta_node::create(fct->subregion()); + auto thetanode = jlm::rvsdg::ThetaNode::create(fct->subregion()); auto n = thetanode->add_loopvar(zero); auto l = thetanode->add_loopvar(fct->fctargument(0)); diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index 557f42f7b..ce3bfca67 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -1113,7 +1113,7 @@ class ThetaTest final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::theta_node * theta; + jlm::rvsdg::ThetaNode * theta; jlm::rvsdg::node * gep; }; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index b16296120..1cd017666 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -30,7 +30,7 @@ TestUnknownBoundaries() jlm::rvsdg::bitadd_op add(32); jlm::rvsdg::bitsub_op sub(32); - auto theta = jlm::rvsdg::theta_node::create(lambda->subregion()); + auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); auto subregion = theta->subregion(); auto idv = theta->add_loopvar(lambda->fctargument(0)); auto lvs = theta->add_loopvar(lambda->fctargument(1)); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 750d47269..70d134525 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ 
b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -89,7 +89,7 @@ TestTheta() auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); - auto thetaNode = jlm::rvsdg::theta_node::create(rvsdg.root()); + auto thetaNode = jlm::rvsdg::ThetaNode::create(rvsdg.root()); auto thetaOutput0 = thetaNode->add_loopvar(p); auto thetaOutput1 = thetaNode->add_loopvar(x); diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 20c041d99..db730e2a4 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -190,10 +190,10 @@ TestCallTypeClassifierNonRecursiveDirectCall() { auto SetupOuterTheta = [](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * functionG) { - auto outerTheta = jlm::rvsdg::theta_node::create(region); + auto outerTheta = jlm::rvsdg::ThetaNode::create(region); auto otf = outerTheta->add_loopvar(functionG); - auto innerTheta = jlm::rvsdg::theta_node::create(outerTheta->subregion()); + auto innerTheta = jlm::rvsdg::ThetaNode::create(outerTheta->subregion()); auto itf = innerTheta->add_loopvar(otf->argument()); auto predicate = jlm::rvsdg::control_false(innerTheta->subregion()); @@ -289,13 +289,13 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() { auto SetupInnerTheta = [&](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * g) { - auto innerTheta = jlm::rvsdg::theta_node::create(region); + auto innerTheta = jlm::rvsdg::ThetaNode::create(region); auto thetaOutputG = innerTheta->add_loopvar(g); return thetaOutputG; }; - auto outerTheta = jlm::rvsdg::theta_node::create(region); + auto outerTheta = jlm::rvsdg::ThetaNode::create(region); auto thetaOutputG = outerTheta->add_loopvar(g); auto thetaOutputValue = outerTheta->add_loopvar(value); auto thetaOutputIoState = outerTheta->add_loopvar(iOState); diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp 
b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index e6e1c0406..c2c64fa0b 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -103,12 +103,12 @@ TestTheta() auto x = lambdaNode->fctargument(1); auto l = lambdaNode->fctargument(2); - auto thetaNode1 = jlm::rvsdg::theta_node::create(lambdaNode->subregion()); + auto thetaNode1 = jlm::rvsdg::ThetaNode::create(lambdaNode->subregion()); auto thetaOutput1 = thetaNode1->add_loopvar(c); auto thetaOutput2 = thetaNode1->add_loopvar(x); auto thetaOutput3 = thetaNode1->add_loopvar(l); - auto thetaNode2 = jlm::rvsdg::theta_node::create(thetaNode1->subregion()); + auto thetaNode2 = jlm::rvsdg::ThetaNode::create(thetaNode1->subregion()); auto thetaOutput4 = thetaNode2->add_loopvar(thetaOutput1->argument()); thetaNode2->add_loopvar(thetaOutput2->argument()); auto thetaOutput5 = thetaNode2->add_loopvar(thetaOutput3->argument()); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 94a8d1e6c..6a8bbecd0 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -128,7 +128,7 @@ TestTheta() auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lv1 = theta->add_loopvar(x); auto lv2 = theta->add_loopvar(y); @@ -171,13 +171,13 @@ TestNestedTheta() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto otheta = jlm::rvsdg::theta_node::create(graph.root()); + auto otheta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lvo1 = otheta->add_loopvar(c); auto lvo2 = otheta->add_loopvar(x); auto lvo3 = otheta->add_loopvar(y); - auto itheta = 
jlm::rvsdg::theta_node::create(otheta->subregion()); + auto itheta = jlm::rvsdg::ThetaNode::create(otheta->subregion()); auto lvi1 = itheta->add_loopvar(lvo1->argument()); auto lvi2 = itheta->add_loopvar(lvo2->argument()); @@ -217,7 +217,7 @@ TestEvolvingTheta() auto x3 = &jlm::tests::GraphImport::Create(graph, vt, "x3"); auto x4 = &jlm::tests::GraphImport::Create(graph, vt, "x4"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lv0 = theta->add_loopvar(c); auto lv1 = theta->add_loopvar(x1); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 1e4ae15c6..a3b09cc20 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -145,7 +145,7 @@ test_theta() auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto region = theta->subregion(); auto lv1 = theta->add_loopvar(c); @@ -198,7 +198,7 @@ test_theta2() auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto region = theta->subregion(); auto lv1 = theta->add_loopvar(c); @@ -242,7 +242,7 @@ test_theta3() auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta1 = jlm::rvsdg::theta_node::create(graph.root()); + auto theta1 = jlm::rvsdg::ThetaNode::create(graph.root()); auto r1 = theta1->subregion(); auto lv1 = theta1->add_loopvar(c); @@ -250,7 +250,7 @@ test_theta3() auto lv3 = theta1->add_loopvar(x); auto lv4 = theta1->add_loopvar(x); - auto theta2 = jlm::rvsdg::theta_node::create(r1); + auto theta2 = jlm::rvsdg::ThetaNode::create(r1); auto 
r2 = theta2->subregion(); auto p = theta2->add_loopvar(lv1->argument()); theta2->add_loopvar(lv2->argument()); @@ -302,7 +302,7 @@ test_theta4() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto region = theta->subregion(); auto lv1 = theta->add_loopvar(c); @@ -355,7 +355,7 @@ test_theta5() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto region = theta->subregion(); auto lv0 = theta->add_loopvar(c); diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index 05dc778e9..008dc6da9 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -29,7 +29,7 @@ test1() auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lvx = theta->add_loopvar(x); auto lvy = theta->add_loopvar(y); @@ -85,7 +85,7 @@ test2() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lv1 = theta->add_loopvar(x); diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index a9cda2d75..191b42aa5 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -72,7 +72,7 @@ test_theta() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto s = &jlm::tests::GraphImport::Create(graph, st, "s"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = 
jlm::rvsdg::ThetaNode::create(graph.root()); auto lv1 = theta->add_loopvar(c); auto lv2 = theta->add_loopvar(x); @@ -117,7 +117,7 @@ test_push_theta_bottom() auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lvc = theta->add_loopvar(c); auto lva = theta->add_loopvar(a); diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 827ba2180..c614b7eb0 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -33,7 +33,7 @@ nthetas(jlm::rvsdg::region * region) return n; } -static jlm::rvsdg::theta_node * +static jlm::rvsdg::ThetaNode * create_theta( const jlm::rvsdg::bitcompare_op & cop, const jlm::rvsdg::bitbinary_op & aop, @@ -45,7 +45,7 @@ create_theta( auto graph = init->region()->graph(); - auto theta = theta_node::create(graph->root()); + auto theta = ThetaNode::create(graph->root()); auto subregion = theta->subregion(); auto idv = theta->add_loopvar(init); auto lvs = theta->add_loopvar(step); @@ -242,7 +242,7 @@ test_unknown_boundaries() auto x = &jlm::tests::GraphImport::Create(graph, bt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, bt, "y"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lv1 = theta->add_loopvar(x); auto lv2 = theta->add_loopvar(y); @@ -273,13 +273,13 @@ test_unknown_boundaries() // jlm::rvsdg::view(graph, stdout); } -static std::vector +static std::vector find_thetas(jlm::rvsdg::region * region) { - std::vector thetas; + std::vector thetas; for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto theta = dynamic_cast(node)) + if (auto theta = dynamic_cast(node)) thetas.push_back(theta); } @@ -300,7 +300,7 @@ test_nested_theta() auto end = jlm::rvsdg::create_bitconstant(graph.root(), 32, 
97); /* Outer loop */ - auto otheta = jlm::rvsdg::theta_node::create(graph.root()); + auto otheta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lvo_init = otheta->add_loopvar(init); auto lvo_step = otheta->add_loopvar(step); @@ -313,7 +313,7 @@ test_nested_theta() lvo_init->result()->divert_to(add); /* First inner loop in the original loop */ - auto inner_theta = jlm::rvsdg::theta_node::create(otheta->subregion()); + auto inner_theta = jlm::rvsdg::ThetaNode::create(otheta->subregion()); auto inner_init = jlm::rvsdg::create_bitconstant(otheta->subregion(), 32, 0); auto lvi_init = inner_theta->add_loopvar(inner_init); @@ -327,7 +327,7 @@ test_nested_theta() lvi_init->result()->divert_to(inner_add); /* Nested inner loop */ - auto inner_nested_theta = jlm::rvsdg::theta_node::create(inner_theta->subregion()); + auto inner_nested_theta = jlm::rvsdg::ThetaNode::create(inner_theta->subregion()); auto inner_nested_init = jlm::rvsdg::create_bitconstant(inner_theta->subregion(), 32, 0); auto lvi_nested_init = inner_nested_theta->add_loopvar(inner_nested_init); @@ -343,7 +343,7 @@ test_nested_theta() lvi_nested_init->result()->divert_to(inner_nested_add); /* Second inner loop in the original loop */ - auto inner2_theta = jlm::rvsdg::theta_node::create(otheta->subregion()); + auto inner2_theta = jlm::rvsdg::ThetaNode::create(otheta->subregion()); auto inner2_init = jlm::rvsdg::create_bitconstant(otheta->subregion(), 32, 0); auto lvi2_init = inner2_theta->add_loopvar(inner2_init); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 1433a4c62..20340ecca 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -22,7 +22,7 @@ TestThetaCreation() auto imp2 = &jlm::tests::GraphImport::Create(graph, t, "imp2"); auto imp3 = &jlm::tests::GraphImport::Create(graph, t, "imp3"); - auto theta = jlm::rvsdg::theta_node::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); auto lv1 = 
theta->add_loopvar(imp1); auto lv2 = theta->add_loopvar(imp2); @@ -45,7 +45,7 @@ TestThetaCreation() assert(theta->nloopvars() == 3); assert((*theta->begin())->result() == theta->subregion()->result(1)); - assert(dynamic_cast(theta2)); + assert(dynamic_cast(theta2)); } static void @@ -61,7 +61,7 @@ TestRemoveThetaOutputsWhere() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = theta_node::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(rvsdg.root()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); auto thetaOutput1 = thetaNode->add_loopvar(x); @@ -111,7 +111,7 @@ TestPruneThetaOutputs() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = theta_node::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(rvsdg.root()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); thetaNode->add_loopvar(x); @@ -146,7 +146,7 @@ TestRemoveThetaInputsWhere() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = theta_node::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(rvsdg.root()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); auto thetaOutput1 = thetaNode->add_loopvar(x); @@ -202,7 +202,7 @@ TestPruneThetaInputs() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = theta_node::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(rvsdg.root()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); auto thetaOutput1 = thetaNode->add_loopvar(x); From ebce5fdf1396d644e5a086cd638876850e463756 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 15 Sep 2024 16:21:30 +0200 Subject: [PATCH 082/170] Rename theta_input class to ThetaInput (#624) --- 
.../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- jlm/llvm/ir/operators/call.cpp | 4 +- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +- jlm/llvm/opt/unroll.cpp | 4 +- jlm/rvsdg/theta.cpp | 8 ++-- jlm/rvsdg/theta.hpp | 45 +++++++++---------- tests/jlm/rvsdg/test-theta.cpp | 4 +- 9 files changed, 34 insertions(+), 41 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index f9f295d93..30672e96e 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -24,7 +24,7 @@ distribute_constant(const rvsdg::simple_op & op, rvsdg::simple_output * out) changed = false; for (auto user : *out) { - if (auto ti = dynamic_cast(user)) + if (auto ti = dynamic_cast(user)) { auto arg = ti->argument(); auto res = ti->result(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 1358d17ea..2cb56ead7 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -211,7 +211,7 @@ trace_edge( common_edge = subres->output(); } } - else if (auto ti = dynamic_cast(user)) + else if (auto ti = dynamic_cast(user)) { auto tn = ti->node(); auto lv = tn->add_loopvar(new_edge); diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index ee353ef4f..cb2a804f7 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -62,7 +62,7 @@ invariantInput(const rvsdg::GammaOutput & output, InvariantOutputMap & invariant return nullptr; } -static rvsdg::theta_input * +static rvsdg::ThetaInput * invariantInput(const rvsdg::theta_output & output, InvariantOutputMap & invariantOutputs) { auto origin = output.result()->origin(); @@ -102,7 +102,7 @@ invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutpu if (auto thetaArgument = 
dynamic_cast(&output)) { - auto thetaInput = static_cast(thetaArgument->input()); + auto thetaInput = static_cast(thetaArgument->input()); return invariantInput(*thetaInput->output(), invariantOutputs); } diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index f0c1eba7e..24f78559c 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -322,7 +322,7 @@ node::ComputeCallSummary() const continue; } - if (auto theta_input = dynamic_cast(input)) + if (auto theta_input = dynamic_cast(input)) { auto argument = theta_input->argument(); worklist.insert(worklist.end(), argument->begin(), argument->end()); diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 859ce850c..7011fb2e1 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -224,7 +224,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) if (auto thetaArgument = dynamic_cast(&output)) { - auto thetaInput = util::AssertedCast(thetaArgument->input()); + auto thetaInput = util::AssertedCast(thetaArgument->input()); MarkOutput(*thetaInput->output()); MarkOutput(*thetaInput->origin()); return; @@ -440,7 +440,7 @@ DeadNodeElimination::SweepTheta(rvsdg::ThetaNode & thetaNode) const SweepRegion(thetaSubregion); - auto matchInput = [&](const rvsdg::theta_input & input) + auto matchInput = [&](const rvsdg::ThetaInput & input) { return deadInputs.Contains(&input); }; diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index e352f1dba..65dd373b3 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -69,7 +69,7 @@ is_theta_invariant(const jlm::rvsdg::output * output) if (!argument) return false; - return is_invariant(static_cast(argument->input())); + return is_invariant(static_cast(argument->input())); } static rvsdg::RegionArgument * @@ -103,7 +103,7 @@ is_idv(jlm::rvsdg::input * input) if (!a) return false; - auto tinput = 
static_cast(a->input()); + auto tinput = static_cast(a->input()); return jlm::rvsdg::node_output::node(tinput->result()->origin()) == node; } diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 4cf029dff..fa07cf9bb 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -33,9 +33,7 @@ ThetaNode::ThetaNode(rvsdg::region & parent) ThetaPredicateResult::Create(*predicate); } -/* theta input */ - -theta_input::~theta_input() noexcept +ThetaInput::~ThetaInput() noexcept { if (output_) output_->input_ = nullptr; @@ -54,7 +52,7 @@ ThetaArgument::~ThetaArgument() noexcept = default; ThetaArgument & ThetaArgument::Copy(rvsdg::region & region, structural_input * input) { - auto thetaInput = util::AssertedCast(input); + auto thetaInput = util::AssertedCast(input); return ThetaArgument::Create(region, *thetaInput); } @@ -102,7 +100,7 @@ ThetaNode::loopvar_iterator::operator++() noexcept jlm::rvsdg::theta_output * ThetaNode::add_loopvar(jlm::rvsdg::output * origin) { - node::add_input(std::make_unique(this, origin, origin->Type())); + node::add_input(std::make_unique(this, origin, origin->Type())); node::add_output(std::make_unique(this, origin->Type())); auto input = ThetaNode::input(ninputs() - 1); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 3e1b65382..b0159f419 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -27,7 +27,7 @@ class ThetaOperation final : public structural_op copy() const override; }; -class theta_input; +class ThetaInput; class theta_output; class ThetaNode final : public structural_node @@ -161,7 +161,7 @@ class ThetaNode final : public structural_node * \see theta_output#IsDead() */ template - util::HashSet + util::HashSet RemoveThetaOutputsWhere(const F & match); /** @@ -177,7 +177,7 @@ class ThetaNode final : public structural_node * \see RemoveThetaOutputsWhere() * \see theta_output#IsDead() */ - util::HashSet + util::HashSet PruneThetaOutputs() { auto match = [](const theta_output &) @@ -194,7 +194,7 @@ class 
ThetaNode final : public structural_node * An input must match the condition specified by \p match and its respective argument must be * dead. * - * @tparam F A type that supports the function call operator: bool operator(const theta_input&) + * @tparam F A type that supports the function call operator: bool operator(const ThetaInput&) * @param match Defines the condition of the elements to remove. * @return The outputs corresponding to the removed outputs. * @@ -226,7 +226,7 @@ class ThetaNode final : public structural_node util::HashSet PruneThetaInputs() { - auto match = [](const theta_input &) + auto match = [](const ThetaInput &) { return true; }; @@ -234,7 +234,7 @@ class ThetaNode final : public structural_node return RemoveThetaInputsWhere(match); } - theta_input * + ThetaInput * input(size_t index) const noexcept; theta_output * @@ -247,20 +247,15 @@ class ThetaNode final : public structural_node copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; }; -/* theta input */ - -class theta_input final : public structural_input +class ThetaInput final : public structural_input { friend ThetaNode; friend theta_output; public: - virtual ~theta_input() noexcept; + ~ThetaInput() noexcept override; - inline theta_input( - ThetaNode * node, - jlm::rvsdg::output * origin, - std::shared_ptr type) + ThetaInput(ThetaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : structural_input(node, origin, std::move(type)), output_(nullptr) {} @@ -292,7 +287,7 @@ class theta_input final : public structural_input }; static inline bool -is_invariant(const jlm::rvsdg::theta_input * input) noexcept +is_invariant(const ThetaInput * input) noexcept { return input->result()->origin() == input->argument(); } @@ -302,7 +297,7 @@ is_invariant(const jlm::rvsdg::theta_input * input) noexcept class theta_output final : public structural_output { friend ThetaNode; - friend theta_input; + friend ThetaInput; public: virtual ~theta_output() noexcept; 
@@ -318,7 +313,7 @@ class theta_output final : public structural_output return static_cast(structural_output::node()); } - inline jlm::rvsdg::theta_input * + [[nodiscard]] ThetaInput * input() const noexcept { return input_; @@ -338,7 +333,7 @@ class theta_output final : public structural_output } private: - jlm::rvsdg::theta_input * input_; + ThetaInput * input_; }; /** @@ -355,14 +350,14 @@ class ThetaArgument final : public RegionArgument Copy(rvsdg::region & region, structural_input * input) override; private: - ThetaArgument(rvsdg::region & region, theta_input & input) + ThetaArgument(rvsdg::region & region, ThetaInput & input) : RegionArgument(®ion, &input, input.Type()) { JLM_ASSERT(is(region.node())); } static ThetaArgument & - Create(rvsdg::region & region, theta_input & input) + Create(rvsdg::region & region, ThetaInput & input) { auto thetaArgument = new ThetaArgument(region, input); region.append_argument(thetaArgument); @@ -436,10 +431,10 @@ is_invariant(const jlm::rvsdg::theta_output * output) noexcept /* theta node method definitions */ -inline jlm::rvsdg::theta_input * +inline ThetaInput * ThetaNode::input(size_t index) const noexcept { - return static_cast(node::input(index)); + return static_cast(node::input(index)); } inline jlm::rvsdg::theta_output * @@ -449,10 +444,10 @@ ThetaNode::output(size_t index) const noexcept } template -util::HashSet +util::HashSet ThetaNode::RemoveThetaOutputsWhere(const F & match) { - util::HashSet deadInputs; + util::HashSet deadInputs; // iterate backwards to avoid the invalidation of 'n' by RemoveOutput() for (size_t n = noutputs() - 1; n != static_cast(-1); n--) @@ -497,7 +492,7 @@ ThetaNode::RemoveThetaInputsWhere(const F & match) /* theta input method definitions */ [[nodiscard]] inline RegionResult * -theta_input::result() const noexcept +ThetaInput::result() const noexcept { return output_->result(); } diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 20340ecca..e463c02de 
100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -163,7 +163,7 @@ TestRemoveThetaInputsWhere() // Act & Assert auto deadOutputs = thetaNode->RemoveThetaInputsWhere( - [&](const theta_input & input) + [&](const ThetaInput & input) { return input.index() == thetaOutput1->input()->index(); }); @@ -177,7 +177,7 @@ TestRemoveThetaInputsWhere() assert(thetaOutput2->argument()->index() == 1); deadOutputs = thetaNode->RemoveThetaInputsWhere( - [](const theta_input & input) + [](const ThetaInput & input) { return true; }); From f78700b22cc7c63568f128e0c48fcfda2243a941 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 15 Sep 2024 23:00:44 +0200 Subject: [PATCH 083/170] Rename theta_output class to ThetaOutput (#625) --- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 2 +- jlm/hls/util/view.cpp | 2 +- .../InterProceduralGraphConversion.cpp | 2 +- jlm/llvm/ir/operators/call.cpp | 6 +- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 6 +- .../opt/alias-analyses/MemoryStateEncoder.hpp | 4 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 2 +- jlm/rvsdg/theta.cpp | 8 +-- jlm/rvsdg/theta.hpp | 56 +++++++++---------- .../alias-analyses/TestMemoryStateEncoder.cpp | 6 +- tests/jlm/rvsdg/test-theta.cpp | 4 +- 13 files changed, 51 insertions(+), 53 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 93e2f91e0..c4468521f 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -113,7 +113,7 @@ trace_call(jlm::rvsdg::input * input) auto argument = dynamic_cast(input->origin()); const jlm::rvsdg::output * result; - if (auto to = dynamic_cast(input->origin())) + if (auto to = dynamic_cast(input->origin())) { result = trace_call(to->input()); } diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 270862b69..56e6d3942 100644 --- a/jlm/hls/util/view.cpp 
+++ b/jlm/hls/util/view.cpp @@ -367,7 +367,7 @@ region_to_dot(jlm::rvsdg::region * region) { dot << edge(be->argument(), be, true); } - else if (auto to = dynamic_cast(region->result(i)->output())) + else if (auto to = dynamic_cast(region->result(i)->output())) { dot << edge(to->argument(), to->result(), true); } diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 9e36b8e1f..67504c209 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -763,7 +763,7 @@ Convert( * Add loop variables */ auto & demandSet = demandMap.Lookup(loopAggregationNode); - std::unordered_map thetaOutputMap; + std::unordered_map thetaOutputMap; for (auto & v : demandSet.LoopVariables().Variables()) { rvsdg::output * value = nullptr; diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index cb2a804f7..8bf4ac82e 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -63,7 +63,7 @@ invariantInput(const rvsdg::GammaOutput & output, InvariantOutputMap & invariant } static rvsdg::ThetaInput * -invariantInput(const rvsdg::theta_output & output, InvariantOutputMap & invariantOutputs) +invariantInput(const rvsdg::ThetaOutput & output, InvariantOutputMap & invariantOutputs) { auto origin = output.result()->origin(); @@ -97,7 +97,7 @@ invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutpu if (invariantOutputs.find(&output) != invariantOutputs.end()) return invariantOutputs[&output]; - if (auto thetaOutput = dynamic_cast(&output)) + if (auto thetaOutput = dynamic_cast(&output)) return invariantInput(*thetaOutput, invariantOutputs); if (auto thetaArgument = dynamic_cast(&output)) @@ -198,7 +198,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) continue; } - if (auto thetaOutput = dynamic_cast(origin)) + if (auto thetaOutput = dynamic_cast(origin)) { if (auto input = 
invariantInput(*thetaOutput)) { diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 7011fb2e1..1bc58ed22 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -214,7 +214,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto thetaOutput = dynamic_cast(&output)) + if (auto thetaOutput = dynamic_cast(&output)) { MarkOutput(*thetaOutput->node()->predicate()->origin()); MarkOutput(*thetaOutput->result()->origin()); @@ -431,7 +431,7 @@ DeadNodeElimination::SweepTheta(rvsdg::ThetaNode & thetaNode) const { auto & thetaSubregion = *thetaNode.subregion(); - auto matchOutput = [&](const rvsdg::theta_output & output) + auto matchOutput = [&](const rvsdg::ThetaOutput & output) { auto & argument = *output.argument(); return !Context_->IsAlive(argument) && !Context_->IsAlive(output); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 5a40ea4fa..1a6398e77 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -899,14 +899,14 @@ MemoryStateEncoder::EncodeTheta(rvsdg::ThetaNode & thetaNode) Context_->GetRegionalizedStateMap().PopRegion(*thetaNode.subregion()); } -std::vector +std::vector MemoryStateEncoder::EncodeThetaEntry(rvsdg::ThetaNode & thetaNode) { auto region = thetaNode.region(); auto & stateMap = Context_->GetRegionalizedStateMap(); auto & memoryNodes = Context_->GetMemoryNodeProvisioning().GetThetaEntryExitNodes(thetaNode); - std::vector thetaStateOutputs; + std::vector thetaStateOutputs; auto memoryNodeStatePairs = stateMap.GetStates(*region, memoryNodes); for (auto & memoryNodeStatePair : memoryNodeStatePairs) { @@ -921,7 +921,7 @@ MemoryStateEncoder::EncodeThetaEntry(rvsdg::ThetaNode & thetaNode) void MemoryStateEncoder::EncodeThetaExit( rvsdg::ThetaNode & thetaNode, - const std::vector & thetaStateOutputs) 
+ const std::vector & thetaStateOutputs) { auto subregion = thetaNode.subregion(); auto & stateMap = Context_->GetRegionalizedStateMap(); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index 7db504307..6daa04217 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -145,13 +145,13 @@ class MemoryStateEncoder final void EncodeTheta(rvsdg::ThetaNode & thetaNode); - std::vector + std::vector EncodeThetaEntry(rvsdg::ThetaNode & thetaNode); void EncodeThetaExit( rvsdg::ThetaNode & thetaNode, - const std::vector & thetaStateOutputs); + const std::vector & thetaStateOutputs); /** * Replace \p loadNode with a new copy that takes the provided \p memoryStates. All users of the diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 10ae6dd22..bea13a147 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -239,7 +239,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":arg", index); } - if (is(Output_)) + if (is(Output_)) { auto dbgstr = jlm::rvsdg::node_output::node(Output_)->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 9ffa21a3d..ed7dbce9a 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -195,7 +195,7 @@ invert(rvsdg::ThetaNode * otheta) /* add loop variables to new theta node and setup substitution map */ auto osubregion0 = ogamma->subregion(0); auto osubregion1 = ogamma->subregion(1); - std::unordered_map nlvs; + std::unordered_map nlvs; for (const auto & olv : *otheta) { auto ev = ngamma->add_entryvar(olv->input()->origin()); diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index fa07cf9bb..37bedd982 100644 --- a/jlm/rvsdg/theta.cpp +++ 
b/jlm/rvsdg/theta.cpp @@ -41,7 +41,7 @@ ThetaInput::~ThetaInput() noexcept /* theta output */ -theta_output::~theta_output() noexcept +ThetaOutput::~ThetaOutput() noexcept { if (input_) input_->output_ = nullptr; @@ -61,7 +61,7 @@ ThetaResult::~ThetaResult() noexcept = default; ThetaResult & ThetaResult::Copy(rvsdg::output & origin, structural_output * output) { - auto thetaOutput = util::AssertedCast(output); + auto thetaOutput = util::AssertedCast(output); return ThetaResult::Create(origin, *thetaOutput); } @@ -97,11 +97,11 @@ ThetaNode::loopvar_iterator::operator++() noexcept return *this; } -jlm::rvsdg::theta_output * +ThetaOutput * ThetaNode::add_loopvar(jlm::rvsdg::output * origin) { node::add_input(std::make_unique(this, origin, origin->Type())); - node::add_output(std::make_unique(this, origin->Type())); + node::add_output(std::make_unique(this, origin->Type())); auto input = ThetaNode::input(ninputs() - 1); auto output = ThetaNode::output(noutputs() - 1); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index b0159f419..48d2bb0a6 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -28,7 +28,7 @@ class ThetaOperation final : public structural_op }; class ThetaInput; -class theta_output; +class ThetaOutput; class ThetaNode final : public structural_node { @@ -36,7 +36,7 @@ class ThetaNode final : public structural_node class loopvar_iterator { public: - inline constexpr loopvar_iterator(jlm::rvsdg::theta_output * output) noexcept + constexpr loopvar_iterator(ThetaOutput * output) noexcept : output_(output) {} @@ -63,26 +63,26 @@ class ThetaNode final : public structural_node return !(*this == other); } - inline theta_output * + ThetaOutput * operator*() noexcept { return output_; } - inline theta_output ** + ThetaOutput ** operator->() noexcept { return &output_; } - inline jlm::rvsdg::theta_output * + ThetaOutput * output() const noexcept { return output_; } private: - jlm::rvsdg::theta_output * output_; + ThetaOutput * output_; }; 
~ThetaNode() noexcept override; @@ -148,7 +148,7 @@ class ThetaNode final : public structural_node * * An output must match the condition specified by \p match and it must be dead. * - * @tparam F A type that supports the function call operator: bool operator(const theta_output&) + * @tparam F A type that supports the function call operator: bool operator(const ThetaOutput&) * @param match Defines the condition of the elements to remove. * @return The inputs corresponding to the removed outputs. * @@ -158,7 +158,7 @@ class ThetaNode final : public structural_node * again. * * \see RemoveThetaInputsWhere() - * \see theta_output#IsDead() + * \see ThetaOutput#IsDead() */ template util::HashSet @@ -175,12 +175,12 @@ class ThetaNode final : public structural_node * again. * * \see RemoveThetaOutputsWhere() - * \see theta_output#IsDead() + * \see ThetaOutput#IsDead() */ util::HashSet PruneThetaOutputs() { - auto match = [](const theta_output &) + auto match = [](const ThetaOutput &) { return true; }; @@ -207,7 +207,7 @@ class ThetaNode final : public structural_node * \see RegionArgument#IsDead() */ template - util::HashSet + util::HashSet RemoveThetaInputsWhere(const F & match); /** @@ -223,7 +223,7 @@ class ThetaNode final : public structural_node * \see RemoveThetaInputsWhere() * \see RegionArgument#IsDead() */ - util::HashSet + util::HashSet PruneThetaInputs() { auto match = [](const ThetaInput &) @@ -237,10 +237,10 @@ class ThetaNode final : public structural_node ThetaInput * input(size_t index) const noexcept; - theta_output * + ThetaOutput * output(size_t index) const noexcept; - jlm::rvsdg::theta_output * + ThetaOutput * add_loopvar(jlm::rvsdg::output * origin); virtual ThetaNode * @@ -250,7 +250,7 @@ class ThetaNode final : public structural_node class ThetaInput final : public structural_input { friend ThetaNode; - friend theta_output; + friend ThetaOutput; public: ~ThetaInput() noexcept override; @@ -266,7 +266,7 @@ class ThetaInput final : public 
structural_input return static_cast(structural_input::node()); } - inline jlm::rvsdg::theta_output * + ThetaOutput * output() const noexcept { return output_; @@ -283,7 +283,7 @@ class ThetaInput final : public structural_input result() const noexcept; private: - jlm::rvsdg::theta_output * output_; + ThetaOutput * output_; }; static inline bool @@ -292,17 +292,15 @@ is_invariant(const ThetaInput * input) noexcept return input->result()->origin() == input->argument(); } -/* theta output */ - -class theta_output final : public structural_output +class ThetaOutput final : public structural_output { friend ThetaNode; friend ThetaInput; public: - virtual ~theta_output() noexcept; + ~ThetaOutput() noexcept override; - inline theta_output(ThetaNode * node, const std::shared_ptr type) + ThetaOutput(ThetaNode * node, const std::shared_ptr type) : structural_output(node, std::move(type)), input_(nullptr) {} @@ -379,14 +377,14 @@ class ThetaResult final : public RegionResult Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; private: - ThetaResult(rvsdg::output & origin, theta_output & thetaOutput) + ThetaResult(rvsdg::output & origin, ThetaOutput & thetaOutput) : RegionResult(origin.region(), &origin, &thetaOutput, origin.Type()) { JLM_ASSERT(is(origin.region()->node())); } static ThetaResult & - Create(rvsdg::output & origin, theta_output & thetaOutput) + Create(rvsdg::output & origin, ThetaOutput & thetaOutput) { auto thetaResult = new ThetaResult(origin, thetaOutput); origin.region()->append_result(thetaResult); @@ -424,7 +422,7 @@ class ThetaPredicateResult final : public RegionResult }; static inline bool -is_invariant(const jlm::rvsdg::theta_output * output) noexcept +is_invariant(const ThetaOutput * output) noexcept { return output->result()->origin() == output->argument(); } @@ -437,10 +435,10 @@ ThetaNode::input(size_t index) const noexcept return static_cast(node::input(index)); } -inline jlm::rvsdg::theta_output * +inline ThetaOutput * 
ThetaNode::output(size_t index) const noexcept { - return static_cast(node::output(index)); + return static_cast(node::output(index)); } template @@ -467,10 +465,10 @@ ThetaNode::RemoveThetaOutputsWhere(const F & match) } template -util::HashSet +util::HashSet ThetaNode::RemoveThetaInputsWhere(const F & match) { - util::HashSet deadOutputs; + util::HashSet deadOutputs; // iterate backwards to avoid the invalidation of 'n' by RemoveInput() for (size_t n = ninputs() - 1; n != static_cast(-1); n--) diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 09ea2519e..0a3559114 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -1376,7 +1376,7 @@ ValidateThetaTestSteensgaardAgnostic(const jlm::tests::ThetaTest & test) assert(is(*lambda_exit_mux, 2, 1)); auto thetaOutput = - jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); + jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); auto theta = jlm::rvsdg::node_output::node(thetaOutput); assert(theta == test.theta); @@ -1400,7 +1400,7 @@ ValidateThetaTestSteensgaardRegionAware(const jlm::tests::ThetaTest & test) assert(is(*lambdaExitMerge, 2, 1)); auto thetaOutput = - jlm::util::AssertedCast(lambdaExitMerge->input(0)->origin()); + jlm::util::AssertedCast(lambdaExitMerge->input(0)->origin()); auto theta = jlm::rvsdg::node_output::node(thetaOutput); assert(theta == test.theta); @@ -1424,7 +1424,7 @@ ValidateThetaTestSteensgaardAgnosticTopDown(const jlm::tests::ThetaTest & test) assert(is(*lambda_exit_mux, 2, 1)); auto thetaOutput = - jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); + jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); auto theta = jlm::rvsdg::node_output::node(thetaOutput); assert(theta == test.theta); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 
e463c02de..f5620301f 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -72,7 +72,7 @@ TestRemoveThetaOutputsWhere() // Act & Assert auto deadInputs = thetaNode->RemoveThetaOutputsWhere( - [&](const theta_output & output) + [&](const ThetaOutput & output) { return output.index() == thetaOutput1->index(); }); @@ -86,7 +86,7 @@ TestRemoveThetaOutputsWhere() assert(thetaOutput2->result()->index() == 2); deadInputs = thetaNode->RemoveThetaOutputsWhere( - [](const theta_output &) + [](const ThetaOutput &) { return true; }); From c0e107b4873e69eaaf798fd35d1c8007ef8da4ff Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 16 Sep 2024 14:31:07 +0200 Subject: [PATCH 084/170] Move RVSDG to CFG gamma tests into single file (#627) 1. Moves all the gamma tests of the RVSDG to CFG pass into a single file 2. Cleans them up a bit 3. Fix indentation of test files in Makefile This is part of the fix for #586. --- jlm/llvm/Makefile.sub | 141 ++++++----- .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 232 ++++++++++++++++++ .../backend/llvm/r2j/test-empty-gamma.cpp | 159 ------------ .../backend/llvm/r2j/test-partial-gamma.cpp | 60 ----- 4 files changed, 302 insertions(+), 290 deletions(-) create mode 100644 tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp delete mode 100644 tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp delete mode 100644 tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 054f58218..b88fef2d4 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -141,77 +141,76 @@ libllvm_HEADERS = \ libllvm_TESTS += \ tests/jlm/llvm/backend/dot/DotWriterTests \ - tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma \ - tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma \ - tests/jlm/llvm/backend/llvm/r2j/test-recursive-data \ - tests/jlm/llvm/backend/llvm/jlm-llvm/LoadTests \ - tests/jlm/llvm/backend/llvm/jlm-llvm/MemCpyTests \ - 
tests/jlm/llvm/backend/llvm/jlm-llvm/StoreTests \ - tests/jlm/llvm/backend/llvm/jlm-llvm/TestAttributeConversion \ - tests/jlm/llvm/backend/llvm/jlm-llvm/test-bitconstant \ - tests/jlm/llvm/backend/llvm/jlm-llvm/test-function-calls \ - tests/jlm/llvm/backend/llvm/jlm-llvm/test-select-with-state \ - tests/jlm/llvm/backend/llvm/jlm-llvm/test-type-conversion \ - tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests \ - tests/jlm/llvm/frontend/llvm/LoadTests \ - tests/jlm/llvm/frontend/llvm/MemCpyTests \ - tests/jlm/llvm/frontend/llvm/StoreTests \ - tests/jlm/llvm/frontend/llvm/TestAttributeConversion \ - tests/jlm/llvm/frontend/llvm/test-endless-loop \ - tests/jlm/llvm/frontend/llvm/test-export \ - tests/jlm/llvm/frontend/llvm/TestFNeg \ - tests/jlm/llvm/frontend/llvm/test-function-call \ - tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests \ - tests/jlm/llvm/frontend/llvm/test-recursive-data \ - tests/jlm/llvm/frontend/llvm/test-restructuring \ - tests/jlm/llvm/frontend/llvm/test-select \ - tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests \ - tests/jlm/llvm/ir/operators/LoadTests \ - tests/jlm/llvm/ir/operators/MemCpyTests \ - tests/jlm/llvm/ir/operators/MemoryStateOperationTests \ - tests/jlm/llvm/ir/operators/TestCall \ - tests/jlm/llvm/ir/operators/test-ConstantFP \ - tests/jlm/llvm/ir/operators/test-delta \ - tests/jlm/llvm/ir/operators/TestFree \ - tests/jlm/llvm/ir/operators/TestGetElementPtr \ - tests/jlm/llvm/ir/operators/TestLambda \ - tests/jlm/llvm/ir/operators/TestPhi \ - tests/jlm/llvm/ir/operators/test-sext \ - tests/jlm/llvm/ir/operators/StoreTests \ - tests/jlm/llvm/ir/AttributeSetTests \ - tests/jlm/llvm/ir/test-aggregation \ - tests/jlm/llvm/ir/test-cfg \ - tests/jlm/llvm/ir/test-cfg-node \ - tests/jlm/llvm/ir/test-cfg-orderings \ - tests/jlm/llvm/ir/test-cfg-prune \ - tests/jlm/llvm/ir/test-cfg-purge \ - tests/jlm/llvm/ir/test-cfg-structure \ - tests/jlm/llvm/ir/test-cfg-validity \ - tests/jlm/llvm/ir/test-domtree \ - 
tests/jlm/llvm/ir/test-ssa-destruction \ - tests/jlm/llvm/ir/TestTypes \ - tests/jlm/llvm/ir/TestAnnotation \ - tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider \ - tests/jlm/llvm/opt/alias-analyses/TestAndersen \ - tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation \ - tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection \ - tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder \ - tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet \ - tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph \ - tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider \ - tests/jlm/llvm/opt/alias-analyses/TestSteensgaard \ - tests/jlm/llvm/opt/alias-analyses/TestTopDownMemoryNodeEliminator \ - tests/jlm/llvm/opt/InvariantValueRedirectionTests \ - tests/jlm/llvm/opt/RvsdgTreePrinterTests \ - tests/jlm/llvm/opt/test-cne \ - tests/jlm/llvm/opt/TestDeadNodeElimination \ - tests/jlm/llvm/opt/test-inlining \ - tests/jlm/llvm/opt/test-inversion \ - tests/jlm/llvm/opt/TestLoadMuxReduction \ - tests/jlm/llvm/opt/TestLoadStoreReduction \ - tests/jlm/llvm/opt/test-pull \ - tests/jlm/llvm/opt/test-push \ - tests/jlm/llvm/opt/test-unroll \ + tests/jlm/llvm/backend/llvm/r2j/GammaTests \ + tests/jlm/llvm/backend/llvm/r2j/test-recursive-data \ + tests/jlm/llvm/backend/llvm/jlm-llvm/LoadTests \ + tests/jlm/llvm/backend/llvm/jlm-llvm/MemCpyTests \ + tests/jlm/llvm/backend/llvm/jlm-llvm/StoreTests \ + tests/jlm/llvm/backend/llvm/jlm-llvm/TestAttributeConversion \ + tests/jlm/llvm/backend/llvm/jlm-llvm/test-bitconstant \ + tests/jlm/llvm/backend/llvm/jlm-llvm/test-function-calls \ + tests/jlm/llvm/backend/llvm/jlm-llvm/test-select-with-state \ + tests/jlm/llvm/backend/llvm/jlm-llvm/test-type-conversion \ + tests/jlm/llvm/frontend/llvm/LlvmTypeConversionTests \ + tests/jlm/llvm/frontend/llvm/LoadTests \ + tests/jlm/llvm/frontend/llvm/MemCpyTests \ + tests/jlm/llvm/frontend/llvm/StoreTests \ + tests/jlm/llvm/frontend/llvm/TestAttributeConversion \ + 
tests/jlm/llvm/frontend/llvm/test-endless-loop \ + tests/jlm/llvm/frontend/llvm/test-export \ + tests/jlm/llvm/frontend/llvm/TestFNeg \ + tests/jlm/llvm/frontend/llvm/test-function-call \ + tests/jlm/llvm/frontend/llvm/LlvmPhiConversionTests \ + tests/jlm/llvm/frontend/llvm/test-recursive-data \ + tests/jlm/llvm/frontend/llvm/test-restructuring \ + tests/jlm/llvm/frontend/llvm/test-select \ + tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests \ + tests/jlm/llvm/ir/operators/LoadTests \ + tests/jlm/llvm/ir/operators/MemCpyTests \ + tests/jlm/llvm/ir/operators/MemoryStateOperationTests \ + tests/jlm/llvm/ir/operators/TestCall \ + tests/jlm/llvm/ir/operators/test-ConstantFP \ + tests/jlm/llvm/ir/operators/test-delta \ + tests/jlm/llvm/ir/operators/TestFree \ + tests/jlm/llvm/ir/operators/TestGetElementPtr \ + tests/jlm/llvm/ir/operators/TestLambda \ + tests/jlm/llvm/ir/operators/TestPhi \ + tests/jlm/llvm/ir/operators/test-sext \ + tests/jlm/llvm/ir/operators/StoreTests \ + tests/jlm/llvm/ir/AttributeSetTests \ + tests/jlm/llvm/ir/test-aggregation \ + tests/jlm/llvm/ir/test-cfg \ + tests/jlm/llvm/ir/test-cfg-node \ + tests/jlm/llvm/ir/test-cfg-orderings \ + tests/jlm/llvm/ir/test-cfg-prune \ + tests/jlm/llvm/ir/test-cfg-purge \ + tests/jlm/llvm/ir/test-cfg-structure \ + tests/jlm/llvm/ir/test-cfg-validity \ + tests/jlm/llvm/ir/test-domtree \ + tests/jlm/llvm/ir/test-ssa-destruction \ + tests/jlm/llvm/ir/TestTypes \ + tests/jlm/llvm/ir/TestAnnotation \ + tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider \ + tests/jlm/llvm/opt/alias-analyses/TestAndersen \ + tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation \ + tests/jlm/llvm/opt/alias-analyses/TestLazyCycleDetection \ + tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder \ + tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet \ + tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph \ + tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider \ + 
tests/jlm/llvm/opt/alias-analyses/TestSteensgaard \ + tests/jlm/llvm/opt/alias-analyses/TestTopDownMemoryNodeEliminator \ + tests/jlm/llvm/opt/InvariantValueRedirectionTests \ + tests/jlm/llvm/opt/RvsdgTreePrinterTests \ + tests/jlm/llvm/opt/test-cne \ + tests/jlm/llvm/opt/TestDeadNodeElimination \ + tests/jlm/llvm/opt/test-inlining \ + tests/jlm/llvm/opt/test-inversion \ + tests/jlm/llvm/opt/TestLoadMuxReduction \ + tests/jlm/llvm/opt/TestLoadStoreReduction \ + tests/jlm/llvm/opt/test-pull \ + tests/jlm/llvm/opt/test-push \ + tests/jlm/llvm/opt/test-unroll \ libllvm_TEST_LIBS = \ libjlmtest \ diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp new file mode 100644 index 000000000..be4b9d403 --- /dev/null +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -0,0 +1,232 @@ +/* + * Copyright 2018 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static int +GammaWithMatch() +{ + using namespace jlm::llvm; + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + auto valueType = valuetype::Create(); + auto functionType = + FunctionType::Create({ jlm::rvsdg::bittype::Create(1), valueType, valueType }, { valueType }); + + RvsdgModule rvsdgModule(filepath(""), "", ""); + auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + + auto lambdaNode = lambda::node::create( + rvsdgModule.Rvsdg().root(), + functionType, + "lambdaOutput", + linkage::external_linkage); + + auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->fctargument(0)); + auto gamma = jlm::rvsdg::GammaNode::create(match, 2); + auto gammaInput1 = gamma->add_entryvar(lambdaNode->fctargument(1)); + auto gammaInput2 = gamma->add_entryvar(lambdaNode->fctargument(2)); + auto gammaOutput = gamma->add_exitvar({ gammaInput1->argument(0), 
gammaInput2->argument(1) }); + + auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + jlm::llvm::GraphExport::Create(*lambdaOutput, ""); + + view(rvsdgModule.Rvsdg(), stdout); + + // Act + StatisticsCollector statisticsCollector; + auto module = rvsdg2jlm::rvsdg2jlm(rvsdgModule, statisticsCollector); + print(*module, stdout); + + // Assert + auto & ipg = module->ipgraph(); + assert(ipg.nnodes() == 1); + + auto cfg = dynamic_cast(*ipg.begin()).cfg(); + assert(cfg->nnodes() == 1); + auto node = cfg->entry()->outedge(0)->sink(); + auto bb = dynamic_cast(node); + assert(is(bb->tacs().last()->operation())); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/llvm/r2j/GammaTests-GammaWithMatch", GammaWithMatch) + +static int +GammaWithoutMatch() +{ + using namespace jlm::llvm; + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + auto valueType = valuetype::Create(); + auto functionType = + FunctionType::Create({ jlm::rvsdg::ctltype::Create(2), valueType, valueType }, { valueType }); + + RvsdgModule rvsdgModule(filepath(""), "", ""); + auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + + auto lambdaNode = lambda::node::create( + rvsdgModule.Rvsdg().root(), + functionType, + "lambdaOutput", + linkage::external_linkage); + + auto gammaNode = jlm::rvsdg::GammaNode::create(lambdaNode->fctargument(0), 2); + auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->fctargument(1)); + auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->fctargument(2)); + auto gammaOutput = gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput2->argument(1) }); + + auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + jlm::llvm::GraphExport::Create(*lambdaOutput, ""); + + jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); + + // Act + StatisticsCollector statisticsCollector; + auto module = rvsdg2jlm::rvsdg2jlm(rvsdgModule, statisticsCollector); + print(*module, stdout); + + // Assert + auto 
& ipg = module->ipgraph(); + assert(ipg.nnodes() == 1); + + auto cfg = dynamic_cast(*ipg.begin()).cfg(); + assert(cfg->nnodes() == 1); + auto node = cfg->entry()->outedge(0)->sink(); + auto bb = dynamic_cast(node); + assert(is(bb->tacs().first()->operation())); + assert(is(bb->tacs().last()->operation())); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/llvm/r2j/GammaTests-GammaWithoutMatch", GammaWithoutMatch) + +static int +EmptyGammaWithThreeSubregions() +{ + using namespace jlm::llvm; + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + auto functionType = FunctionType::Create( + { jlm::rvsdg::bittype::Create(32), valueType, valueType }, + { valueType }); + + RvsdgModule rvsdgModule(filepath(""), "", ""); + auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + + auto lambdaNode = lambda::node::create( + rvsdgModule.Rvsdg().root(), + functionType, + "lambdaOutput", + linkage::external_linkage); + + auto match = jlm::rvsdg::match(32, { { 0, 0 }, { 1, 1 } }, 2, 3, lambdaNode->fctargument(0)); + + auto gammaNode = jlm::rvsdg::GammaNode::create(match, 3); + auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->fctargument(1)); + auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->fctargument(2)); + auto gammaOutput = gammaNode->add_exitvar( + { gammaInput1->argument(0), gammaInput1->argument(1), gammaInput2->argument(2) }); + + auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + jlm::llvm::GraphExport::Create(*lambdaOutput, ""); + + jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); + + // Act + StatisticsCollector statisticsCollector; + auto module = rvsdg2jlm::rvsdg2jlm(rvsdgModule, statisticsCollector); + print(*module, stdout); + + // Assert + auto & ipg = module->ipgraph(); + assert(ipg.nnodes() == 1); + + auto cfg = dynamic_cast(*ipg.begin()).cfg(); + assert(is_closed(*cfg)); + + return 0; +} + 
+JLM_UNIT_TEST_REGISTER( + "jlm/llvm/backend/llvm/r2j/GammaTests-EmptyGammaWithThreeSubregions", + EmptyGammaWithThreeSubregions) + +static int +PartialEmptyGamma() +{ + using namespace jlm::llvm; + using namespace jlm::tests; + using namespace jlm::util; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + auto functionType = + FunctionType::Create({ jlm::rvsdg::bittype::Create(1), valueType }, { valueType }); + + RvsdgModule rvsdgModule(filepath(""), "", ""); + + auto lambdaNode = lambda::node::create( + rvsdgModule.Rvsdg().root(), + functionType, + "lambdaOutput", + linkage::external_linkage); + + auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->fctargument(0)); + auto gammaNode = jlm::rvsdg::GammaNode::create(match, 2); + auto gammaInput = gammaNode->add_entryvar(lambdaNode->fctargument(1)); + auto output = jlm::tests::create_testop( + gammaNode->subregion(1), + { gammaInput->argument(1) }, + { valueType })[0]; + auto gammaOutput = gammaNode->add_exitvar({ gammaInput->argument(0), output }); + + auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + + jlm::llvm::GraphExport::Create(*lambdaOutput, ""); + + jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); + + // Act + StatisticsCollector statisticsCollector; + auto module = rvsdg2jlm::rvsdg2jlm(rvsdgModule, statisticsCollector); + + // Assert + auto & ipg = module->ipgraph(); + assert(ipg.nnodes() == 1); + + auto cfg = dynamic_cast(*ipg.begin()).cfg(); + print_ascii(*cfg, stdout); + + assert(!is_proper_structured(*cfg)); + assert(is_structured(*cfg)); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/llvm/r2j/GammaTests-PartialEmptyGamma", PartialEmptyGamma) diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp deleted file mode 100644 index a02285788..000000000 --- a/tests/jlm/llvm/backend/llvm/r2j/test-empty-gamma.cpp +++ /dev/null @@ -1,159 +0,0 @@ -/* - * Copyright 2018 Nico Reißmann - * See 
COPYING for terms of redistribution. - */ - -#include "test-registry.hpp" -#include "test-types.hpp" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -static void -test_with_match() -{ - using namespace jlm::llvm; - - auto vt = jlm::tests::valuetype::Create(); - auto ft = FunctionType::Create({ jlm::rvsdg::bittype::Create(1), vt, vt }, { vt }); - - RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); - nf->set_mutable(false); - - /* setup graph */ - - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - - auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->fctargument(0)); - auto gamma = jlm::rvsdg::GammaNode::create(match, 2); - auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); - auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); - auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); - - auto f = lambda->finalize({ ex }); - GraphExport::Create(*f, ""); - - jlm::rvsdg::view(rm.Rvsdg(), stdout); - - jlm::util::StatisticsCollector statisticsCollector; - auto module = rvsdg2jlm::rvsdg2jlm(rm, statisticsCollector); - print(*module, stdout); - - /* verify output */ - - auto & ipg = module->ipgraph(); - assert(ipg.nnodes() == 1); - - auto cfg = dynamic_cast(*ipg.begin()).cfg(); - assert(cfg->nnodes() == 1); - auto node = cfg->entry()->outedge(0)->sink(); - auto bb = dynamic_cast(node); - assert(jlm::rvsdg::is(bb->tacs().last()->operation())); -} - -static void -test_without_match() -{ - using namespace jlm::llvm; - - auto vt = jlm::tests::valuetype::Create(); - auto ft = FunctionType::Create({ jlm::rvsdg::ctltype::Create(2), vt, vt }, { vt }); - - RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); - nf->set_mutable(false); - - /* setup graph */ - - auto lambda = lambda::node::create(rm.Rvsdg().root(), 
ft, "f", linkage::external_linkage); - - auto gamma = jlm::rvsdg::GammaNode::create(lambda->fctargument(0), 2); - auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); - auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); - auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); - - auto f = lambda->finalize({ ex }); - GraphExport::Create(*f, ""); - - jlm::rvsdg::view(rm.Rvsdg(), stdout); - - jlm::util::StatisticsCollector statisticsCollector; - auto module = rvsdg2jlm::rvsdg2jlm(rm, statisticsCollector); - print(*module, stdout); - - /* verify output */ - - auto & ipg = module->ipgraph(); - assert(ipg.nnodes() == 1); - - auto cfg = dynamic_cast(*ipg.begin()).cfg(); - assert(cfg->nnodes() == 1); - auto node = cfg->entry()->outedge(0)->sink(); - auto bb = dynamic_cast(node); - assert(jlm::rvsdg::is(bb->tacs().first()->operation())); - assert(jlm::rvsdg::is(bb->tacs().last()->operation())); -} - -static void -test_gamma3() -{ - using namespace jlm::llvm; - - auto vt = jlm::tests::valuetype::Create(); - auto ft = FunctionType::Create({ jlm::rvsdg::bittype::Create(32), vt, vt }, { vt }); - - RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); - nf->set_mutable(false); - - /* setup graph */ - - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - - auto match = jlm::rvsdg::match(32, { { 0, 0 }, { 1, 1 } }, 2, 3, lambda->fctargument(0)); - - auto gamma = jlm::rvsdg::GammaNode::create(match, 3); - auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); - auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); - auto ex = gamma->add_exitvar({ ev1->argument(0), ev1->argument(1), ev2->argument(2) }); - - auto f = lambda->finalize({ ex }); - GraphExport::Create(*f, ""); - - jlm::rvsdg::view(rm.Rvsdg(), stdout); - - jlm::util::StatisticsCollector statisticsCollector; - auto module = rvsdg2jlm::rvsdg2jlm(rm, statisticsCollector); - print(*module, 
stdout); - - /* verify output */ - - auto & ipg = module->ipgraph(); - assert(ipg.nnodes() == 1); - - auto cfg = dynamic_cast(*ipg.begin()).cfg(); - assert(is_closed(*cfg)); -} - -static int -test() -{ - test_with_match(); - test_without_match(); - - test_gamma3(); - - return 0; -} - -JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/llvm/r2j/test-empty-gamma", test) diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp deleted file mode 100644 index 4698c9d5e..000000000 --- a/tests/jlm/llvm/backend/llvm/r2j/test-partial-gamma.cpp +++ /dev/null @@ -1,60 +0,0 @@ -/* - * Copyright 2018 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#include "test-operation.hpp" -#include "test-registry.hpp" -#include "test-types.hpp" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include - -static int -test() -{ - using namespace jlm::llvm; - - auto vt = jlm::tests::valuetype::Create(); - auto ft = FunctionType::Create({ jlm::rvsdg::bittype::Create(1), vt }, { vt }); - - RvsdgModule rm(jlm::util::filepath(""), "", ""); - - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - - auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->fctargument(0)); - auto gamma = jlm::rvsdg::GammaNode::create(match, 2); - auto ev = gamma->add_entryvar(lambda->fctargument(1)); - auto output = jlm::tests::create_testop(gamma->subregion(1), { ev->argument(1) }, { vt })[0]; - auto ex = gamma->add_exitvar({ ev->argument(0), output }); - - auto f = lambda->finalize({ ex }); - - GraphExport::Create(*f, ""); - - jlm::rvsdg::view(rm.Rvsdg(), stdout); - - jlm::util::StatisticsCollector statisticsCollector; - auto module = rvsdg2jlm::rvsdg2jlm(rm, statisticsCollector); - auto & ipg = module->ipgraph(); - assert(ipg.nnodes() == 1); - - auto cfg = dynamic_cast(*ipg.begin()).cfg(); - print_ascii(*cfg, stdout); - - 
assert(!is_proper_structured(*cfg)); - assert(is_structured(*cfg)); - - return 0; -} - -JLM_UNIT_TEST_REGISTER("jlm/llvm/backend/llvm/r2j/test-partial-gamma", test) From 0260f3e918fcaf7992afe0c6c144c5b2f539d1fc Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 17 Sep 2024 13:52:36 +0200 Subject: [PATCH 085/170] Rename region class to Region (#628) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 4 +- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 4 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.hpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.hpp | 2 +- .../rvsdg2rhls/DeadNodeElimination.cpp | 2 +- .../backend/rvsdg2rhls/GammaConversion.cpp | 4 +- .../backend/rvsdg2rhls/ThetaConversion.cpp | 4 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-buffers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-buffers.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-forks.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-forks.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 6 +-- jlm/hls/backend/rvsdg2rhls/add-prints.hpp | 6 +-- jlm/hls/backend/rvsdg2rhls/add-sinks.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-sinks.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-triggers.hpp | 4 +- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/alloca-conv.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/check-rhls.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/check-rhls.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/dae-conv.hpp | 2 +- .../rvsdg2rhls/distribute-constants.cpp | 2 +- .../rvsdg2rhls/distribute-constants.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 6 +-- jlm/hls/backend/rvsdg2rhls/mem-conv.hpp | 4 +- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 20 +++---- 
jlm/hls/backend/rvsdg2rhls/mem-queue.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 8 +-- jlm/hls/backend/rvsdg2rhls/mem-sep.hpp | 4 +- jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/memstate-conv.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp | 2 +- .../rvsdg2rhls/remove-redundant-buf.cpp | 2 +- .../rvsdg2rhls/remove-redundant-buf.hpp | 2 +- .../rvsdg2rhls/remove-unused-state.cpp | 2 +- .../rvsdg2rhls/remove-unused-state.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 +- jlm/hls/ir/hls.cpp | 8 +-- jlm/hls/ir/hls.hpp | 26 +++++----- jlm/hls/opt/cne.cpp | 10 ++-- jlm/hls/util/view.cpp | 6 +-- jlm/hls/util/view.hpp | 6 +-- jlm/llvm/backend/dot/DotWriter.cpp | 4 +- jlm/llvm/backend/dot/DotWriter.hpp | 2 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 2 +- .../InterProceduralGraphConversion.cpp | 24 ++++----- jlm/llvm/ir/RvsdgModule.cpp | 2 +- jlm/llvm/ir/RvsdgModule.hpp | 2 +- jlm/llvm/ir/operators/Load.cpp | 6 +-- jlm/llvm/ir/operators/Load.hpp | 20 +++---- .../ir/operators/MemoryStateOperations.hpp | 4 +- jlm/llvm/ir/operators/Phi.cpp | 6 +-- jlm/llvm/ir/operators/Phi.hpp | 31 +++++------ jlm/llvm/ir/operators/Store.cpp | 6 +-- jlm/llvm/ir/operators/Store.hpp | 20 +++---- jlm/llvm/ir/operators/call.cpp | 2 +- jlm/llvm/ir/operators/call.hpp | 8 +-- jlm/llvm/ir/operators/delta.cpp | 6 +-- jlm/llvm/ir/operators/delta.hpp | 16 +++--- jlm/llvm/ir/operators/lambda.cpp | 10 ++-- jlm/llvm/ir/operators/lambda.hpp | 25 +++++---- jlm/llvm/ir/operators/operators.hpp | 12 ++--- jlm/llvm/opt/DeadNodeElimination.cpp | 6 +-- jlm/llvm/opt/DeadNodeElimination.hpp | 6 +-- jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.hpp | 2 +- jlm/llvm/opt/RvsdgTreePrinter.cpp | 6 +-- .../AgnosticMemoryNodeProvider.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.cpp 
| 2 +- jlm/llvm/opt/alias-analyses/Andersen.hpp | 2 +- .../alias-analyses/MemoryNodeProvisioning.hpp | 4 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 32 ++++++------ .../opt/alias-analyses/MemoryStateEncoder.hpp | 2 +- .../RegionAwareMemoryNodeProvider.cpp | 34 ++++++------ .../RegionAwareMemoryNodeProvider.hpp | 8 +-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 2 +- .../TopDownMemoryNodeEliminator.cpp | 44 ++++++++-------- .../TopDownMemoryNodeEliminator.hpp | 6 +-- jlm/llvm/opt/cne.cpp | 10 ++-- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 4 +- jlm/llvm/opt/pull.cpp | 4 +- jlm/llvm/opt/pull.hpp | 2 +- jlm/llvm/opt/push.cpp | 2 +- jlm/llvm/opt/unroll.cpp | 6 +-- jlm/mlir/backend/JlmToMlirConverter.cpp | 2 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 2 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 12 ++--- jlm/mlir/frontend/MlirToJlmConverter.hpp | 12 ++--- jlm/rvsdg/binary.cpp | 8 +-- jlm/rvsdg/binary.hpp | 6 +-- jlm/rvsdg/bitstring/concat.cpp | 2 +- jlm/rvsdg/bitstring/constant.hpp | 8 +-- jlm/rvsdg/control.cpp | 2 +- jlm/rvsdg/control.hpp | 6 +-- jlm/rvsdg/gamma.cpp | 4 +- jlm/rvsdg/gamma.hpp | 12 ++--- jlm/rvsdg/graph.cpp | 2 +- jlm/rvsdg/graph.hpp | 4 +- jlm/rvsdg/node-normal-form.hpp | 2 +- jlm/rvsdg/node.cpp | 8 +-- jlm/rvsdg/node.hpp | 26 +++++----- jlm/rvsdg/notifiers.cpp | 4 +- jlm/rvsdg/notifiers.hpp | 6 +-- jlm/rvsdg/nullary.hpp | 2 +- jlm/rvsdg/operation.hpp | 2 +- jlm/rvsdg/region.cpp | 52 +++++++++---------- jlm/rvsdg/region.hpp | 36 ++++++------- jlm/rvsdg/simple-node.cpp | 7 ++- jlm/rvsdg/simple-node.hpp | 11 ++-- jlm/rvsdg/simple-normal-form.cpp | 4 +- jlm/rvsdg/simple-normal-form.hpp | 2 +- jlm/rvsdg/statemux.cpp | 2 +- jlm/rvsdg/statemux.hpp | 2 +- jlm/rvsdg/structural-node.cpp | 4 +- jlm/rvsdg/structural-node.hpp | 8 +-- jlm/rvsdg/substitution.hpp | 16 +++--- jlm/rvsdg/theta.cpp | 6 +-- jlm/rvsdg/theta.hpp | 14 ++--- jlm/rvsdg/tracker.hpp | 2 +- jlm/rvsdg/traverser.cpp 
| 4 +- jlm/rvsdg/traverser.hpp | 12 ++--- jlm/rvsdg/unary.cpp | 2 +- jlm/rvsdg/unary.hpp | 2 +- jlm/rvsdg/view.cpp | 30 +++++------ jlm/rvsdg/view.hpp | 12 ++--- jlm/tooling/Command.cpp | 2 +- tests/TestRvsdgs.cpp | 8 +-- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 4 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 8 +-- tests/jlm/llvm/ir/operators/TestCall.cpp | 6 +-- tests/jlm/llvm/ir/operators/TestPhi.cpp | 4 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 8 +-- .../opt/alias-analyses/TestPointsToGraph.cpp | 2 +- tests/jlm/llvm/opt/test-inlining.cpp | 2 +- tests/jlm/llvm/opt/test-unroll.cpp | 4 +- tests/jlm/rvsdg/RegionTests.cpp | 40 +++++++------- tests/jlm/rvsdg/test-binary.cpp | 4 +- tests/jlm/rvsdg/test-graph.cpp | 2 +- tests/test-operation.cpp | 6 +-- tests/test-operation.hpp | 38 +++++++------- 152 files changed, 548 insertions(+), 560 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index ae55eccdd..ad37a6d26 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -2494,7 +2494,7 @@ RhlsToFirrtlConverter::TraceArgument(rvsdg::RegionArgument * arg) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGen(jlm::rvsdg::region * subRegion, mlir::Block * circuitBody) +RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitBody) { // Generate a vector with all inputs and outputs of the module ::llvm::SmallVector ports; @@ -2702,7 +2702,7 @@ RhlsToFirrtlConverter::MlirGen(jlm::rvsdg::region * subRegion, mlir::Block * cir std::unordered_map RhlsToFirrtlConverter::createInstances( - jlm::rvsdg::region * subRegion, + rvsdg::Region * subRegion, mlir::Block * circuitBody, mlir::Block * body) { diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index f5fade519..63bd27aa5 100644 --- 
a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -107,7 +107,7 @@ class RhlsToFirrtlConverter : public BaseHLS std::unordered_map MlirGen(hls::loop_node * loopNode, mlir::Block * body, mlir::Block * circuitBody); circt::firrtl::FModuleOp - MlirGen(jlm::rvsdg::region * subRegion, mlir::Block * circuitBody); + MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitBody); circt::firrtl::FModuleOp MlirGen(const jlm::rvsdg::simple_node * node); // Operations @@ -275,7 +275,7 @@ class RhlsToFirrtlConverter : public BaseHLS IsIdentityMapping(const jlm::rvsdg::match_op & op); std::unordered_map - createInstances(jlm::rvsdg::region * subRegion, mlir::Block * circuitBody, mlir::Block * body); + createInstances(rvsdg::Region * subRegion, mlir::Block * circuitBody, mlir::Block * body); void check_module(circt::firrtl::FModuleOp & module); diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 3edbf9d50..7064e2336 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -137,7 +137,7 @@ BaseHLS::JlmSize(const jlm::rvsdg::type * type) } void -BaseHLS::create_node_names(jlm::rvsdg::region * r) +BaseHLS::create_node_names(rvsdg::Region * r) { for (auto & node : r->nodes) { diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index 64a400813..8c8769492 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -64,7 +64,7 @@ class BaseHLS get_hls_lambda(llvm::RvsdgModule & rm); void - create_node_names(jlm::rvsdg::region * r); + create_node_names(rvsdg::Region * r); virtual std::string get_text(llvm::RvsdgModule & rm) = 0; diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index 75d2b3c52..bb426c605 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ 
b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -352,7 +352,7 @@ DotHLS::prepare_loop_out_port(hls::loop_node * ln) } std::string -DotHLS::subregion_to_dot(jlm::rvsdg::region * sr) +DotHLS::subregion_to_dot(rvsdg::Region * sr) { std::ostringstream dot; dot << "digraph G {\n"; diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp index 841119524..24130563c 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp @@ -40,7 +40,7 @@ class DotHLS : public BaseHLS prepare_loop_out_port(hls::loop_node * ln); std::string - subregion_to_dot(jlm::rvsdg::region * sr); + subregion_to_dot(rvsdg::Region * sr); int loop_ctr = 0; }; diff --git a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp index 393b4ae05..99a976748 100644 --- a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp +++ b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp @@ -81,7 +81,7 @@ RemoveUnusedInputs(hls::loop_node & loopNode) } static bool -EliminateDeadNodesInRegion(jlm::rvsdg::region & region) +EliminateDeadNodesInRegion(rvsdg::Region & region) { bool changed; bool anyChanged = false; diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 9d91958fd..cc36f8384 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -131,7 +131,7 @@ CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) } static void -ConvertGammaNodesInRegion(rvsdg::region & region); +ConvertGammaNodesInRegion(rvsdg::Region & region); static void ConvertGammaNodesInStructuralNode(rvsdg::structural_node & structuralNode) @@ -155,7 +155,7 @@ ConvertGammaNodesInStructuralNode(rvsdg::structural_node & structuralNode) } static void -ConvertGammaNodesInRegion(rvsdg::region & region) +ConvertGammaNodesInRegion(rvsdg::Region & region) { for (auto & node : rvsdg::topdown_traverser(®ion)) { 
diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index 07617323e..dace17626 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -44,7 +44,7 @@ ConvertThetaNode(rvsdg::ThetaNode & theta) } static void -ConvertThetaNodesInRegion(jlm::rvsdg::region & region); +ConvertThetaNodesInRegion(rvsdg::Region & region); static void ConvertThetaNodesInStructuralNode(jlm::rvsdg::structural_node & structuralNode) @@ -61,7 +61,7 @@ ConvertThetaNodesInStructuralNode(jlm::rvsdg::structural_node & structuralNode) } static void -ConvertThetaNodesInRegion(jlm::rvsdg::region & region) +ConvertThetaNodesInRegion(rvsdg::Region & region) { for (auto & node : jlm::rvsdg::topdown_traverser(®ion)) { diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index af6bfa25d..9d40330f7 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -188,7 +188,7 @@ RemoveUnusedStatesFromThetaNode(rvsdg::ThetaNode & thetaNode) } static void -RemoveUnusedStatesInRegion(rvsdg::region & region); +RemoveUnusedStatesInRegion(rvsdg::Region & region); static void RemoveUnusedStatesInStructuralNode(rvsdg::structural_node & structuralNode) @@ -214,7 +214,7 @@ RemoveUnusedStatesInStructuralNode(rvsdg::structural_node & structuralNode) } static void -RemoveUnusedStatesInRegion(rvsdg::region & region) +RemoveUnusedStatesInRegion(rvsdg::Region & region) { for (auto & node : rvsdg::topdown_traverser(®ion)) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp index ad0986c24..25dfff69b 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp @@ -12,7 +12,7 @@ namespace jlm::hls { void -add_buffers(jlm::rvsdg::region * region, bool pass_through) +add_buffers(rvsdg::Region * region, 
bool pass_through) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-buffers.hpp b/jlm/hls/backend/rvsdg2rhls/add-buffers.hpp index d5e50bc35..7b126652e 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-buffers.hpp +++ b/jlm/hls/backend/rvsdg2rhls/add-buffers.hpp @@ -13,7 +13,7 @@ namespace jlm::hls { void -add_buffers(rvsdg::region * region, bool pass_through); +add_buffers(rvsdg::Region * region, bool pass_through); void add_buffers(llvm::RvsdgModule & rm, bool pass_through); diff --git a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp index 39087dd84..8996bcdda 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp @@ -12,7 +12,7 @@ namespace jlm::hls { void -add_forks(jlm::rvsdg::region * region) +add_forks(rvsdg::Region * region) { for (size_t i = 0; i < region->narguments(); ++i) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-forks.hpp b/jlm/hls/backend/rvsdg2rhls/add-forks.hpp index 39d8847cb..3f3ddcc02 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-forks.hpp +++ b/jlm/hls/backend/rvsdg2rhls/add-forks.hpp @@ -19,7 +19,7 @@ namespace jlm::hls * /param region The region for which to insert forks. */ void -add_forks(rvsdg::region * region); +add_forks(rvsdg::Region * region); /** * Adds a fork for every output that has multiple consumers (node inputs). 
The original output is diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 20b9164a6..0b41139c7 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -14,7 +14,7 @@ namespace jlm::hls { void -add_prints(jlm::rvsdg::region * region) +add_prints(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -68,7 +68,7 @@ convert_prints(llvm::RvsdgModule & rm) } jlm::rvsdg::output * -route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) +route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) { JLM_ASSERT(region != nullptr); @@ -100,7 +100,7 @@ route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) void convert_prints( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::output * printf, const std::shared_ptr & functionType) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.hpp b/jlm/hls/backend/rvsdg2rhls/add-prints.hpp index a8eedb659..0311e2359 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.hpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.hpp @@ -13,7 +13,7 @@ namespace jlm::hls { void -add_prints(rvsdg::region * region); +add_prints(rvsdg::Region * region); void add_prints(llvm::RvsdgModule & rm); @@ -23,12 +23,12 @@ convert_prints(llvm::RvsdgModule & rm); void convert_prints( - rvsdg::region * region, + rvsdg::Region * region, rvsdg::output * printf, const std::shared_ptr & functionType); rvsdg::output * -route_to_region(rvsdg::output * output, rvsdg::region * region); +route_to_region(rvsdg::output * output, rvsdg::Region * region); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp b/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp index 5ab434c25..b4afc740e 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp @@ -11,7 +11,7 @@ namespace jlm::hls { void -add_sinks(jlm::rvsdg::region * region) 
+add_sinks(rvsdg::Region * region) { for (size_t i = 0; i < region->narguments(); ++i) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-sinks.hpp b/jlm/hls/backend/rvsdg2rhls/add-sinks.hpp index 81513ab84..92880d5d1 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-sinks.hpp +++ b/jlm/hls/backend/rvsdg2rhls/add-sinks.hpp @@ -13,7 +13,7 @@ namespace jlm::hls { void -add_sinks(rvsdg::region * region); +add_sinks(rvsdg::Region * region); void add_sinks(jlm::llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 1e04418f3..ec14d7917 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -15,7 +15,7 @@ namespace jlm::hls { jlm::rvsdg::output * -get_trigger(jlm::rvsdg::region * region) +get_trigger(rvsdg::Region * region) { for (size_t i = 0; i < region->narguments(); ++i) { @@ -84,7 +84,7 @@ add_lambda_argument(llvm::lambda::node * ln, std::shared_ptr(region->nodes.begin().ptr()); bool changed; diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.hpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.hpp index 37ade11ea..0851c7e1b 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.hpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.hpp @@ -11,7 +11,7 @@ namespace jlm::hls { void -dae_conv(jlm::rvsdg::region * region); +dae_conv(rvsdg::Region * region); void dae_conv(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 30672e96e..ba80c7b84 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -71,7 +71,7 @@ distribute_constant(const rvsdg::simple_op & op, rvsdg::simple_output * out) } void -hls::distribute_constants(rvsdg::region * region) +hls::distribute_constants(rvsdg::Region * region) { // push constants down as far as possible, since this is cheaper than having forks and potentially // buffers for 
them diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.hpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.hpp index 083f58062..9f9c05dd3 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.hpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.hpp @@ -15,7 +15,7 @@ namespace jlm namespace hls { void -distribute_constants(rvsdg::region * region); +distribute_constants(rvsdg::Region * region); void distribute_constants(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 5fc17f2f1..ac2aab97d 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -138,7 +138,7 @@ instrument_ref(llvm::RvsdgModule & rm) void instrument_ref( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::output * ioState, jlm::rvsdg::output * load_func, const std::shared_ptr & loadFunctionType, diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.hpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.hpp index d7f6d0e38..03149bf5f 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.hpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.hpp @@ -17,7 +17,7 @@ instrument_ref(llvm::RvsdgModule & rm); void instrument_ref( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::output * ioState, jlm::rvsdg::output * load_func, const std::shared_ptr & loadFunctionType, diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 414ed8cfa..5a8945fe8 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -19,7 +19,7 @@ #include jlm::rvsdg::output * -jlm::hls::route_response(jlm::rvsdg::region * target, jlm::rvsdg::output * response) +jlm::hls::route_response(rvsdg::Region * target, jlm::rvsdg::output * response) { if (response->region() == target) { @@ -37,7 +37,7 @@ jlm::hls::route_response(jlm::rvsdg::region * target, 
jlm::rvsdg::output * respo } jlm::rvsdg::output * -jlm::hls::route_request(jlm::rvsdg::region * target, jlm::rvsdg::output * request) +jlm::hls::route_request(rvsdg::Region * target, jlm::rvsdg::output * request) { if (request->region() == target) { @@ -324,7 +324,7 @@ replace_store(jlm::rvsdg::simple_node * orig) void gather_mem_nodes( - jlm::rvsdg::region * region, + jlm::rvsdg::Region * region, std::vector & load_nodes, std::vector & store_nodes, std::vector & decouple_nodes, diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp index b2acb2f17..c848eba21 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp @@ -53,10 +53,10 @@ jlm::rvsdg::simple_node * ReplaceStore(jlm::rvsdg::substitution_map & smap, const jlm::rvsdg::simple_node * originalStore); jlm::rvsdg::output * -route_response(jlm::rvsdg::region * target, jlm::rvsdg::output * response); +route_response(rvsdg::Region * target, jlm::rvsdg::output * response); jlm::rvsdg::output * -route_request(jlm::rvsdg::region * target, jlm::rvsdg::output * request); +route_request(rvsdg::Region * target, jlm::rvsdg::output * request); } // namespace jlm::hls diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index fde25af0d..2e64bf0f5 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -28,7 +28,7 @@ jlm::hls::mem_queue(llvm::RvsdgModule & rm) } void -dump_xml(const jlm::rvsdg::region * region, const std::string & file_name) +dump_xml(const jlm::rvsdg::Region * region, const std::string & file_name) { auto xml_file = fopen(file_name.c_str(), "w"); jlm::rvsdg::view_xml(region, xml_file); @@ -133,11 +133,11 @@ find_loop_output(jlm::rvsdg::structural_input * sti) JLM_UNREACHABLE("This should never happen"); } -std::deque -get_parent_regions(jlm::rvsdg::region * region) +std::deque +get_parent_regions(jlm::rvsdg::Region * region) { - 
std::deque regions; - jlm::rvsdg::region * target_region = region; + std::deque regions; + jlm::rvsdg::Region * target_region = region; while (!dynamic_cast(&target_region->node()->operation())) { regions.push_front(target_region); @@ -147,14 +147,14 @@ get_parent_regions(jlm::rvsdg::region * region) } jlm::rvsdg::output * -route_to_region(jlm::rvsdg::region * target, jlm::rvsdg::output * out) +route_to_region(jlm::rvsdg::Region * target, jlm::rvsdg::output * out) { // create lists of nested regions - std::deque target_regions = get_parent_regions(target); - std::deque out_regions = get_parent_regions(out->region()); + std::deque target_regions = get_parent_regions(target); + std::deque out_regions = get_parent_regions(out->region()); JLM_ASSERT(target_regions.front() == out_regions.front()); // remove common ancestor regions - jlm::rvsdg::region * common_region = nullptr; + jlm::rvsdg::Region * common_region = nullptr; while (!target_regions.empty() && !out_regions.empty() && target_regions.front() == out_regions.front()) { @@ -520,7 +520,7 @@ process_loops(jlm::rvsdg::output * state_edge) } void -jlm::hls::mem_queue(jlm::rvsdg::region * region) +jlm::hls::mem_queue(jlm::rvsdg::Region * region) { auto lambda = dynamic_cast(region->nodes.first()); auto state_arg = GetMemoryStateArgument(*lambda); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.hpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.hpp index ec0cd4d95..88d9737cd 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.hpp @@ -12,7 +12,7 @@ namespace jlm::hls { void -mem_queue(jlm::rvsdg::region * region); +mem_queue(rvsdg::Region * region); void mem_queue(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 2cb56ead7..cc5896362 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -67,7 +67,7 @@ GetMemoryStateResult(const llvm::lambda::node & lambda) } 
void -gather_mem_nodes(jlm::rvsdg::region * region, std::vector & mem_nodes) +gather_mem_nodes(rvsdg::Region * region, std::vector & mem_nodes) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -91,7 +91,7 @@ gather_mem_nodes(jlm::rvsdg::region * region, std::vectorregion() == target) { @@ -131,7 +131,7 @@ route_through(jlm::rvsdg::region * target, jlm::rvsdg::output * response) /* assign each load and store its own state edge. */ void -mem_sep_independent(jlm::rvsdg::region * region) +mem_sep_independent(rvsdg::Region * region) { auto lambda = dynamic_cast(region->nodes.begin().ptr()); auto lambda_region = lambda->subregion(); @@ -275,7 +275,7 @@ trace_edge( /* assign each pointer argument its own state edge. */ void -mem_sep_argument(jlm::rvsdg::region * region) +mem_sep_argument(rvsdg::Region * region) { auto lambda = dynamic_cast(region->nodes.begin().ptr()); auto lambda_region = lambda->subregion(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp index bd212835b..16ce2be7d 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.hpp @@ -12,13 +12,13 @@ namespace jlm::hls { void -mem_sep_independent(jlm::rvsdg::region * region); +mem_sep_independent(rvsdg::Region * region); void mem_sep_independent(llvm::RvsdgModule & rm); void -mem_sep_argument(jlm::rvsdg::region * region); +mem_sep_argument(rvsdg::Region * region); void mem_sep_argument(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp index f97841ce1..9abe4a5bc 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp @@ -25,7 +25,7 @@ memstate_conv(llvm::RvsdgModule & rm) } void -memstate_conv(jlm::rvsdg::region * region) +memstate_conv(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.hpp 
b/jlm/hls/backend/rvsdg2rhls/memstate-conv.hpp index 2bc00d1ac..03922711b 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.hpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.hpp @@ -12,7 +12,7 @@ namespace jlm::hls { void -memstate_conv(jlm::rvsdg::region * region); +memstate_conv(rvsdg::Region * region); void memstate_conv(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 1fa750bfc..95863b924 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -193,7 +193,7 @@ eliminate_gamma_eol(rvsdg::GammaNode * gamma) } void -merge_gamma(jlm::rvsdg::region * region) +merge_gamma(rvsdg::Region * region) { bool changed = true; while (changed) diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp index dd5475889..2abbaf3c3 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.hpp @@ -13,7 +13,7 @@ namespace jlm::hls { void -merge_gamma(jlm::rvsdg::region * region); +merge_gamma(rvsdg::Region * region); void merge_gamma(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index 78a36ec41..fa096103d 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -33,7 +33,7 @@ eliminate_buf(jlm::rvsdg::output * o) } void -remove_redundant_buf(jlm::rvsdg::region * region) +remove_redundant_buf(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.hpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.hpp index 9e194f29f..3b12a8c1d 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.hpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.hpp @@ -17,7 +17,7 @@ void 
remove_redundant_buf(llvm::RvsdgModule & rm); void -remove_redundant_buf(jlm::rvsdg::region * region); +remove_redundant_buf(rvsdg::Region * region); } diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index e35f333d7..bc4ebfb2b 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -13,7 +13,7 @@ namespace jlm::hls { void -remove_unused_state(jlm::rvsdg::region * region, bool can_remove_arguments) +remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) { // process children first so that unnecessary users get removed for (auto & node : jlm::rvsdg::topdown_traverser(region)) diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp index e0a11ad54..6a1d721db 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp @@ -33,7 +33,7 @@ void remove_unused_state(llvm::RvsdgModule & rm); void -remove_unused_state(jlm::rvsdg::region * region, bool can_remove_arguments = true); +remove_unused_state(rvsdg::Region * region, bool can_remove_arguments = true); } // namespace jlm::hls diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp index c7c6596e1..06b42857c 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp @@ -268,7 +268,7 @@ dead_loop(jlm::rvsdg::node * ndmux_node) } bool -dne(jlm::rvsdg::region * sr) +dne(rvsdg::Region * sr) { bool any_changed = false; bool changed; diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.hpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.hpp index b3c904738..3bf472a37 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.hpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.hpp @@ -25,7 +25,7 @@ bool remove_unused_loop_inputs(hls::loop_node * ln); bool -dne(jlm::rvsdg::region * sr); +dne(rvsdg::Region * 
sr); void dne(llvm::RvsdgModule & rm); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index c4468521f..4d60fab56 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -134,7 +134,7 @@ trace_call(jlm::rvsdg::input * input) } void -inline_calls(jlm::rvsdg::region * region) +inline_calls(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -176,7 +176,7 @@ inline_calls(jlm::rvsdg::region * region) size_t alloca_cnt = 0; void -convert_alloca(jlm::rvsdg::region * region) +convert_alloca(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 28dd71155..cf627fdfa 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -38,13 +38,13 @@ bundletype::ComputeHash() const noexcept EntryArgument::~EntryArgument() noexcept = default; EntryArgument & -EntryArgument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +EntryArgument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { return EntryArgument::Create(region, *input, Type()); } backedge_argument & -backedge_argument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) +backedge_argument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) { JLM_ASSERT(input == nullptr); return *backedge_argument::create(®ion, Type()); @@ -99,7 +99,7 @@ loop_node::add_loopconst(jlm::rvsdg::output * origin) } loop_node * -loop_node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +loop_node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const { auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); @@ -165,7 +165,7 @@ loop_node::add_backedge(std::shared_ptr type) } loop_node * -loop_node::create(jlm::rvsdg::region * parent, bool init) +loop_node::create(rvsdg::Region * 
parent, bool init) { auto ln = new loop_node(parent); if (init) diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index b105bde1e..001040623 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -619,7 +619,7 @@ class EntryArgument : public rvsdg::RegionArgument private: EntryArgument( - rvsdg::region & region, + rvsdg::Region & region, rvsdg::structural_input & input, const std::shared_ptr type) : rvsdg::RegionArgument(®ion, &input, std::move(type)) @@ -627,13 +627,13 @@ class EntryArgument : public rvsdg::RegionArgument public: EntryArgument & - Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; // FIXME: This should not be public, but we currently still have some transformations that use // this one. Make it eventually private. static EntryArgument & Create( - rvsdg::region & region, + rvsdg::Region & region, rvsdg::structural_input & input, const std::shared_ptr type) { @@ -658,18 +658,16 @@ class backedge_argument : public rvsdg::RegionArgument } backedge_argument & - Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override; private: - backedge_argument( - jlm::rvsdg::region * region, - const std::shared_ptr & type) + backedge_argument(rvsdg::Region * region, const std::shared_ptr & type) : rvsdg::RegionArgument(region, nullptr, type), result_(nullptr) {} static backedge_argument * - create(jlm::rvsdg::region * region, std::shared_ptr type) + create(rvsdg::Region * region, std::shared_ptr type) { auto argument = new backedge_argument(region, std::move(type)); region->append_argument(argument); @@ -752,7 +750,7 @@ class loop_node final : public jlm::rvsdg::structural_node {} private: - inline loop_node(jlm::rvsdg::region * parent) + inline loop_node(rvsdg::Region * parent) : structural_node(loop_op(), parent, 1) {} @@ -760,9 +758,9 @@ class loop_node final : 
public jlm::rvsdg::structural_node public: static loop_node * - create(jlm::rvsdg::region * parent, bool init = true); + create(rvsdg::Region * parent, bool init = true); - inline jlm::rvsdg::region * + rvsdg::Region * subregion() const noexcept { return structural_node::subregion(0); @@ -795,7 +793,7 @@ class loop_node final : public jlm::rvsdg::structural_node add_loopconst(jlm::rvsdg::output * origin); virtual loop_node * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; }; class bundletype final : public jlm::rvsdg::valuetype @@ -1319,7 +1317,7 @@ class mem_req_op final : public jlm::rvsdg::simple_op const std::vector & load_operands, const std::vector> & loadTypes, const std::vector & store_operands, - jlm::rvsdg::region * region) + rvsdg::Region * region) { // Stores have both addr and data operand // But we are only interested in the data operand type @@ -1479,7 +1477,7 @@ class local_mem_op final : public jlm::rvsdg::simple_op } static std::vector - create(std::shared_ptr at, jlm::rvsdg::region * region) + create(std::shared_ptr at, rvsdg::Region * region) { local_mem_op op(std::move(at)); return jlm::rvsdg::simple_node::create_normalized(region, op, {}); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index d07b8f1b4..96ca5bd18 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -287,7 +287,7 @@ mark_arguments(jlm::rvsdg::structural_input * i1, jlm::rvsdg::structural_input * } static void -mark(jlm::rvsdg::region *, cnectx &); +mark(jlm::rvsdg::Region *, cnectx &); static void mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) @@ -464,7 +464,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) } static void -mark(jlm::rvsdg::region * region, cnectx & ctx) +mark(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -494,14 +494,14 @@ 
divert_outputs(jlm::rvsdg::node * node, cnectx & ctx) } static void -divert_arguments(jlm::rvsdg::region * region, cnectx & ctx) +divert_arguments(rvsdg::Region * region, cnectx & ctx) { for (size_t n = 0; n < region->narguments(); n++) divert_users(region->argument(n), ctx); } static void -divert(jlm::rvsdg::region *, cnectx &); +divert(rvsdg::Region *, cnectx &); static void divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) @@ -587,7 +587,7 @@ divert(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert(jlm::rvsdg::region * region, cnectx & ctx) +divert(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 56e6d3942..62ab85cfe 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -309,7 +309,7 @@ simple_node_to_dot(jlm::rvsdg::simple_node * simpleNode) } std::string -region_to_dot(jlm::rvsdg::region * region) +region_to_dot(rvsdg::Region * region) { std::ostringstream dot; dot << "subgraph cluster_reg" << hex((intptr_t)region) << " {\n"; @@ -394,7 +394,7 @@ region_to_dot(jlm::rvsdg::region * region) } std::string -to_dot(jlm::rvsdg::region * region) +to_dot(rvsdg::Region * region) { std::ostringstream dot; dot << "digraph G {\n"; @@ -404,7 +404,7 @@ to_dot(jlm::rvsdg::region * region) } void -view_dot(jlm::rvsdg::region * region, FILE * out) +view_dot(rvsdg::Region * region, FILE * out) { fputs(jlm::hls::to_dot(region).c_str(), out); fflush(out); diff --git a/jlm/hls/util/view.hpp b/jlm/hls/util/view.hpp index 86074a754..d7e4dd6f2 100644 --- a/jlm/hls/util/view.hpp +++ b/jlm/hls/util/view.hpp @@ -14,13 +14,13 @@ namespace jlm::hls { std::string -region_to_dot(jlm::rvsdg::region * region); +region_to_dot(rvsdg::Region * region); std::string -to_dot(jlm::rvsdg::region * region); +to_dot(rvsdg::Region * region); void -view_dot(jlm::rvsdg::region * region, FILE * out); +view_dot(rvsdg::Region * region, FILE * 
out); void dump_dot(llvm::RvsdgModule & rvsdgModule, const std::string & file_name); diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp index 66c65bd23..d3ea2a7fc 100644 --- a/jlm/llvm/backend/dot/DotWriter.cpp +++ b/jlm/llvm/backend/dot/DotWriter.cpp @@ -133,7 +133,7 @@ AttachNodeOutput( * If the type does not already exist in the type graph, it is created. */ static void -CreateGraphNodes(util::Graph & graph, rvsdg::region & region, util::Graph * typeGraph) +CreateGraphNodes(util::Graph & graph, rvsdg::Region & region, util::Graph * typeGraph) { graph.SetProgramObject(region); @@ -189,7 +189,7 @@ CreateGraphNodes(util::Graph & graph, rvsdg::region & region, util::Graph * type } util::Graph & -WriteGraphs(util::GraphWriter & writer, rvsdg::region & region, bool emitTypeGraph) +WriteGraphs(util::GraphWriter & writer, rvsdg::Region & region, bool emitTypeGraph) { util::Graph * typeGraph = nullptr; if (emitTypeGraph) diff --git a/jlm/llvm/backend/dot/DotWriter.hpp b/jlm/llvm/backend/dot/DotWriter.hpp index 021b07bd1..7716be25a 100644 --- a/jlm/llvm/backend/dot/DotWriter.hpp +++ b/jlm/llvm/backend/dot/DotWriter.hpp @@ -23,7 +23,7 @@ namespace jlm::llvm::dot * @return a reference to the top-level graph corresponding to the region */ util::Graph & -WriteGraphs(util::GraphWriter & writer, rvsdg::region & region, bool emitTypeGraph); +WriteGraphs(util::GraphWriter & writer, rvsdg::Region & region, bool emitTypeGraph); } #endif // JLM_LLVM_BACKEND_DOT_DOTWRITER_HPP diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 37496553a..f3ff729f9 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -102,7 +102,7 @@ static void convert_node(const rvsdg::node & node, context & ctx); static inline void -convert_region(rvsdg::region & region, context & ctx) +convert_region(rvsdg::Region & region, context & ctx) { auto entry = 
basic_block::create(*ctx.cfg()); ctx.lpbb()->add_outedge(entry); diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 67504c209..566ab3c59 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -61,7 +61,7 @@ class RegionalizedVariableMap final JLM_ASSERT(NumRegions() == 0); } - RegionalizedVariableMap(const ipgraph_module & interProceduralGraphModule, rvsdg::region & region) + RegionalizedVariableMap(const ipgraph_module & interProceduralGraphModule, rvsdg::Region & region) : InterProceduralGraphModule_(interProceduralGraphModule) { PushRegion(region); @@ -88,14 +88,14 @@ class RegionalizedVariableMap final return VariableMap(NumRegions() - 1); } - rvsdg::region & + rvsdg::Region & GetRegion(size_t n) noexcept { JLM_ASSERT(n < NumRegions()); return *RegionStack_[n]; } - rvsdg::region & + rvsdg::Region & GetTopRegion() noexcept { JLM_ASSERT(NumRegions() > 0); @@ -103,7 +103,7 @@ class RegionalizedVariableMap final } void - PushRegion(rvsdg::region & region) + PushRegion(rvsdg::Region & region) { VariableMapStack_.push_back(std::make_unique()); RegionStack_.push_back(®ion); @@ -125,7 +125,7 @@ class RegionalizedVariableMap final private: const ipgraph_module & InterProceduralGraphModule_; std::vector> VariableMapStack_; - std::vector RegionStack_; + std::vector RegionStack_; }; class ControlFlowRestructuringStatistics final : public util::Statistics @@ -461,7 +461,7 @@ requiresExport(const ipgraph_node & ipgNode) static void ConvertAssignment( const llvm::tac & threeAddressCode, - rvsdg::region & region, + rvsdg::Region & region, llvm::VariableMap & variableMap) { JLM_ASSERT(is(threeAddressCode.operation())); @@ -474,7 +474,7 @@ ConvertAssignment( static void ConvertSelect( const llvm::tac & threeAddressCode, - rvsdg::region & region, + rvsdg::Region & region, llvm::VariableMap & variableMap) { 
JLM_ASSERT(is(threeAddressCode.operation())); @@ -494,7 +494,7 @@ ConvertSelect( static void ConvertBranch( const llvm::tac & threeAddressCode, - rvsdg::region & region, + rvsdg::Region & region, llvm::VariableMap & variableMap) { JLM_ASSERT(is(threeAddressCode.operation())); @@ -505,7 +505,7 @@ ConvertBranch( template static void -Convert(const llvm::tac & threeAddressCode, rvsdg::region & region, llvm::VariableMap & variableMap) +Convert(const llvm::tac & threeAddressCode, rvsdg::Region & region, llvm::VariableMap & variableMap) { std::vector operands; for (size_t n = 0; n < threeAddressCode.noperands(); n++) @@ -528,7 +528,7 @@ Convert(const llvm::tac & threeAddressCode, rvsdg::region & region, llvm::Variab static void ConvertThreeAddressCode( const llvm::tac & threeAddressCode, - rvsdg::region & region, + rvsdg::Region & region, llvm::VariableMap & variableMap) { if (is(&threeAddressCode)) @@ -581,7 +581,7 @@ ConvertThreeAddressCode( static void ConvertBasicBlock( const taclist & basicBlock, - rvsdg::region & region, + rvsdg::Region & region, llvm::VariableMap & variableMap) { for (const auto & threeAddressCode : basicBlock) @@ -1001,7 +1001,7 @@ ConvertFunctionNode( static rvsdg::output * ConvertDataNodeInitialization( const data_node_init & init, - rvsdg::region & region, + rvsdg::Region & region, RegionalizedVariableMap & regionalizedVariableMap) { auto & variableMap = regionalizedVariableMap.GetTopVariableMap(); diff --git a/jlm/llvm/ir/RvsdgModule.cpp b/jlm/llvm/ir/RvsdgModule.cpp index 7d253d1fc..c927f43c8 100644 --- a/jlm/llvm/ir/RvsdgModule.cpp +++ b/jlm/llvm/ir/RvsdgModule.cpp @@ -9,7 +9,7 @@ namespace jlm::llvm { GraphImport & -GraphImport::Copy(rvsdg::region & region, rvsdg::structural_input * input) +GraphImport::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { return GraphImport::Create(*region.graph(), ValueType(), Name(), Linkage()); } diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 3f161cb3b..857673879 
100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -45,7 +45,7 @@ class GraphImport final : public rvsdg::GraphImport } GraphImport & - Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; static GraphImport & Create( diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 74132a3b9..f960636bc 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -81,7 +81,7 @@ LoadNonVolatileNode::CopyWithNewMemoryStates( } rvsdg::node * -LoadNonVolatileNode::copy(rvsdg::region * region, const std::vector & operands) +LoadNonVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); @@ -157,7 +157,7 @@ LoadVolatileNode::CopyWithNewMemoryStates(const std::vector & m } rvsdg::node * -LoadVolatileNode::copy(rvsdg::region * region, const std::vector & operands) const +LoadVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); } @@ -631,7 +631,7 @@ load_normal_form::normalize_node(rvsdg::node * node) const std::vector load_normal_form::normalized_create( - rvsdg::region * region, + rvsdg::Region * region, const rvsdg::simple_op & op, const std::vector & operands) const { diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index d472269a3..73da8730d 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -32,7 +32,7 @@ class load_normal_form final : public rvsdg::simple_normal_form virtual std::vector normalized_create( - rvsdg::region * region, + rvsdg::Region * region, const rvsdg::simple_op & op, const std::vector & operands) const override; @@ -259,7 +259,7 @@ class LoadNode : public rvsdg::simple_node { protected: LoadNode( - rvsdg::region & region, + rvsdg::Region & region, const LoadOperation & 
operation, const std::vector & operands) : simple_node(®ion, operation, operands) @@ -357,7 +357,7 @@ class LoadVolatileNode final : public LoadNode { private: LoadVolatileNode( - rvsdg::region & region, + rvsdg::Region & region, const LoadVolatileOperation & operation, const std::vector & operands) : LoadNode(region, operation, operands) @@ -365,7 +365,7 @@ class LoadVolatileNode final : public LoadNode public: rvsdg::node * - copy(rvsdg::region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; [[nodiscard]] const LoadVolatileOperation & GetOperation() const noexcept override; @@ -397,7 +397,7 @@ class LoadVolatileNode final : public LoadNode static LoadVolatileNode & CreateNode( - rvsdg::region & region, + rvsdg::Region & region, const LoadVolatileOperation & loadOperation, const std::vector & operands) { @@ -421,7 +421,7 @@ class LoadVolatileNode final : public LoadNode static std::vector Create( - rvsdg::region & region, + rvsdg::Region & region, const LoadVolatileOperation & loadOperation, const std::vector & operands) { @@ -510,7 +510,7 @@ class LoadNonVolatileNode final : public LoadNode { private: LoadNonVolatileNode( - rvsdg::region & region, + rvsdg::Region & region, const LoadNonVolatileOperation & operation, const std::vector & operands) : LoadNode(region, operation, operands) @@ -530,7 +530,7 @@ class LoadNonVolatileNode final : public LoadNode CopyWithNewMemoryStates(const std::vector & memoryStates) const override; rvsdg::node * - copy(rvsdg::region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; static std::vector Create( @@ -558,7 +558,7 @@ class LoadNonVolatileNode final : public LoadNode static std::vector Create( - rvsdg::region & region, + rvsdg::Region & region, const LoadNonVolatileOperation & loadOperation, const std::vector & operands) { @@ -567,7 +567,7 @@ class 
LoadNonVolatileNode final : public LoadNode static LoadNonVolatileNode & CreateNode( - rvsdg::region & region, + rvsdg::Region & region, const LoadNonVolatileOperation & loadOperation, const std::vector & operands) { diff --git a/jlm/llvm/ir/operators/MemoryStateOperations.hpp b/jlm/llvm/ir/operators/MemoryStateOperations.hpp index 4a9777664..2212b5ee2 100644 --- a/jlm/llvm/ir/operators/MemoryStateOperations.hpp +++ b/jlm/llvm/ir/operators/MemoryStateOperations.hpp @@ -173,7 +173,7 @@ class LambdaExitMemoryStateMergeOperation final : public MemoryStateOperation copy() const override; static rvsdg::output & - Create(rvsdg::region & region, const std::vector & operands) + Create(rvsdg::Region & region, const std::vector & operands) { LambdaExitMemoryStateMergeOperation operation(operands.size()); return *rvsdg::simple_node::create_normalized(®ion, operation, operands)[0]; @@ -209,7 +209,7 @@ class CallEntryMemoryStateMergeOperation final : public MemoryStateOperation copy() const override; static rvsdg::output & - Create(rvsdg::region & region, const std::vector & operands) + Create(rvsdg::Region & region, const std::vector & operands) { CallEntryMemoryStateMergeOperation operation(operands.size()); return *rvsdg::simple_node::create_normalized(®ion, operation, operands)[0]; diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 27275f7f4..6a387d5c5 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -55,7 +55,7 @@ node::add_ctxvar(jlm::rvsdg::output * origin) } phi::node * -node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const { phi::builder pb; pb.begin(region); @@ -169,7 +169,7 @@ rvargument::~rvargument() {} rvargument & -rvargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +rvargument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { JLM_ASSERT(input == nullptr); 
return *rvargument::create(®ion, Type()); @@ -181,7 +181,7 @@ cvargument::~cvargument() {} cvargument & -cvargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +cvargument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { auto phiInput = util::AssertedCast(input); return *cvargument::create(®ion, phiInput, Type()); diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index f5eece551..50393066e 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -304,12 +304,12 @@ class node final : public jlm::rvsdg::structural_node ~node() override; private: - node(jlm::rvsdg::region * parent, const phi::operation & op) + node(rvsdg::Region * parent, const phi::operation & op) : structural_node(op, parent, 1) {} static phi::node * - create(jlm::rvsdg::region * parent, const phi::operation & op) + create(rvsdg::Region * parent, const phi::operation & op) { return new phi::node(parent, op); } @@ -375,7 +375,7 @@ class node final : public jlm::rvsdg::structural_node return rviterator(nullptr); } - jlm::rvsdg::region * + rvsdg::Region * subregion() const noexcept { return structural_node::subregion(0); @@ -486,7 +486,7 @@ class node final : public jlm::rvsdg::structural_node output(size_t n) const noexcept; virtual phi::node * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; /** * Extracts all lambda nodes from a phi node. @@ -511,14 +511,14 @@ class builder final : node_(nullptr) {} - jlm::rvsdg::region * + rvsdg::Region * subregion() const noexcept { return node_ ? 
node_->subregion() : nullptr; } void - begin(jlm::rvsdg::region * parent) + begin(rvsdg::Region * parent) { if (node_) return; @@ -660,7 +660,7 @@ class rvargument final : public rvsdg::RegionArgument ~rvargument() override; private: - rvargument(jlm::rvsdg::region * region, const std::shared_ptr type) + rvargument(rvsdg::Region * region, const std::shared_ptr type) : RegionArgument(region, nullptr, std::move(type)), output_(nullptr) {} @@ -676,7 +676,7 @@ class rvargument final : public rvsdg::RegionArgument operator=(rvargument &&) = delete; static rvargument * - create(jlm::rvsdg::region * region, std::shared_ptr type) + create(rvsdg::Region * region, std::shared_ptr type) { auto argument = new rvargument(region, std::move(type)); region->append_argument(argument); @@ -698,7 +698,7 @@ class rvargument final : public rvsdg::RegionArgument } rvargument & - Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; private: rvoutput * output_; @@ -716,10 +716,7 @@ class cvargument final : public rvsdg::RegionArgument public: ~cvargument() override; - cvargument( - jlm::rvsdg::region * region, - phi::cvinput * input, - std::shared_ptr type) + cvargument(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) : rvsdg::RegionArgument(region, input, std::move(type)) {} @@ -735,10 +732,10 @@ class cvargument final : public rvsdg::RegionArgument operator=(cvargument &&) = delete; cvargument & - Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; static cvargument * - create(jlm::rvsdg::region * region, phi::cvinput * input, std::shared_ptr type) + create(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) { auto argument = new cvargument(region, input, std::move(type)); region->append_argument(argument); @@ -764,7 +761,7 @@ class rvresult final : public 
rvsdg::RegionResult private: rvresult( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::output * origin, rvoutput * output, std::shared_ptr type) @@ -786,7 +783,7 @@ class rvresult final : public rvsdg::RegionResult static rvresult * create( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::output * origin, rvoutput * output, std::shared_ptr type) diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 04eb919c6..eb3d73ef5 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -79,7 +79,7 @@ StoreNonVolatileNode::CopyWithNewMemoryStates( } rvsdg::node * -StoreNonVolatileNode::copy(rvsdg::region * region, const std::vector & operands) +StoreNonVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); @@ -155,7 +155,7 @@ StoreVolatileNode::CopyWithNewMemoryStates(const std::vector & } rvsdg::node * -StoreVolatileNode::copy(rvsdg::region * region, const std::vector & operands) const +StoreVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); } @@ -383,7 +383,7 @@ store_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector store_normal_form::normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & ops) const { diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 9ec691d20..4699fb10c 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -32,7 +32,7 @@ class store_normal_form final : public jlm::rvsdg::simple_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & operands) const override; @@ -216,7 +216,7 @@ class StoreNode : public rvsdg::simple_node { 
protected: StoreNode( - rvsdg::region & region, + rvsdg::Region & region, const StoreOperation & operation, const std::vector & operands) : simple_node(®ion, operation, operands) @@ -314,7 +314,7 @@ class StoreNonVolatileNode final : public StoreNode { private: StoreNonVolatileNode( - jlm::rvsdg::region & region, + rvsdg::Region & region, const StoreNonVolatileOperation & operation, const std::vector & operands) : StoreNode(region, operation, operands) @@ -334,7 +334,7 @@ class StoreNonVolatileNode final : public StoreNode CopyWithNewMemoryStates(const std::vector & memoryStates) const override; rvsdg::node * - copy(rvsdg::region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; static std::vector Create( @@ -364,7 +364,7 @@ class StoreNonVolatileNode final : public StoreNode static std::vector Create( - rvsdg::region & region, + rvsdg::Region & region, const StoreNonVolatileOperation & storeOperation, const std::vector & operands) { @@ -373,7 +373,7 @@ class StoreNonVolatileNode final : public StoreNode static StoreNonVolatileNode & CreateNode( - rvsdg::region & region, + rvsdg::Region & region, const StoreNonVolatileOperation & storeOperation, const std::vector & operands) { @@ -485,7 +485,7 @@ class StoreVolatileOperation final : public StoreOperation class StoreVolatileNode final : public StoreNode { StoreVolatileNode( - rvsdg::region & region, + rvsdg::Region & region, const StoreVolatileOperation & operation, const std::vector & operands) : StoreNode(region, operation, operands) @@ -521,11 +521,11 @@ class StoreVolatileNode final : public StoreNode } rvsdg::node * - copy(rvsdg::region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; static StoreVolatileNode & CreateNode( - rvsdg::region & region, + rvsdg::Region & region, const StoreVolatileOperation & storeOperation, const std::vector & 
operands) { @@ -551,7 +551,7 @@ class StoreVolatileNode final : public StoreNode static std::vector Create( - rvsdg::region & region, + rvsdg::Region & region, const StoreVolatileOperation & loadOperation, const std::vector & operands) { diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 8bf4ac82e..636be3534 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -145,7 +145,7 @@ CallOperation::copy() const } rvsdg::node * -CallNode::copy(rvsdg::region * region, const std::vector & operands) const +CallNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); } diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index b2592af79..f4d047811 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -256,7 +256,7 @@ class CallNode final : public jlm::rvsdg::simple_node { private: CallNode( - jlm::rvsdg::region & region, + rvsdg::Region & region, const CallOperation & operation, const std::vector & operands) : simple_node(®ion, operation, operands) @@ -416,7 +416,7 @@ class CallNode final : public jlm::rvsdg::simple_node } rvsdg::node * - copy(rvsdg::region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; static std::vector Create( @@ -429,7 +429,7 @@ class CallNode final : public jlm::rvsdg::simple_node static std::vector Create( - rvsdg::region & region, + rvsdg::Region & region, const CallOperation & callOperation, const std::vector & operands) { @@ -438,7 +438,7 @@ class CallNode final : public jlm::rvsdg::simple_node static CallNode & CreateNode( - rvsdg::region & region, + rvsdg::Region & region, const CallOperation & callOperation, const std::vector & operands) { diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index 6305e798b..1af42804f 100644 --- 
a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -43,13 +43,13 @@ node::~node() {} delta::node * -node::copy(jlm::rvsdg::region * region, const std::vector & operands) const +node::copy(rvsdg::Region * region, const std::vector & operands) const { return static_cast(jlm::rvsdg::node::copy(region, operands)); } delta::node * -node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const { auto delta = Create(region, Type(), name(), linkage(), Section(), constant()); @@ -173,7 +173,7 @@ cvargument::~cvargument() {} cvargument & -cvargument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) +cvargument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) { auto deltaInput = util::AssertedCast(input); return *cvargument::create(®ion, deltaInput); diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 32015dcc6..a1ef4aa81 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -139,7 +139,7 @@ class node final : public rvsdg::structural_node ~node() override; private: - node(rvsdg::region * parent, delta::operation && op) + node(rvsdg::Region * parent, delta::operation && op) : structural_node(op, parent, 1) {} @@ -150,7 +150,7 @@ class node final : public rvsdg::structural_node ctxvar_constrange ctxvars() const; - rvsdg::region * + rvsdg::Region * subregion() const noexcept { return structural_node::subregion(0); @@ -259,10 +259,10 @@ class node final : public rvsdg::structural_node result() const noexcept; virtual delta::node * - copy(rvsdg::region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; virtual delta::node * - copy(rvsdg::region * region, rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::substitution_map & smap) const 
override; /** * Creates a delta node in the region \p parent with the pointer type \p type and name \p name. @@ -281,7 +281,7 @@ class node final : public rvsdg::structural_node */ static node * Create( - rvsdg::region * parent, + rvsdg::Region * parent, std::shared_ptr type, const std::string & name, const llvm::linkage & linkage, @@ -414,15 +414,15 @@ class cvargument final : public rvsdg::RegionArgument ~cvargument() override; cvargument & - Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override; private: - cvargument(rvsdg::region * region, cvinput * input) + cvargument(rvsdg::Region * region, cvinput * input) : rvsdg::RegionArgument(region, input, input->Type()) {} static cvargument * - create(rvsdg::region * region, delta::cvinput * input) + create(rvsdg::Region * region, delta::cvinput * input) { auto argument = new cvargument(region, input); region->append_argument(argument); diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 24f78559c..32618d770 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -193,7 +193,7 @@ node::GetMemoryStateEntrySplit(const lambda::node & lambdaNode) noexcept lambda::node * node::create( - jlm::rvsdg::region * parent, + rvsdg::Region * parent, std::shared_ptr type, const std::string & name, const llvm::linkage & linkage, @@ -239,13 +239,13 @@ node::finalize(const std::vector & results) } lambda::node * -node::copy(jlm::rvsdg::region * region, const std::vector & operands) const +node::copy(rvsdg::Region * region, const std::vector & operands) const { return util::AssertedCast(jlm::rvsdg::node::copy(region, operands)); } lambda::node * -node::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const { auto lambda = create(region, Type(), name(), linkage(), 
attributes()); @@ -418,7 +418,7 @@ output::~output() = default; fctargument::~fctargument() = default; fctargument & -fctargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) +fctargument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { JLM_ASSERT(input == nullptr); return *fctargument::create(®ion, Type()); @@ -429,7 +429,7 @@ fctargument::Copy(rvsdg::region & region, rvsdg::structural_input * input) cvargument::~cvargument() = default; cvargument & -cvargument::Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) +cvargument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) { auto lambdaInput = util::AssertedCast(input); return *cvargument::create(®ion, lambdaInput); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 3f7f77ec7..a4425970c 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -156,7 +156,7 @@ class node final : public jlm::rvsdg::structural_node ~node() override; private: - node(jlm::rvsdg::region * parent, lambda::operation && op) + node(rvsdg::Region * parent, lambda::operation && op) : structural_node(op, parent, 1) {} @@ -179,7 +179,7 @@ class node final : public jlm::rvsdg::structural_node [[nodiscard]] fctresult_constrange fctresults() const; - [[nodiscard]] jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * subregion() const noexcept { return structural_node::subregion(0); @@ -297,11 +297,10 @@ class node final : public jlm::rvsdg::structural_node fctresult(size_t n) const noexcept; lambda::node * - copy(jlm::rvsdg::region * region, const std::vector & operands) - const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; lambda::node * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; /** * @return The memory state argument of the lambda subregion. 
@@ -355,7 +354,7 @@ class node final : public jlm::rvsdg::structural_node */ static node * create( - jlm::rvsdg::region * parent, + rvsdg::Region * parent, std::shared_ptr type, const std::string & name, const jlm::llvm::linkage & linkage, @@ -366,7 +365,7 @@ class node final : public jlm::rvsdg::structural_node */ static node * create( - jlm::rvsdg::region * parent, + rvsdg::Region * parent, std::shared_ptr type, const std::string & name, const jlm::llvm::linkage & linkage) @@ -527,15 +526,15 @@ class fctargument final : public rvsdg::RegionArgument } fctargument & - Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; private: - fctargument(jlm::rvsdg::region * region, std::shared_ptr type) + fctargument(rvsdg::Region * region, std::shared_ptr type) : rvsdg::RegionArgument(region, nullptr, std::move(type)) {} static fctargument * - create(jlm::rvsdg::region * region, std::shared_ptr type) + create(rvsdg::Region * region, std::shared_ptr type) { auto argument = new fctargument(region, std::move(type)); region->append_argument(argument); @@ -604,15 +603,15 @@ class cvargument final : public rvsdg::RegionArgument ~cvargument() override; cvargument & - Copy(rvsdg::region & region, jlm::rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override; private: - cvargument(jlm::rvsdg::region * region, cvinput * input) + cvargument(rvsdg::Region * region, cvinput * input) : rvsdg::RegionArgument(region, input, input->Type()) {} static cvargument * - create(jlm::rvsdg::region * region, lambda::cvinput * input) + create(rvsdg::Region * region, lambda::cvinput * input) { auto argument = new cvargument(region, input); region->append_argument(argument); diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index 6b3f4aa89..1c0e948f3 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ 
b/jlm/llvm/ir/operators/operators.hpp @@ -470,7 +470,7 @@ class ConstantPointerNullOperation final : public jlm::rvsdg::simple_op } static jlm::rvsdg::output * - Create(jlm::rvsdg::region * region, std::shared_ptr type) + Create(rvsdg::Region * region, std::shared_ptr type) { ConstantPointerNullOperation operation(CheckAndExtractType(type)); return jlm::rvsdg::simple_node::create_normalized(region, operation, {})[0]; @@ -1039,7 +1039,7 @@ class UndefValueOperation final : public jlm::rvsdg::simple_op } static jlm::rvsdg::output * - Create(jlm::rvsdg::region & region, std::shared_ptr type) + Create(rvsdg::Region & region, std::shared_ptr type) { UndefValueOperation operation(std::move(type)); return jlm::rvsdg::simple_node::create_normalized(®ion, operation, {})[0]; @@ -1120,7 +1120,7 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op } static jlm::rvsdg::output * - Create(jlm::rvsdg::region * region, const std::shared_ptr & type) + Create(rvsdg::Region * region, const std::shared_ptr & type) { auto valueType = CheckAndConvertType(type); @@ -1470,7 +1470,7 @@ class valist_op final : public jlm::rvsdg::simple_op } static rvsdg::output * - Create(rvsdg::region & region, const std::vector & operands) + Create(rvsdg::Region & region, const std::vector & operands) { std::vector> operandTypes; operandTypes.reserve(operands.size()); @@ -1606,7 +1606,7 @@ class ConstantStruct final : public jlm::rvsdg::simple_op static rvsdg::output & Create( - rvsdg::region & region, + rvsdg::Region & region, const std::vector & operands, std::shared_ptr resultType) { @@ -1945,7 +1945,7 @@ class ConstantAggregateZero final : public jlm::rvsdg::simple_op } static jlm::rvsdg::output * - Create(jlm::rvsdg::region & region, std::shared_ptr type) + Create(rvsdg::Region & region, std::shared_ptr type) { ConstantAggregateZero operation(std::move(type)); return jlm::rvsdg::simple_node::create_normalized(®ion, operation, {})[0]; diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp 
b/jlm/llvm/opt/DeadNodeElimination.cpp index 1bc58ed22..c970fad32 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -142,7 +142,7 @@ DeadNodeElimination::~DeadNodeElimination() noexcept = default; DeadNodeElimination::DeadNodeElimination() = default; void -DeadNodeElimination::run(jlm::rvsdg::region & region) +DeadNodeElimination::run(rvsdg::Region & region) { Context_ = Context::Create(); @@ -175,7 +175,7 @@ DeadNodeElimination::run(RvsdgModule & module, jlm::util::StatisticsCollector & } void -DeadNodeElimination::MarkRegion(const jlm::rvsdg::region & region) +DeadNodeElimination::MarkRegion(const rvsdg::Region & region) { for (size_t n = 0; n < region.nresults(); n++) { @@ -309,7 +309,7 @@ DeadNodeElimination::SweepRvsdg(jlm::rvsdg::graph & rvsdg) const } void -DeadNodeElimination::SweepRegion(jlm::rvsdg::region & region) const +DeadNodeElimination::SweepRegion(rvsdg::Region & region) const { region.prune(false); diff --git a/jlm/llvm/opt/DeadNodeElimination.hpp b/jlm/llvm/opt/DeadNodeElimination.hpp index 998cb3c27..388c939b3 100644 --- a/jlm/llvm/opt/DeadNodeElimination.hpp +++ b/jlm/llvm/opt/DeadNodeElimination.hpp @@ -72,14 +72,14 @@ class DeadNodeElimination final : public optimization operator=(DeadNodeElimination &&) = delete; void - run(jlm::rvsdg::region & region); + run(rvsdg::Region & region); void run(RvsdgModule & module, jlm::util::StatisticsCollector & statisticsCollector) override; private: void - MarkRegion(const jlm::rvsdg::region & region); + MarkRegion(const rvsdg::Region & region); void MarkOutput(const jlm::rvsdg::output & output); @@ -88,7 +88,7 @@ class DeadNodeElimination final : public optimization SweepRvsdg(jlm::rvsdg::graph & rvsdg) const; void - SweepRegion(jlm::rvsdg::region & region) const; + SweepRegion(rvsdg::Region & region) const; void SweepStructuralNode(jlm::rvsdg::structural_node & node) const; diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp 
b/jlm/llvm/opt/InvariantValueRedirection.cpp index 312cbb725..33749d293 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -92,7 +92,7 @@ InvariantValueRedirection::RedirectInRootRegion(rvsdg::graph & rvsdg) } void -InvariantValueRedirection::RedirectInRegion(rvsdg::region & region) +InvariantValueRedirection::RedirectInRegion(rvsdg::Region & region) { auto isGammaNode = is(region.node()); auto isThetaNode = is(region.node()); diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index 72b144841..73e56767a 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -61,7 +61,7 @@ class InvariantValueRedirection final : public optimization RedirectInRootRegion(rvsdg::graph & rvsdg); static void - RedirectInRegion(rvsdg::region & region); + RedirectInRegion(rvsdg::Region & region); static void RedirectInSubregions(rvsdg::structural_node & structuralNode); diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index d57e16b7c..6a1da32f5 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -50,7 +50,7 @@ RvsdgTreePrinter::run(RvsdgModule & rvsdgModule, util::StatisticsCollector & sta statistics->Start(); auto annotationMap = ComputeAnnotationMap(rvsdgModule.Rvsdg()); - auto tree = rvsdg::region::ToTree(*rvsdgModule.Rvsdg().root(), annotationMap); + auto tree = rvsdg::Region::ToTree(*rvsdgModule.Rvsdg().root(), annotationMap); WriteTreeToFile(rvsdgModule, tree); statistics->Stop(); @@ -92,7 +92,7 @@ RvsdgTreePrinter::AnnotateNumRvsdgNodes( { static std::string_view label("NumRvsdgNodes"); - std::function annotateRegion = [&](const rvsdg::region & region) + std::function annotateRegion = [&](const rvsdg::Region & region) { for (auto & node : region.nodes) { @@ -128,7 +128,7 @@ RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( std::string_view 
inputLabel("NumMemoryStateTypeInputs"); std::string_view outputLabel("NumMemoryStateTypeOutputs"); - std::function annotateRegion = [&](const rvsdg::region & region) + std::function annotateRegion = [&](const rvsdg::Region & region) { size_t numMemoryStateArguments = 0; for (size_t n = 0; n < region.narguments(); n++) diff --git a/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp index 9994db7fc..4894a626e 100644 --- a/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp @@ -42,13 +42,13 @@ class AgnosticMemoryNodeProvisioning final : public MemoryNodeProvisioning } [[nodiscard]] const util::HashSet & - GetRegionEntryNodes(const rvsdg::region & region) const override + GetRegionEntryNodes(const rvsdg::Region & region) const override { return MemoryNodes_; } [[nodiscard]] const util::HashSet & - GetRegionExitNodes(const rvsdg::region & region) const override + GetRegionExitNodes(const rvsdg::Region & region) const override { return MemoryNodes_; } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 6888c38ee..e439d1d05 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -1006,7 +1006,7 @@ Andersen::AnalyzeTheta(const rvsdg::ThetaNode & theta) } void -Andersen::AnalyzeRegion(rvsdg::region & region) +Andersen::AnalyzeRegion(rvsdg::Region & region) { // Check that all region arguments of pointing types have PointerObjects for (size_t i = 0; i < region.narguments(); i++) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index 7c01a329d..e37c326a1 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -337,7 +337,7 @@ class Andersen final : public AliasAnalysis private: void - AnalyzeRegion(rvsdg::region & region); + 
AnalyzeRegion(rvsdg::Region & region); void AnalyzeSimpleNode(const rvsdg::simple_node & node); diff --git a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp index c67eab4fa..eea1ba9bf 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryNodeProvisioning.hpp @@ -30,10 +30,10 @@ class MemoryNodeProvisioning GetPointsToGraph() const noexcept = 0; [[nodiscard]] virtual const jlm::util::HashSet & - GetRegionEntryNodes(const jlm::rvsdg::region & region) const = 0; + GetRegionEntryNodes(const rvsdg::Region & region) const = 0; [[nodiscard]] virtual const jlm::util::HashSet & - GetRegionExitNodes(const jlm::rvsdg::region & region) const = 0; + GetRegionExitNodes(const rvsdg::Region & region) const = 0; [[nodiscard]] virtual const jlm::util::HashSet & GetCallEntryNodes(const CallNode & callNode) const = 0; diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 1a6398e77..cfe5bade4 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -297,7 +297,7 @@ class RegionalizedStateMap final } StateMap::MemoryNodeStatePair * - InsertUndefinedState(rvsdg::region & region, const PointsToGraph::MemoryNode & memoryNode) + InsertUndefinedState(rvsdg::Region & region, const PointsToGraph::MemoryNode & memoryNode) { auto & undefinedState = GetOrInsertUndefinedMemoryState(region); return InsertState(memoryNode, undefinedState); @@ -319,20 +319,20 @@ class RegionalizedStateMap final std::vector GetStates( - const rvsdg::region & region, + const rvsdg::Region & region, const util::HashSet & memoryNodes) { return GetStateMap(region).GetStates(memoryNodes); } bool - HasState(const rvsdg::region & region, const PointsToGraph::MemoryNode & memoryNode) + HasState(const rvsdg::Region & region, const PointsToGraph::MemoryNode & memoryNode) { 
return GetStateMap(region).HasState(memoryNode); } StateMap::MemoryNodeStatePair * - GetState(const rvsdg::region & region, const PointsToGraph::MemoryNode & memoryNode) + GetState(const rvsdg::Region & region, const PointsToGraph::MemoryNode & memoryNode) { return GetStateMap(region).GetState(memoryNode); } @@ -345,7 +345,7 @@ class RegionalizedStateMap final } void - PushRegion(const rvsdg::region & region) + PushRegion(const rvsdg::Region & region) { JLM_ASSERT(StateMaps_.find(®ion) == StateMaps_.end()); JLM_ASSERT(MemoryNodeCacheMaps_.find(®ion) == MemoryNodeCacheMaps_.end()); @@ -355,7 +355,7 @@ class RegionalizedStateMap final } void - PopRegion(const rvsdg::region & region) + PopRegion(const rvsdg::Region & region) { JLM_ASSERT(StateMaps_.find(®ion) != StateMaps_.end()); JLM_ASSERT(MemoryNodeCacheMaps_.find(®ion) != MemoryNodeCacheMaps_.end()); @@ -366,27 +366,27 @@ class RegionalizedStateMap final private: rvsdg::output & - GetOrInsertUndefinedMemoryState(rvsdg::region & region) + GetOrInsertUndefinedMemoryState(rvsdg::Region & region) { return HasUndefinedMemoryState(region) ? 
GetUndefinedMemoryState(region) : InsertUndefinedMemoryState(region); } bool - HasUndefinedMemoryState(const rvsdg::region & region) const noexcept + HasUndefinedMemoryState(const rvsdg::Region & region) const noexcept { return UndefinedMemoryStates_.find(®ion) != UndefinedMemoryStates_.end(); } rvsdg::output & - GetUndefinedMemoryState(const rvsdg::region & region) const noexcept + GetUndefinedMemoryState(const rvsdg::Region & region) const noexcept { JLM_ASSERT(HasUndefinedMemoryState(region)); return *UndefinedMemoryStates_.find(®ion)->second; } rvsdg::output & - InsertUndefinedMemoryState(rvsdg::region & region) noexcept + InsertUndefinedMemoryState(rvsdg::Region & region) noexcept { auto undefinedMemoryState = UndefValueOperation::Create(region, MemoryStateType::Create()); UndefinedMemoryStates_[®ion] = undefinedMemoryState; @@ -394,22 +394,22 @@ class RegionalizedStateMap final } StateMap & - GetStateMap(const rvsdg::region & region) const noexcept + GetStateMap(const rvsdg::Region & region) const noexcept { JLM_ASSERT(StateMaps_.find(®ion) != StateMaps_.end()); return *StateMaps_.at(®ion); } MemoryNodeCache & - GetMemoryNodeCache(const rvsdg::region & region) const noexcept + GetMemoryNodeCache(const rvsdg::Region & region) const noexcept { JLM_ASSERT(MemoryNodeCacheMaps_.find(®ion) != MemoryNodeCacheMaps_.end()); return *MemoryNodeCacheMaps_.at(®ion); } - std::unordered_map> StateMaps_; - std::unordered_map> MemoryNodeCacheMaps_; - std::unordered_map UndefinedMemoryStates_; + std::unordered_map> StateMaps_; + std::unordered_map> MemoryNodeCacheMaps_; + std::unordered_map UndefinedMemoryStates_; const MemoryNodeProvisioning & MemoryNodeProvisioning_; }; @@ -485,7 +485,7 @@ MemoryStateEncoder::Encode( } void -MemoryStateEncoder::EncodeRegion(rvsdg::region & region) +MemoryStateEncoder::EncodeRegion(rvsdg::Region & region) { using namespace jlm::rvsdg; diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp 
b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index 6daa04217..f4d3df1b2 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -83,7 +83,7 @@ class MemoryStateEncoder final private: void - EncodeRegion(rvsdg::region & region); + EncodeRegion(rvsdg::Region & region); void EncodeStructuralNode(rvsdg::structural_node & structuralNode); diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index b824affc4..1978f945f 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -45,7 +45,7 @@ class RegionAwareMemoryNodeProvider::Statistics final : public util::Statistics return; AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(rvsdgModule.Rvsdg().root())); - AddMeasurement(NumRvsdgRegionsLabel_, rvsdg::region::NumRegions(*rvsdgModule.Rvsdg().root())); + AddMeasurement(NumRvsdgRegionsLabel_, rvsdg::Region::NumRegions(*rvsdgModule.Rvsdg().root())); AddMeasurement(Label::NumPointsToGraphMemoryNodes, pointsToGraph.NumMemoryNodes()); } @@ -148,7 +148,7 @@ class RegionAwareMemoryNodeProvider::Statistics final : public util::Statistics class RegionSummary final { public: - explicit RegionSummary(const rvsdg::region & region) + explicit RegionSummary(const rvsdg::Region & region) : Region_(®ion) {} @@ -228,7 +228,7 @@ class RegionSummary final StructuralNodes_.Insert(&structuralNode); } - [[nodiscard]] const rvsdg::region & + [[nodiscard]] const rvsdg::Region & GetRegion() const noexcept { return *Region_; @@ -242,13 +242,13 @@ class RegionSummary final } static std::unique_ptr - Create(const rvsdg::region & region) + Create(const rvsdg::Region & region) { return std::make_unique(region); } private: - const rvsdg::region * Region_; + const rvsdg::Region * Region_; util::HashSet MemoryNodes_; util::HashSet UnknownMemoryNodeReferences_; 
@@ -263,7 +263,7 @@ class RegionSummary final class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning { using RegionSummaryMap = - std::unordered_map>; + std::unordered_map>; class RegionSummaryConstIterator final { @@ -356,14 +356,14 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning } [[nodiscard]] const util::HashSet & - GetRegionEntryNodes(const rvsdg::region & region) const override + GetRegionEntryNodes(const rvsdg::Region & region) const override { auto & regionSummary = GetRegionSummary(region); return regionSummary.GetMemoryNodes(); } [[nodiscard]] const util::HashSet & - GetRegionExitNodes(const rvsdg::region & region) const override + GetRegionExitNodes(const rvsdg::Region & region) const override { auto & regionSummary = GetRegionSummary(region); return regionSummary.GetMemoryNodes(); @@ -438,7 +438,7 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning } [[nodiscard]] bool - ContainsRegionSummary(const rvsdg::region & region) const + ContainsRegionSummary(const rvsdg::Region & region) const { return RegionSummaries_.find(®ion) != RegionSummaries_.end(); } @@ -450,14 +450,14 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning } [[nodiscard]] RegionSummary & - GetRegionSummary(const rvsdg::region & region) const + GetRegionSummary(const rvsdg::Region & region) const { JLM_ASSERT(ContainsRegionSummary(region)); return *RegionSummaries_.find(®ion)->second; } [[nodiscard]] RegionSummary * - TryGetRegionSummary(const rvsdg::region & region) const + TryGetRegionSummary(const rvsdg::Region & region) const { return ContainsRegionSummary(region) ? 
&GetRegionSummary(region) : nullptr; } @@ -668,7 +668,7 @@ RegionAwareMemoryNodeProvider::Create( } void -RegionAwareMemoryNodeProvider::AnnotateRegion(rvsdg::region & region) +RegionAwareMemoryNodeProvider::AnnotateRegion(rvsdg::Region & region) { if (ShouldCreateRegionSummary(region)) { @@ -905,7 +905,7 @@ RegionAwareMemoryNodeProvider::PropagatePhi(const phi::node & phiNode) void RegionAwareMemoryNodeProvider::AssignAndPropagateMemoryNodes( - const rvsdg::region & region, + const rvsdg::Region & region, const util::HashSet & memoryNodes, const util::HashSet & unknownMemoryNodeReferences) { @@ -929,7 +929,7 @@ RegionAwareMemoryNodeProvider::AssignAndPropagateMemoryNodes( } void -RegionAwareMemoryNodeProvider::PropagateRegion(const rvsdg::region & region) +RegionAwareMemoryNodeProvider::PropagateRegion(const rvsdg::Region & region) { auto & regionSummary = Provisioning_->GetRegionSummary(region); for (auto & structuralNode : regionSummary.GetStructuralNodes().Items()) @@ -996,7 +996,7 @@ RegionAwareMemoryNodeProvider::ResolveUnknownMemoryNodeReferences(const RvsdgMod } bool -RegionAwareMemoryNodeProvider::ShouldCreateRegionSummary(const rvsdg::region & region) +RegionAwareMemoryNodeProvider::ShouldCreateRegionSummary(const rvsdg::Region & region) { return !region.IsRootRegion() && !is(region.node()) && !is(region.node()); @@ -1023,8 +1023,8 @@ RegionAwareMemoryNodeProvider::ToRegionTree( return std::string(depth, '-'); }; - std::function toRegionTree = - [&](const jlm::rvsdg::region * region, size_t depth) + std::function toRegionTree = + [&](const rvsdg::Region * region, size_t depth) { std::string subtree; if (region->node()) diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp index 76574ccf8..65b6a1644 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp @@ -104,7 +104,7 @@ class 
RegionAwareMemoryNodeProvider final : public MemoryNodeProvider * @param region The to be annotated region. */ void - AnnotateRegion(rvsdg::region & region); + AnnotateRegion(rvsdg::Region & region); void AnnotateSimpleNode(const rvsdg::simple_node & provider); @@ -158,7 +158,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider Propagate(const RvsdgModule & rvsdgModule); void - PropagateRegion(const rvsdg::region & region); + PropagateRegion(const rvsdg::Region & region); void PropagatePhi(const phi::node & phiNode); @@ -172,7 +172,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider */ void AssignAndPropagateMemoryNodes( - const rvsdg::region & region, + const rvsdg::Region & region, const util::HashSet & memoryNodes, const util::HashSet & unknownMemoryNodeReferences); @@ -196,7 +196,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider ResolveUnknownMemoryNodeReferences(const RvsdgModule & rvsdgModule); static bool - ShouldCreateRegionSummary(const rvsdg::region & region); + ShouldCreateRegionSummary(const rvsdg::Region & region); /** * Converts \p rvsdg to an annotated region tree. 
This method is very useful for debugging the diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index bea13a147..bd7619125 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -1711,7 +1711,7 @@ Steensgaard::AnalyzeStructuralNode(const jlm::rvsdg::structural_node & node) } void -Steensgaard::AnalyzeRegion(jlm::rvsdg::region & region) +Steensgaard::AnalyzeRegion(rvsdg::Region & region) { // Check that we added a RegisterLocation for each required argument for (size_t n = 0; n < region.narguments(); n++) diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index ce5aa6829..f2ce6f9c3 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -73,7 +73,7 @@ class Steensgaard final : public AliasAnalysis AnalyzeExports(const rvsdg::graph & graph); void - AnalyzeRegion(rvsdg::region & region); + AnalyzeRegion(rvsdg::Region & region); void AnalyzeLambda(const lambda::node & node); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 9ce802a63..8309bd82c 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -48,7 +48,7 @@ class TopDownMemoryNodeEliminator::Statistics final : public util::Statistics class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisioning { using RegionMap = - std::unordered_map>; + std::unordered_map>; using CallMap = std::unordered_map>; @@ -74,14 +74,14 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi } [[nodiscard]] const util::HashSet & - GetRegionEntryNodes(const rvsdg::region & region) const override + GetRegionEntryNodes(const rvsdg::Region & region) const override { 
JLM_ASSERT(HasRegionEntryMemoryNodesSet(region)); return RegionEntryMemoryNodes_.find(®ion)->second; } [[nodiscard]] const util::HashSet & - GetRegionExitNodes(const rvsdg::region & region) const override + GetRegionExitNodes(const rvsdg::Region & region) const override { JLM_ASSERT(HasRegionExitMemoryNodesSet(region)); return RegionExitMemoryNodes_.find(®ion)->second; @@ -148,7 +148,7 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi void AddRegionEntryNodes( - const rvsdg::region & region, + const rvsdg::Region & region, const util::HashSet & memoryNodes) { auto & set = GetOrCreateRegionEntryMemoryNodesSet(region); @@ -157,7 +157,7 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi void AddRegionExitNodes( - const rvsdg::region & region, + const rvsdg::Region & region, const util::HashSet & memoryNodes) { auto & set = GetOrCreateRegionExitMemoryNodesSet(region); @@ -203,19 +203,19 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi } bool - HasRegionEntryMemoryNodesSet(const rvsdg::region & region) const noexcept + HasRegionEntryMemoryNodesSet(const rvsdg::Region & region) const noexcept { return RegionEntryMemoryNodes_.find(®ion) != RegionEntryMemoryNodes_.end(); } bool - HasRegionExitMemoryNodesSet(const rvsdg::region & region) const noexcept + HasRegionExitMemoryNodesSet(const rvsdg::Region & region) const noexcept { return RegionExitMemoryNodes_.find(®ion) != RegionExitMemoryNodes_.end(); } util::HashSet & - GetOrCreateRegionEntryMemoryNodesSet(const rvsdg::region & region) + GetOrCreateRegionEntryMemoryNodesSet(const rvsdg::Region & region) { if (!HasRegionEntryMemoryNodesSet(region)) { @@ -226,7 +226,7 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi } util::HashSet & - GetOrCreateRegionExitMemoryNodesSet(const rvsdg::region & region) + GetOrCreateRegionExitMemoryNodesSet(const rvsdg::Region & region) { if 
(!HasRegionExitMemoryNodesSet(region)) { @@ -331,7 +331,7 @@ class TopDownMemoryNodeEliminator::Context final * @return Return the points-to graph memory nodes that are considered live in \p region. */ const util::HashSet & - GetLiveNodes(const rvsdg::region & region) noexcept + GetLiveNodes(const rvsdg::Region & region) noexcept { return GetOrCreateLiveNodesSet(region); } @@ -344,7 +344,7 @@ class TopDownMemoryNodeEliminator::Context final */ void AddLiveNodes( - const rvsdg::region & region, + const rvsdg::Region & region, const util::HashSet & memoryNodes) { auto & liveNodes = GetOrCreateLiveNodesSet(region); @@ -382,13 +382,13 @@ class TopDownMemoryNodeEliminator::Context final private: bool - HasLiveNodesSet(const rvsdg::region & region) const noexcept + HasLiveNodesSet(const rvsdg::Region & region) const noexcept { return LiveNodes_.find(®ion) != LiveNodes_.end(); } util::HashSet & - GetOrCreateLiveNodesSet(const rvsdg::region & region) + GetOrCreateLiveNodesSet(const rvsdg::Region & region) { if (!HasLiveNodesSet(region)) { @@ -402,7 +402,7 @@ class TopDownMemoryNodeEliminator::Context final std::unique_ptr Provisioning_; // Keeps track of the memory nodes that are live within a region. 
- std::unordered_map> + std::unordered_map> LiveNodes_; // Keeps track of all lambda nodes where we annotated live nodes BEFORE traversing the lambda @@ -467,7 +467,7 @@ TopDownMemoryNodeEliminator::EliminateTopDown(const RvsdgModule & rvsdgModule) } void -TopDownMemoryNodeEliminator::EliminateTopDownRootRegion(rvsdg::region & region) +TopDownMemoryNodeEliminator::EliminateTopDownRootRegion(rvsdg::Region & region) { JLM_ASSERT(region.IsRootRegion() || rvsdg::is(region.node())); @@ -498,7 +498,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownRootRegion(rvsdg::region & region) } void -TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::region & region) +TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::Region & region) { auto isLambdaSubregion = rvsdg::is(region.node()); auto isThetaSubregion = rvsdg::is(region.node()); @@ -614,7 +614,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownLambdaExit(const lambda::node & lam void TopDownMemoryNodeEliminator::EliminateTopDownPhi(const phi::node & phiNode) { - auto unifyLiveNodes = [&](const rvsdg::region & phiSubregion) + auto unifyLiveNodes = [&](const rvsdg::Region & phiSubregion) { std::vector lambdaNodes; util::HashSet liveNodes; @@ -917,11 +917,11 @@ TopDownMemoryNodeEliminator::CheckInvariants( const Provisioning & provisioning) { std::function &, + const rvsdg::Region &, + std::vector &, std::vector &)> - collectRegionsAndCalls = [&](const rvsdg::region & rootRegion, - std::vector & regions, + collectRegionsAndCalls = [&](const rvsdg::Region & rootRegion, + std::vector & regions, std::vector & callNodes) { for (auto & node : rootRegion.nodes) @@ -960,7 +960,7 @@ TopDownMemoryNodeEliminator::CheckInvariants( }; std::vector callNodes; - std::vector regions; + std::vector regions; collectRegionsAndCalls(*rvsdgModule.Rvsdg().root(), regions, callNodes); for (auto region : regions) diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp 
b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp index 05f3a5dd5..aaf3b93f5 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp @@ -29,7 +29,7 @@ namespace jlm::rvsdg { class GammaNode; class node; -class region; +class Region; class simple_node; class structural_node; class ThetaNode; @@ -121,7 +121,7 @@ class TopDownMemoryNodeEliminator final : public MemoryNodeEliminator * @param region The RVSDG root region or a phi subregion. */ void - EliminateTopDownRootRegion(rvsdg::region & region); + EliminateTopDownRootRegion(rvsdg::Region & region); /** * Processes the intra-procedural nodes in a lambda, theta, or gamma subregion top-down. The @@ -131,7 +131,7 @@ class TopDownMemoryNodeEliminator final : public MemoryNodeEliminator * @param region A lambda, theta, or gamma subregion. */ void - EliminateTopDownRegion(rvsdg::region & region); + EliminateTopDownRegion(rvsdg::Region & region); void EliminateTopDownStructuralNode(const rvsdg::structural_node & structuralNode); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 030fc2cb8..ecbbd9562 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -271,7 +271,7 @@ mark_arguments(jlm::rvsdg::structural_input * i1, jlm::rvsdg::structural_input * } static void -mark(jlm::rvsdg::region *, cnectx &); +mark(rvsdg::Region *, cnectx &); static void mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) @@ -425,7 +425,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) } static void -mark(jlm::rvsdg::region * region, cnectx & ctx) +mark(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -455,14 +455,14 @@ divert_outputs(jlm::rvsdg::node * node, cnectx & ctx) } static void -divert_arguments(jlm::rvsdg::region * region, cnectx & ctx) +divert_arguments(rvsdg::Region * region, cnectx & ctx) { for (size_t n = 0; n < region->narguments(); 
n++) divert_users(region->argument(n), ctx); } static void -divert(jlm::rvsdg::region *, cnectx &); +divert(rvsdg::Region *, cnectx &); static void divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) @@ -539,7 +539,7 @@ divert(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert(jlm::rvsdg::region * region, cnectx & ctx) +divert(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 80c16308a..44eddc2be 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -62,7 +62,7 @@ find_producer(jlm::rvsdg::input * input) } static jlm::rvsdg::output * -route_to_region(jlm::rvsdg::output * output, jlm::rvsdg::region * region) +route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) { JLM_ASSERT(region != nullptr); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index ed7dbce9a..eeced9554 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -113,7 +113,7 @@ collect_condition_nodes(jlm::rvsdg::structural_node * tnode, jlm::rvsdg::structu static void copy_condition_nodes( - jlm::rvsdg::region * target, + rvsdg::Region * target, jlm::rvsdg::substitution_map & smap, const std::vector> & nodes) { @@ -287,7 +287,7 @@ invert(rvsdg::ThetaNode * otheta) } static void -invert(jlm::rvsdg::region * region) +invert(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 7d514ae83..515c068ff 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -228,7 +228,7 @@ is_used_in_nsubregions(const rvsdg::GammaNode * gamma, const jlm::rvsdg::node * } /* collect subregions where node is used */ - std::unordered_set subregions; + std::unordered_set subregions; for (const auto & input : inputs) { for (const auto & argument : *input) @@ -282,7 +282,7 @@ 
pull(rvsdg::GammaNode * gamma) } void -pull(jlm::rvsdg::region * region) +pull(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/llvm/opt/pull.hpp b/jlm/llvm/opt/pull.hpp index fb6650366..f06aa0ac7 100644 --- a/jlm/llvm/opt/pull.hpp +++ b/jlm/llvm/opt/pull.hpp @@ -41,7 +41,7 @@ void pull(rvsdg::GammaNode * gamma); void -pull(jlm::rvsdg::region * region); +pull(rvsdg::Region * region); } diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index b82f36478..bebdb9dff 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -403,7 +403,7 @@ push(rvsdg::ThetaNode * theta) } static void -push(jlm::rvsdg::region * region) +push(rvsdg::Region * region) { for (auto node : jlm::rvsdg::topdown_traverser(region)) { diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 65dd373b3..96ee05e1b 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -176,7 +176,7 @@ unrollinfo::create(rvsdg::ThetaNode * theta) static void unroll_body( const rvsdg::ThetaNode * theta, - jlm::rvsdg::region * target, + rvsdg::Region * target, jlm::rvsdg::substitution_map & smap, size_t factor) { @@ -348,7 +348,7 @@ create_unrolled_gamma_predicate(const unrollinfo & ui, size_t factor) static jlm::rvsdg::output * create_unrolled_theta_predicate( - jlm::rvsdg::region * target, + rvsdg::Region * target, const jlm::rvsdg::substitution_map & smap, const unrollinfo & ui, size_t factor) @@ -482,7 +482,7 @@ unroll(rvsdg::ThetaNode * otheta, size_t factor) } static bool -unroll(jlm::rvsdg::region * region, size_t factor) +unroll(rvsdg::Region * region, size_t factor) { bool unrolled = false; for (auto & node : jlm::rvsdg::topdown_traverser(region)) diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 02c335f3f..df6781d0d 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -61,7 +61,7 @@ 
JlmToMlirConverter::ConvertOmega(const rvsdg::graph & graph) } ::llvm::SmallVector<::mlir::Value> -JlmToMlirConverter::ConvertRegion(rvsdg::region & region, ::mlir::Block & block) +JlmToMlirConverter::ConvertRegion(rvsdg::Region & region, ::mlir::Block & block) { for (size_t i = 0; i < region.narguments(); ++i) { diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index 41eef98d3..47aaf09e8 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -81,7 +81,7 @@ class JlmToMlirConverter final * \return A list of outputs of the converted region/block. */ ::llvm::SmallVector<::mlir::Value> - ConvertRegion(rvsdg::region & region, ::mlir::Block & block); + ConvertRegion(rvsdg::Region & region, ::mlir::Block & block); /** * Retreive the previously converted MLIR values from the map of operations diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index d906bb028..2787cf64b 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -42,7 +42,7 @@ MlirToJlmConverter::ConvertMlir(std::unique_ptr<::mlir::Block> & block) } ::llvm::SmallVector -MlirToJlmConverter::ConvertRegion(::mlir::Region & region, rvsdg::region & rvsdgRegion) +MlirToJlmConverter::ConvertRegion(::mlir::Region & region, rvsdg::Region & rvsdgRegion) { // MLIR use blocks as the innermost "container" // In the RVSDG Dialect a region should contain one and only one block @@ -54,7 +54,7 @@ ::llvm::SmallVector MlirToJlmConverter::GetConvertedInputs( ::mlir::Operation & mlirOp, const std::unordered_map<::mlir::Operation *, rvsdg::node *> & operationsMap, - const rvsdg::region & rvsdgRegion) + const rvsdg::Region & rvsdgRegion) { ::llvm::SmallVector inputs; for (::mlir::Value operand : mlirOp.getOperands()) @@ -77,7 +77,7 @@ MlirToJlmConverter::GetConvertedInputs( } ::llvm::SmallVector -MlirToJlmConverter::ConvertBlock(::mlir::Block & 
block, rvsdg::region & rvsdgRegion) +MlirToJlmConverter::ConvertBlock(::mlir::Block & block, rvsdg::Region & rvsdgRegion) { ::mlir::sortTopologically(&block); @@ -258,7 +258,7 @@ MlirToJlmConverter::ConvertBitBinaryNode( rvsdg::node * MlirToJlmConverter::ConvertOperation( ::mlir::Operation & mlirOperation, - rvsdg::region & rvsdgRegion, + rvsdg::Region & rvsdgRegion, const ::llvm::SmallVector & inputs) { @@ -397,14 +397,14 @@ MlirToJlmConverter::ConvertOperation( } void -MlirToJlmConverter::ConvertOmega(::mlir::Operation & mlirOmega, rvsdg::region & rvsdgRegion) +MlirToJlmConverter::ConvertOmega(::mlir::Operation & mlirOmega, rvsdg::Region & rvsdgRegion) { JLM_ASSERT(mlirOmega.getRegions().size() == 1); ConvertRegion(mlirOmega.getRegion(0), rvsdgRegion); } jlm::rvsdg::node * -MlirToJlmConverter::ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::region & rvsdgRegion) +MlirToJlmConverter::ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::Region & rvsdgRegion) { // Get the name of the function auto functionNameAttribute = mlirLambda.getAttr(::llvm::StringRef("sym_name")); diff --git a/jlm/mlir/frontend/MlirToJlmConverter.hpp b/jlm/mlir/frontend/MlirToJlmConverter.hpp index 46300599a..fb8d36e5e 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.hpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.hpp @@ -85,7 +85,7 @@ class MlirToJlmConverter final * \return The results of the region are returned as a std::vector */ ::llvm::SmallVector - ConvertRegion(::mlir::Region & region, rvsdg::region & rvsdgRegion); + ConvertRegion(::mlir::Region & region, rvsdg::Region & rvsdgRegion); /** * Converts the MLIR block and all operations in it @@ -95,7 +95,7 @@ class MlirToJlmConverter final * \return The results of the region are returned as a std::vector */ ::llvm::SmallVector - ConvertBlock(::mlir::Block & block, rvsdg::region & rvsdgRegion); + ConvertBlock(::mlir::Block & block, rvsdg::Region & rvsdgRegion); /** * Retreive the previously converted RVSDG ouputs from the map of 
operations @@ -109,7 +109,7 @@ class MlirToJlmConverter final GetConvertedInputs( ::mlir::Operation & mlirOp, const std::unordered_map<::mlir::Operation *, rvsdg::node *> & operationsMap, - const rvsdg::region & rvsdgRegion); + const rvsdg::Region & rvsdgRegion); /** * Converts an MLIR integer comparison operation into an RVSDG node. @@ -145,7 +145,7 @@ class MlirToJlmConverter final rvsdg::node * ConvertOperation( ::mlir::Operation & mlirOperation, - rvsdg::region & rvsdgRegion, + rvsdg::Region & rvsdgRegion, const ::llvm::SmallVector & inputs); /** @@ -154,7 +154,7 @@ class MlirToJlmConverter final * \param rvsdgRegion The RVSDG region that the omega node will reside in. */ void - ConvertOmega(::mlir::Operation & mlirOmega, rvsdg::region & rvsdgRegion); + ConvertOmega(::mlir::Operation & mlirOmega, rvsdg::Region & rvsdgRegion); /** * Converts an MLIR lambda operation and inserts it into an RVSDG region. @@ -163,7 +163,7 @@ class MlirToJlmConverter final * \result The converted Lambda node. */ rvsdg::node * - ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::region & rvsdgRegion); + ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::Region & rvsdgRegion); /** * Converts an MLIR type into an RVSDG type. 
diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 99f3fbbe4..18578e24f 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -153,7 +153,7 @@ binary_normal_form::normalize_node(jlm::rvsdg::node * node, const binary_op & op std::vector binary_normal_form::normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & base_op, const std::vector & args) const { @@ -297,7 +297,7 @@ flattened_binary_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector flattened_binary_normal_form::normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & base_op, const std::vector & arguments) const { @@ -417,7 +417,7 @@ flattened_binary_op::reduce( void flattened_binary_op::reduce( - jlm::rvsdg::region * region, + rvsdg::Region * region, const flattened_binary_op::reduction & reduction) { for (auto & node : topdown_traverser(region)) @@ -436,7 +436,7 @@ flattened_binary_op::reduce( } } - JLM_ASSERT(!region::Contains(*region, true)); + JLM_ASSERT(!Region::Contains(*region, true)); } } diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index b09644259..888be2bcd 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -34,7 +34,7 @@ class binary_normal_form final : public simple_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const override; @@ -111,7 +111,7 @@ class flattened_binary_normal_form final : public simple_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const override; }; @@ -220,7 +220,7 @@ class flattened_binary_op final : public simple_op const std::vector & operands) const; static void - reduce(jlm::rvsdg::region * region, const flattened_binary_op::reduction & reduction); + 
reduce(rvsdg::Region * region, const flattened_binary_op::reduction & reduction); static inline void reduce(jlm::rvsdg::graph * graph, const flattened_binary_op::reduction & reduction) diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 1b34156ad..12fabf24b 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -145,7 +145,7 @@ class concat_normal_form final : public simple_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const override { diff --git a/jlm/rvsdg/bitstring/constant.hpp b/jlm/rvsdg/bitstring/constant.hpp index d8283cafd..912ee93b2 100644 --- a/jlm/rvsdg/bitstring/constant.hpp +++ b/jlm/rvsdg/bitstring/constant.hpp @@ -58,26 +58,26 @@ int_constant_op(size_t nbits, int64_t value) extern template class domain_const_op; static inline jlm::rvsdg::output * -create_bitconstant(jlm::rvsdg::region * region, const bitvalue_repr & vr) +create_bitconstant(rvsdg::Region * region, const bitvalue_repr & vr) { return simple_node::create_normalized(region, bitconstant_op(vr), {})[0]; } static inline jlm::rvsdg::output * -create_bitconstant(jlm::rvsdg::region * region, size_t nbits, int64_t value) +create_bitconstant(rvsdg::Region * region, size_t nbits, int64_t value) { return create_bitconstant(region, { nbits, value }); } static inline jlm::rvsdg::output * -create_bitconstant_undefined(jlm::rvsdg::region * region, size_t nbits) +create_bitconstant_undefined(rvsdg::Region * region, size_t nbits) { std::string s(nbits, 'X'); return create_bitconstant(region, bitvalue_repr(s.c_str())); } static inline jlm::rvsdg::output * -create_bitconstant_defined(jlm::rvsdg::region * region, size_t nbits) +create_bitconstant_defined(rvsdg::Region * region, size_t nbits) { std::string s(nbits, 'D'); return create_bitconstant(region, bitvalue_repr(s.c_str())); diff --git a/jlm/rvsdg/control.cpp 
b/jlm/rvsdg/control.cpp index da9e6a789..e2a639877 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -161,7 +161,7 @@ match( } jlm::rvsdg::output * -control_constant(jlm::rvsdg::region * region, size_t nalternatives, size_t alternative) +control_constant(rvsdg::Region * region, size_t nalternatives, size_t alternative) { jlm::rvsdg::ctlconstant_op op({ alternative, nalternatives }); return jlm::rvsdg::simple_node::create_normalized(region, op, {})[0]; diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index b0252380a..26c9f52c7 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -257,16 +257,16 @@ to_match_op(const jlm::rvsdg::operation & op) noexcept } jlm::rvsdg::output * -control_constant(jlm::rvsdg::region * region, size_t nalternatives, size_t alternative); +control_constant(rvsdg::Region * region, size_t nalternatives, size_t alternative); static inline jlm::rvsdg::output * -control_false(jlm::rvsdg::region * region) +control_false(rvsdg::Region * region) { return control_constant(region, 2, 0); } static inline jlm::rvsdg::output * -control_true(jlm::rvsdg::region * region) +control_true(rvsdg::Region * region) { return control_constant(region, 2, 1); } diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index dd27dea69..3c372d1cf 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -341,7 +341,7 @@ GammaNode::exitvar_iterator::operator++() noexcept } GammaNode * -GammaNode::copy(rvsdg::region * region, substitution_map & smap) const +GammaNode::copy(rvsdg::Region * region, substitution_map & smap) const { auto gamma = create(smap.lookup(predicate()->origin()), nsubregions()); @@ -374,7 +374,7 @@ GammaNode::copy(rvsdg::region * region, substitution_map & smap) const GammaArgument::~GammaArgument() noexcept = default; GammaArgument & -GammaArgument::Copy(rvsdg::region & region, structural_input * input) +GammaArgument::Copy(rvsdg::Region & region, structural_input * input) { auto gammaInput = 
util::AssertedCast(input); return Create(region, *gammaInput); diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 937a68769..29234bb19 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -312,7 +312,7 @@ class GammaNode : public structural_node } virtual GammaNode * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(jlm::rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; }; /* gamma input */ @@ -468,15 +468,15 @@ class GammaArgument final : public RegionArgument ~GammaArgument() noexcept override; GammaArgument & - Copy(rvsdg::region & region, structural_input * input) override; + Copy(rvsdg::Region & region, structural_input * input) override; private: - GammaArgument(rvsdg::region & region, GammaInput & input) + GammaArgument(rvsdg::Region & region, GammaInput & input) : RegionArgument(®ion, &input, input.Type()) {} static GammaArgument & - Create(rvsdg::region & region, GammaInput & input) + Create(rvsdg::Region & region, GammaInput & input) { auto gammaArgument = new GammaArgument(region, input); region.append_argument(gammaArgument); @@ -495,7 +495,7 @@ class GammaResult final : public RegionResult ~GammaResult() noexcept override; private: - GammaResult(rvsdg::region & region, rvsdg::output & origin, GammaOutput & gammaOutput) + GammaResult(rvsdg::Region & region, rvsdg::output & origin, GammaOutput & gammaOutput) : RegionResult(®ion, &origin, &gammaOutput, origin.Type()) {} @@ -503,7 +503,7 @@ class GammaResult final : public RegionResult Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; static GammaResult & - Create(rvsdg::region & region, rvsdg::output & origin, GammaOutput & gammaOutput) + Create(rvsdg::Region & region, rvsdg::output & origin, GammaOutput & gammaOutput) { auto gammaResult = new GammaResult(region, origin, gammaOutput); origin.region()->append_result(gammaResult); diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp 
index 278719d40..9dcdf41c3 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -36,7 +36,7 @@ graph::~graph() graph::graph() : normalized_(false), - root_(new jlm::rvsdg::region(nullptr, this)) + root_(new rvsdg::Region(nullptr, this)) {} std::unique_ptr diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index e0748e12c..0e87f377d 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -67,7 +67,7 @@ class graph graph(); - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * root() const noexcept { return root_; @@ -112,7 +112,7 @@ class graph private: bool normalized_; - jlm::rvsdg::region * root_; + rvsdg::Region * root_; jlm::rvsdg::node_normal_form_hash node_normal_forms_; }; diff --git a/jlm/rvsdg/node-normal-form.hpp b/jlm/rvsdg/node-normal-form.hpp index e93f001dd..dd3ea7d79 100644 --- a/jlm/rvsdg/node-normal-form.hpp +++ b/jlm/rvsdg/node-normal-form.hpp @@ -26,7 +26,7 @@ class graph; class node; class operation; class output; -class region; +class Region; class node_normal_form { diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 272710c40..ae5734fa6 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -23,7 +23,7 @@ input::~input() noexcept input::input( jlm::rvsdg::output * origin, - jlm::rvsdg::region * region, + rvsdg::Region * region, std::shared_ptr type) : index_(0), origin_(origin), @@ -83,7 +83,7 @@ output::~output() noexcept JLM_ASSERT(nusers() == 0); } -output::output(jlm::rvsdg::region * region, std::shared_ptr type) +output::output(rvsdg::Region * region, std::shared_ptr type) : index_(0), region_(region), Type_(std::move(type)) @@ -163,7 +163,7 @@ node_output::node_output(jlm::rvsdg::node * node, std::shared_ptr op, jlm::rvsdg::region * region) +node::node(std::unique_ptr op, rvsdg::Region * region) : depth_(0), graph_(region->graph()), region_(region), @@ -288,7 +288,7 @@ node::recompute_depth() noexcept } jlm::rvsdg::node * -node::copy(jlm::rvsdg::region * region, const std::vector & operands) const 
+node::copy(rvsdg::Region * region, const std::vector & operands) const { substitution_map smap; diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 035f97a18..16102d4ae 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -35,14 +35,14 @@ class substitution_map; class input { friend class jlm::rvsdg::node; - friend class jlm::rvsdg::region; + friend class rvsdg::Region; public: virtual ~input() noexcept; input( jlm::rvsdg::output * origin, - jlm::rvsdg::region * region, + rvsdg::Region * region, std::shared_ptr type); input(const input &) = delete; @@ -82,7 +82,7 @@ class input return Type_; } - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * region() const noexcept { return region_; @@ -264,7 +264,7 @@ class input private: size_t index_; jlm::rvsdg::output * origin_; - jlm::rvsdg::region * region_; + rvsdg::Region * region_; std::shared_ptr Type_; }; @@ -285,14 +285,14 @@ class output { friend input; friend class jlm::rvsdg::node; - friend class jlm::rvsdg::region; + friend class rvsdg::Region; typedef std::unordered_set::const_iterator user_iterator; public: virtual ~output() noexcept; - output(jlm::rvsdg::region * region, std::shared_ptr type); + output(rvsdg::Region * region, std::shared_ptr type); output(const output &) = delete; @@ -365,7 +365,7 @@ class output return Type_; } - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * region() const noexcept { return region_; @@ -542,7 +542,7 @@ class output add_user(jlm::rvsdg::input * user); size_t index_; - jlm::rvsdg::region * region_; + rvsdg::Region * region_; std::shared_ptr Type_; std::unordered_set users_; }; @@ -609,7 +609,7 @@ class node public: virtual ~node(); - node(std::unique_ptr op, jlm::rvsdg::region * region); + node(std::unique_ptr op, rvsdg::Region * region); inline const jlm::rvsdg::operation & operation() const noexcept @@ -783,14 +783,14 @@ class node return graph_; } - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * region() const noexcept { 
return region_; } virtual jlm::rvsdg::node * - copy(jlm::rvsdg::region * region, const std::vector & operands) const; + copy(rvsdg::Region * region, const std::vector & operands) const; /** \brief Copy a node with substitutions @@ -809,7 +809,7 @@ class node subsequent \ref copy operations. */ virtual jlm::rvsdg::node * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const = 0; + copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const = 0; inline size_t depth() const noexcept @@ -840,7 +840,7 @@ class node private: size_t depth_; jlm::rvsdg::graph * graph_; - jlm::rvsdg::region * region_; + rvsdg::Region * region_; std::unique_ptr operation_; std::vector> inputs_; std::vector> outputs_; diff --git a/jlm/rvsdg/notifiers.cpp b/jlm/rvsdg/notifiers.cpp index cdf9d1742..00f7dfa3a 100644 --- a/jlm/rvsdg/notifiers.cpp +++ b/jlm/rvsdg/notifiers.cpp @@ -8,8 +8,8 @@ namespace jlm::rvsdg { -jlm::util::notifier on_region_create; -jlm::util::notifier on_region_destroy; +jlm::util::notifier on_region_create; +jlm::util::notifier on_region_destroy; jlm::util::notifier on_node_create; jlm::util::notifier on_node_destroy; diff --git a/jlm/rvsdg/notifiers.hpp b/jlm/rvsdg/notifiers.hpp index 52a828163..78257cf9e 100644 --- a/jlm/rvsdg/notifiers.hpp +++ b/jlm/rvsdg/notifiers.hpp @@ -14,10 +14,10 @@ namespace jlm::rvsdg class input; class node; class output; -class region; +class Region; -extern jlm::util::notifier on_region_create; -extern jlm::util::notifier on_region_destroy; +extern jlm::util::notifier on_region_create; +extern jlm::util::notifier on_region_destroy; extern jlm::util::notifier on_node_create; extern jlm::util::notifier on_node_destroy; diff --git a/jlm/rvsdg/nullary.hpp b/jlm/rvsdg/nullary.hpp index 129f3dbd4..bcd07d670 100644 --- a/jlm/rvsdg/nullary.hpp +++ b/jlm/rvsdg/nullary.hpp @@ -95,7 +95,7 @@ class domain_const_op final : public nullary_op } static inline jlm::rvsdg::output * - create(jlm::rvsdg::region * region, const 
value_repr & vr) + create(rvsdg::Region * region, const value_repr & vr) { domain_const_op op(vr); return simple_node::create_normalized(region, op, {})[0]; diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index 067e2ef2a..9dd28f62c 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -20,7 +20,7 @@ class graph; class node; class node_normal_form; class output; -class region; +class Region; class simple_normal_form; class structural_normal_form; diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 598866206..6deca52b8 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -23,7 +23,7 @@ RegionArgument::~RegionArgument() noexcept } RegionArgument::RegionArgument( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::structural_input * input, std::shared_ptr type) : output(region, std::move(type)), @@ -52,7 +52,7 @@ RegionResult::~RegionResult() noexcept } RegionResult::RegionResult( - jlm::rvsdg::region * region, + rvsdg::Region * region, jlm::rvsdg::output * origin, jlm::rvsdg::structural_output * output, std::shared_ptr type) @@ -73,7 +73,7 @@ RegionResult::RegionResult( } } -region::~region() +Region::~Region() noexcept { on_region_destroy(this); @@ -89,7 +89,7 @@ region::~region() RemoveArgument(arguments_.size() - 1); } -region::region(jlm::rvsdg::region * parent, jlm::rvsdg::graph * graph) +Region::Region(rvsdg::Region * parent, jlm::rvsdg::graph * graph) : index_(0), graph_(graph), node_(nullptr) @@ -97,7 +97,7 @@ region::region(jlm::rvsdg::region * parent, jlm::rvsdg::graph * graph) on_region_create(this); } -region::region(jlm::rvsdg::structural_node * node, size_t index) +Region::Region(jlm::rvsdg::structural_node * node, size_t index) : index_(index), graph_(node->graph()), node_(node) @@ -106,7 +106,7 @@ region::region(jlm::rvsdg::structural_node * node, size_t index) } void -region::append_argument(RegionArgument * argument) +Region::append_argument(RegionArgument * argument) { if 
(argument->region() != this) throw jlm::util::error("Appending argument to wrong region."); @@ -122,7 +122,7 @@ region::append_argument(RegionArgument * argument) } void -region::RemoveArgument(size_t index) +Region::RemoveArgument(size_t index) { JLM_ASSERT(index < narguments()); RegionArgument * argument = arguments_[index]; @@ -137,7 +137,7 @@ region::RemoveArgument(size_t index) } void -region::append_result(RegionResult * result) +Region::append_result(RegionResult * result) { if (result->region() != this) throw jlm::util::error("Appending result to wrong region."); @@ -157,7 +157,7 @@ region::append_result(RegionResult * result) } void -region::RemoveResult(size_t index) +Region::RemoveResult(size_t index) { JLM_ASSERT(index < results_.size()); RegionResult * result = results_[index]; @@ -172,13 +172,13 @@ region::RemoveResult(size_t index) } void -region::remove_node(jlm::rvsdg::node * node) +Region::remove_node(jlm::rvsdg::node * node) { delete node; } void -region::copy(region * target, substitution_map & smap, bool copy_arguments, bool copy_results) const +Region::copy(Region * target, substitution_map & smap, bool copy_arguments, bool copy_results) const { smap.insert(this, target); @@ -225,7 +225,7 @@ region::copy(region * target, substitution_map & smap, bool copy_arguments, bool } void -region::prune(bool recursive) +Region::prune(bool recursive) { while (bottom_nodes.first()) remove_node(bottom_nodes.first()); @@ -244,7 +244,7 @@ region::prune(bool recursive) } void -region::normalize(bool recursive) +Region::normalize(bool recursive) { for (auto node : jlm::rvsdg::topdown_traverser(this)) { @@ -260,13 +260,13 @@ region::normalize(bool recursive) } bool -region::IsRootRegion() const noexcept +Region::IsRootRegion() const noexcept { return this->graph()->root() == this; } size_t -region::NumRegions(const jlm::rvsdg::region & region) noexcept +Region::NumRegions(const rvsdg::Region & region) noexcept { size_t numRegions = 1; for (auto & node : 
region.nodes) @@ -284,7 +284,7 @@ region::NumRegions(const jlm::rvsdg::region & region) noexcept } std::string -region::ToTree(const rvsdg::region & region, const util::AnnotationMap & annotationMap) noexcept +Region::ToTree(const rvsdg::Region & region, const util::AnnotationMap & annotationMap) noexcept { std::stringstream stream; ToTree(region, annotationMap, 0, stream); @@ -292,7 +292,7 @@ region::ToTree(const rvsdg::region & region, const util::AnnotationMap & annotat } std::string -region::ToTree(const rvsdg::region & region) noexcept +Region::ToTree(const rvsdg::Region & region) noexcept { std::stringstream stream; util::AnnotationMap annotationMap; @@ -301,8 +301,8 @@ region::ToTree(const rvsdg::region & region) noexcept } void -region::ToTree( - const rvsdg::region & region, +Region::ToTree( + const rvsdg::Region & region, const util::AnnotationMap & annotationMap, size_t indentationDepth, std::stringstream & stream) noexcept @@ -344,7 +344,7 @@ region::ToTree( } std::string -region::GetAnnotationString( +Region::GetAnnotationString( const void * key, const util::AnnotationMap & annotationMap, char annotationSeparator, @@ -358,7 +358,7 @@ region::GetAnnotationString( } std::string -region::ToString( +Region::ToString( const std::vector & annotations, char annotationSeparator, char labelValueSeparator) @@ -374,7 +374,7 @@ region::ToString( } std::string -region::ToString(const util::Annotation & annotation, char labelValueSeparator) +Region::ToString(const util::Annotation & annotation, char labelValueSeparator) { std::string value; if (annotation.HasValueType()) @@ -402,7 +402,7 @@ region::ToString(const util::Annotation & annotation, char labelValueSeparator) } size_t -nnodes(const jlm::rvsdg::region * region) noexcept +nnodes(const jlm::rvsdg::Region * region) noexcept { size_t n = region->nnodes(); for (const auto & node : region->nodes) @@ -418,7 +418,7 @@ nnodes(const jlm::rvsdg::region * region) noexcept } size_t -nstructnodes(const 
jlm::rvsdg::region * region) noexcept +nstructnodes(const rvsdg::Region * region) noexcept { size_t n = 0; for (const auto & node : region->nodes) @@ -435,7 +435,7 @@ nstructnodes(const jlm::rvsdg::region * region) noexcept } size_t -nsimpnodes(const jlm::rvsdg::region * region) noexcept +nsimpnodes(const rvsdg::Region * region) noexcept { size_t n = 0; for (const auto & node : region->nodes) @@ -455,7 +455,7 @@ nsimpnodes(const jlm::rvsdg::region * region) noexcept } size_t -ninputs(const jlm::rvsdg::region * region) noexcept +ninputs(const rvsdg::Region * region) noexcept { size_t n = region->nresults(); for (const auto & node : region->nodes) diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index f95be439c..215e7d240 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -51,7 +51,7 @@ class RegionArgument : public output protected: RegionArgument( - rvsdg::region * region, + rvsdg::Region * region, structural_input * input, std::shared_ptr type); @@ -81,7 +81,7 @@ class RegionArgument : public output * @return A reference to the copied argument. 
*/ virtual RegionArgument & - Copy(rvsdg::region & region, structural_input * input) = 0; + Copy(rvsdg::Region & region, structural_input * input) = 0; private: structural_input * input_; @@ -108,7 +108,7 @@ class RegionResult : public input protected: RegionResult( - rvsdg::region * region, + rvsdg::Region * region, rvsdg::output * origin, structural_output * output, std::shared_ptr type); @@ -146,7 +146,7 @@ class RegionResult : public input structural_output * output_; }; -class region +class Region { typedef jlm::util::intrusive_list region_nodes_list; @@ -160,11 +160,11 @@ class region region_bottom_node_list; public: - ~region(); + ~Region() noexcept; - region(jlm::rvsdg::region * parent, jlm::rvsdg::graph * graph); + Region(rvsdg::Region * parent, jlm::rvsdg::graph * graph); - region(jlm::rvsdg::structural_node * node, size_t index); + Region(rvsdg::structural_node * node, size_t index); inline region_nodes_list::iterator begin() @@ -368,7 +368,7 @@ class region map will be updated as nodes are copied. */ void - copy(region * target, substitution_map & smap, bool copy_arguments, bool copy_results) const; + copy(Region * target, substitution_map & smap, bool copy_arguments, bool copy_results) const; void prune(bool recursive); @@ -387,7 +387,7 @@ class region */ template static inline bool - Contains(const jlm::rvsdg::region & region, bool checkSubregions); + Contains(const rvsdg::Region & region, bool checkSubregions); /** * Counts the number of (sub-)regions contained within \p region. The count includes \p region, @@ -398,7 +398,7 @@ class region * @return The number of (sub-)regions. */ [[nodiscard]] static size_t - NumRegions(const jlm::rvsdg::region & region) noexcept; + NumRegions(const rvsdg::Region & region) noexcept; /** * Converts \p region and all of its contained structural nodes with subregions to a tree in @@ -420,12 +420,12 @@ class region * \p annotationMap. 
* * @param region The top-level region that is converted - * @param annotationMap A map with annotations for instances of \ref region%s or + * @param annotationMap A map with annotations for instances of \ref Region%s or * structural_node%s. * @return A string containing the ASCII tree of \p region. */ [[nodiscard]] static std::string - ToTree(const rvsdg::region & region, const util::AnnotationMap & annotationMap) noexcept; + ToTree(const rvsdg::Region & region, const util::AnnotationMap & annotationMap) noexcept; /** * Converts \p region and all of its contained structural nodes with subregions to a tree in @@ -448,7 +448,7 @@ class region * @return A string containing the ASCII tree of \p region */ [[nodiscard]] static std::string - ToTree(const rvsdg::region & region) noexcept; + ToTree(const rvsdg::Region & region) noexcept; region_nodes_list nodes; @@ -459,7 +459,7 @@ class region private: static void ToTree( - const rvsdg::region & region, + const rvsdg::Region & region, const util::AnnotationMap & annotationMap, size_t indentationDepth, std::stringstream & stream) noexcept; @@ -494,16 +494,16 @@ remove(jlm::rvsdg::node * node) } size_t -nnodes(const jlm::rvsdg::region * region) noexcept; +nnodes(const rvsdg::Region * region) noexcept; size_t -nstructnodes(const jlm::rvsdg::region * region) noexcept; +nstructnodes(const rvsdg::Region * region) noexcept; size_t -nsimpnodes(const jlm::rvsdg::region * region) noexcept; +nsimpnodes(const rvsdg::Region * region) noexcept; size_t -ninputs(const jlm::rvsdg::region * region) noexcept; +ninputs(const rvsdg::Region * region) noexcept; } // namespace diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 58c80506c..d6dc53253 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -46,7 +46,7 @@ simple_node::~simple_node() } simple_node::simple_node( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & operands) : 
node(op.copy(), region) @@ -71,8 +71,7 @@ simple_node::simple_node( } jlm::rvsdg::node * -simple_node::copy(jlm::rvsdg::region * region, const std::vector & operands) - const +simple_node::copy(rvsdg::Region * region, const std::vector & operands) const { auto node = create(region, *static_cast(&operation()), operands); graph()->mark_denormalized(); @@ -80,7 +79,7 @@ simple_node::copy(jlm::rvsdg::region * region, const std::vector operands; for (size_t n = 0; n < ninputs(); n++) diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index 6104af0c9..d7b433571 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -27,7 +27,7 @@ class simple_node : public node protected: simple_node( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & operands); @@ -42,15 +42,14 @@ class simple_node : public node operation() const noexcept; virtual jlm::rvsdg::node * - copy(jlm::rvsdg::region * region, const std::vector & operands) - const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; virtual jlm::rvsdg::node * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; static inline jlm::rvsdg::simple_node * create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & operands) { @@ -59,7 +58,7 @@ class simple_node : public node static inline std::vector create_normalized( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & operands) { diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index ce181b579..90b8e51a5 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -8,7 +8,7 @@ static jlm::rvsdg::node * node_cse( - jlm::rvsdg::region * region, + jlm::rvsdg::Region 
* region, const jlm::rvsdg::operation & op, const std::vector & arguments) { @@ -81,7 +81,7 @@ simple_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector simple_normal_form::normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const { diff --git a/jlm/rvsdg/simple-normal-form.hpp b/jlm/rvsdg/simple-normal-form.hpp index 2af288166..996f2b57c 100644 --- a/jlm/rvsdg/simple-normal-form.hpp +++ b/jlm/rvsdg/simple-normal-form.hpp @@ -28,7 +28,7 @@ class simple_normal_form : public node_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const; diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index d50f57e2c..7d7a2e140 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -149,7 +149,7 @@ mux_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector mux_normal_form::normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & operands) const { diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index 7fc1c21f2..52d4f9695 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -31,7 +31,7 @@ class mux_normal_form final : public simple_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const override; diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index c31c084a5..99ac1518d 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -57,7 +57,7 @@ structural_node::~structural_node() structural_node::structural_node( const jlm::rvsdg::structural_op & op, - jlm::rvsdg::region * region, + rvsdg::Region * region, size_t nsubregions) : node(op.copy(), region) 
{ @@ -65,7 +65,7 @@ structural_node::structural_node( throw jlm::util::error("Number of subregions must be greater than zero."); for (size_t n = 0; n < nsubregions; n++) - subregions_.emplace_back(std::unique_ptr(new jlm::rvsdg::region(this, n))); + subregions_.emplace_back(std::unique_ptr(new jlm::rvsdg::Region(this, n))); on_node_create(this); } diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 72c0fee0f..ce0d6c937 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -27,7 +27,7 @@ class structural_node : public node structural_node( /* FIXME: use move semantics instead of copy semantics for op */ const jlm::rvsdg::structural_op & op, - jlm::rvsdg::region * region, + rvsdg::Region * region, size_t nsubregions); public: @@ -37,7 +37,7 @@ class structural_node : public node return subregions_.size(); } - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * subregion(size_t index) const noexcept { JLM_ASSERT(index < nsubregions()); @@ -61,7 +61,7 @@ class structural_node : public node using node::RemoveOutput; private: - std::vector> subregions_; + std::vector> subregions_; }; /* structural input class */ @@ -146,7 +146,7 @@ structural_node::output(size_t index) const noexcept template bool -region::Contains(const jlm::rvsdg::region & region, bool checkSubregions) +Region::Contains(const rvsdg::Region & region, bool checkSubregions) { for (auto & node : region.nodes) { diff --git a/jlm/rvsdg/substitution.hpp b/jlm/rvsdg/substitution.hpp index 1d188cfb9..051c71f0a 100644 --- a/jlm/rvsdg/substitution.hpp +++ b/jlm/rvsdg/substitution.hpp @@ -15,7 +15,7 @@ namespace jlm::rvsdg { class output; -class region; +class Region; class structural_input; class substitution_map final @@ -28,7 +28,7 @@ class substitution_map final } bool - contains(const region & original) const noexcept + contains(const Region & original) const noexcept { return region_map_.find(&original) != region_map_.end(); } @@ -48,8 +48,8 @@ 
class substitution_map final return *output_map_.find(&original)->second; } - region & - lookup(const region & original) const + Region & + lookup(const Region & original) const { if (!contains(original)) throw jlm::util::error("Region not in substitution map."); @@ -73,8 +73,8 @@ class substitution_map final return i != output_map_.end() ? i->second : nullptr; } - inline jlm::rvsdg::region * - lookup(const jlm::rvsdg::region * original) const noexcept + [[nodiscard]] rvsdg::Region * + lookup(const jlm::rvsdg::Region * original) const noexcept { auto i = region_map_.find(original); return i != region_map_.end() ? i->second : nullptr; @@ -94,7 +94,7 @@ class substitution_map final } inline void - insert(const jlm::rvsdg::region * original, jlm::rvsdg::region * substitute) + insert(const rvsdg::Region * original, rvsdg::Region * substitute) { region_map_[original] = substitute; } @@ -106,7 +106,7 @@ class substitution_map final } private: - std::unordered_map region_map_; + std::unordered_map region_map_; std::unordered_map output_map_; std::unordered_map structinput_map_; diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 37bedd982..6e9cedcf8 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -26,7 +26,7 @@ ThetaOperation::copy() const return std::unique_ptr(new ThetaOperation(*this)); } -ThetaNode::ThetaNode(rvsdg::region & parent) +ThetaNode::ThetaNode(rvsdg::Region & parent) : structural_node(ThetaOperation(), &parent, 1) { auto predicate = control_false(subregion()); @@ -50,7 +50,7 @@ ThetaOutput::~ThetaOutput() noexcept ThetaArgument::~ThetaArgument() noexcept = default; ThetaArgument & -ThetaArgument::Copy(rvsdg::region & region, structural_input * input) +ThetaArgument::Copy(rvsdg::Region & region, structural_input * input) { auto thetaInput = util::AssertedCast(input); return ThetaArgument::Create(region, *thetaInput); @@ -114,7 +114,7 @@ ThetaNode::add_loopvar(jlm::rvsdg::output * origin) } ThetaNode * 
-ThetaNode::copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const +ThetaNode::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const { auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 48d2bb0a6..9fc2a7f22 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -88,16 +88,16 @@ class ThetaNode final : public structural_node ~ThetaNode() noexcept override; private: - explicit ThetaNode(rvsdg::region & parent); + explicit ThetaNode(rvsdg::Region & parent); public: static ThetaNode * - create(jlm::rvsdg::region * parent) + create(rvsdg::Region * parent) { return new ThetaNode(*parent); } - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * subregion() const noexcept { return structural_node::subregion(0); @@ -244,7 +244,7 @@ class ThetaNode final : public structural_node add_loopvar(jlm::rvsdg::output * origin); virtual ThetaNode * - copy(jlm::rvsdg::region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; }; class ThetaInput final : public structural_input @@ -345,17 +345,17 @@ class ThetaArgument final : public RegionArgument ~ThetaArgument() noexcept override; ThetaArgument & - Copy(rvsdg::region & region, structural_input * input) override; + Copy(rvsdg::Region & region, structural_input * input) override; private: - ThetaArgument(rvsdg::region & region, ThetaInput & input) + ThetaArgument(rvsdg::Region & region, ThetaInput & input) : RegionArgument(®ion, &input, input.Type()) { JLM_ASSERT(is(region.node())); } static ThetaArgument & - Create(rvsdg::region & region, ThetaInput & input) + Create(rvsdg::Region & region, ThetaInput & input) { auto thetaArgument = new ThetaArgument(region, input); region.append_argument(thetaArgument); diff --git a/jlm/rvsdg/tracker.hpp b/jlm/rvsdg/tracker.hpp index 
e678858ed..5f650873d 100644 --- a/jlm/rvsdg/tracker.hpp +++ b/jlm/rvsdg/tracker.hpp @@ -21,7 +21,7 @@ static const size_t tracker_nodestate_none = (size_t)-1; class graph; class node; -class region; +class Region; class tracker_depth_state; class tracker_nodestate; diff --git a/jlm/rvsdg/traverser.cpp b/jlm/rvsdg/traverser.cpp index ac3687b2c..7438bcc2a 100644 --- a/jlm/rvsdg/traverser.cpp +++ b/jlm/rvsdg/traverser.cpp @@ -18,7 +18,7 @@ namespace jlm::rvsdg topdown_traverser::~topdown_traverser() noexcept {} -topdown_traverser::topdown_traverser(jlm::rvsdg::region * region) +topdown_traverser::topdown_traverser(rvsdg::Region * region) : region_(region), tracker_(region->graph()) { @@ -123,7 +123,7 @@ topdown_traverser::input_change(input * in, output * old_origin, output * new_or bottomup_traverser::~bottomup_traverser() noexcept {} -bottomup_traverser::bottomup_traverser(jlm::rvsdg::region * region, bool revisit) +bottomup_traverser::bottomup_traverser(rvsdg::Region * region, bool revisit) : region_(region), tracker_(region->graph()), new_node_state_(revisit ? 
traversal_nodestate::frontier : traversal_nodestate::behind) diff --git a/jlm/rvsdg/traverser.hpp b/jlm/rvsdg/traverser.hpp index 77fd08271..d1798e726 100644 --- a/jlm/rvsdg/traverser.hpp +++ b/jlm/rvsdg/traverser.hpp @@ -141,12 +141,12 @@ class topdown_traverser final public: ~topdown_traverser() noexcept; - explicit topdown_traverser(jlm::rvsdg::region * region); + explicit topdown_traverser(rvsdg::Region * region); jlm::rvsdg::node * next(); - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * region() const noexcept { return region_; @@ -177,7 +177,7 @@ class topdown_traverser final void input_change(input * in, output * old_origin, output * new_origin); - jlm::rvsdg::region * region_; + rvsdg::Region * region_; traversal_tracker tracker_; std::vector callbacks_; }; @@ -187,12 +187,12 @@ class bottomup_traverser final public: ~bottomup_traverser() noexcept; - explicit bottomup_traverser(jlm::rvsdg::region * region, bool revisit = false); + explicit bottomup_traverser(rvsdg::Region * region, bool revisit = false); jlm::rvsdg::node * next(); - inline jlm::rvsdg::region * + [[nodiscard]] rvsdg::Region * region() const noexcept { return region_; @@ -223,7 +223,7 @@ class bottomup_traverser final void input_change(input * in, output * old_origin, output * new_origin); - jlm::rvsdg::region * region_; + rvsdg::Region * region_; traversal_tracker tracker_; std::vector callbacks_; traversal_nodestate new_node_state_; diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 51e2379bf..249eb7eac 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -55,7 +55,7 @@ unary_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector unary_normal_form::normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const { diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index 3fa530b1a..02fb00c48 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -32,7 
+32,7 @@ class unary_normal_form final : public simple_normal_form virtual std::vector normalized_create( - jlm::rvsdg::region * region, + rvsdg::Region * region, const jlm::rvsdg::simple_op & op, const std::vector & arguments) const override; diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 99ae10c77..5cf0dbb94 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -13,7 +13,7 @@ namespace jlm::rvsdg static std::string region_to_string( - const jlm::rvsdg::region * region, + const rvsdg::Region * region, size_t depth, std::unordered_map &); @@ -67,9 +67,7 @@ node_to_string( } static std::string -region_header( - const jlm::rvsdg::region * region, - std::unordered_map & map) +region_header(const rvsdg::Region * region, std::unordered_map & map) { std::string header("["); for (size_t n = 0; n < region->narguments(); n++) @@ -92,7 +90,7 @@ region_header( static std::string region_body( - const jlm::rvsdg::region * region, + const rvsdg::Region * region, size_t depth, std::unordered_map & map) { @@ -115,9 +113,7 @@ region_body( } static std::string -region_footer( - const jlm::rvsdg::region * region, - std::unordered_map & map) +region_footer(const rvsdg::Region * region, std::unordered_map & map) { std::string footer("}["); for (size_t n = 0; n < region->nresults(); n++) @@ -139,7 +135,7 @@ region_footer( static std::string region_to_string( - const jlm::rvsdg::region * region, + const rvsdg::Region * region, size_t depth, std::unordered_map & map) { @@ -151,20 +147,20 @@ region_to_string( } std::string -view(const jlm::rvsdg::region * region) +view(const rvsdg::Region * region) { std::unordered_map map; return view(region, map); } std::string -view(const jlm::rvsdg::region * region, std::unordered_map & map) +view(const rvsdg::Region * region, std::unordered_map & map) { return region_to_string(region, 0, map); } void -view(const jlm::rvsdg::region * region, FILE * out) +view(const rvsdg::Region * region, FILE * out) { fputs(view(region).c_str(), 
out); fflush(out); @@ -204,7 +200,7 @@ id(const jlm::rvsdg::node * node) } static inline std::string -id(const jlm::rvsdg::region * region) +id(const rvsdg::Region * region) { return jlm::util::strfmt("r", (intptr_t)region); } @@ -276,7 +272,7 @@ type(const jlm::rvsdg::node * n) } static std::string -convert_region(const jlm::rvsdg::region * region); +convert_region(const jlm::rvsdg::Region * region); static inline std::string convert_simple_node(const jlm::rvsdg::simple_node * node) @@ -339,7 +335,7 @@ convert_node(const jlm::rvsdg::node * node) } static inline std::string -convert_region(const jlm::rvsdg::region * region) +convert_region(const rvsdg::Region * region) { std::string s; s += region_starttag(id(region)); @@ -366,7 +362,7 @@ convert_region(const jlm::rvsdg::region * region) } std::string -to_xml(const jlm::rvsdg::region * region) +to_xml(const rvsdg::Region * region) { std::string s; s += xml_header(); @@ -378,7 +374,7 @@ to_xml(const jlm::rvsdg::region * region) } void -view_xml(const jlm::rvsdg::region * region, FILE * out) +view_xml(const rvsdg::Region * region, FILE * out) { fputs(to_xml(region).c_str(), out); fflush(out); diff --git a/jlm/rvsdg/view.hpp b/jlm/rvsdg/view.hpp index 76ccdb63f..adbf17b36 100644 --- a/jlm/rvsdg/view.hpp +++ b/jlm/rvsdg/view.hpp @@ -14,7 +14,7 @@ namespace jlm::rvsdg { -class region; +class Region; /** * Prints the given rvsdg region to a string, @@ -25,7 +25,7 @@ class region; * @see view(region, map) */ std::string -view(const jlm::rvsdg::region * region); +view(const rvsdg::Region * region); /** * Prints the given rvsdg region to a string, and exposes the unique name given to each output. @@ -35,7 +35,7 @@ view(const jlm::rvsdg::region * region); * @return the string describing the region. */ std::string -view(const jlm::rvsdg::region * region, std::unordered_map & map); +view(const rvsdg::Region * region, std::unordered_map & map); /** * Recursively traverses and prints the given rvsdg region to the given file. 
@@ -43,7 +43,7 @@ view(const jlm::rvsdg::region * region, std::unordered_mapfctargument(0); @@ -2259,7 +2259,7 @@ PhiTest2::SetupRvsdg() jlm::rvsdg::node_output::node(paAlloca[0]))); }; - auto SetupB = [&](jlm::rvsdg::region & region, + auto SetupB = [&](jlm::rvsdg::Region & region, phi::cvargument & functionI, phi::rvargument & functionC, phi::cvargument & functionEight) @@ -2304,7 +2304,7 @@ PhiTest2::SetupRvsdg() jlm::rvsdg::node_output::node(pbAlloca[0]))); }; - auto SetupC = [&](jlm::rvsdg::region & region, phi::rvargument & functionA) + auto SetupC = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) { auto lambda = lambda::node::create(®ion, recFunctionType, "c", linkage::external_linkage); auto xArgument = lambda->fctargument(0); @@ -2343,7 +2343,7 @@ PhiTest2::SetupRvsdg() jlm::rvsdg::node_output::node(pcAlloca[0]))); }; - auto SetupD = [&](jlm::rvsdg::region & region, phi::rvargument & functionA) + auto SetupD = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) { auto lambda = lambda::node::create(®ion, recFunctionType, "d", linkage::external_linkage); auto xArgument = lambda->fctargument(0); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index c9a41f559..22c9d6ce2 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -46,7 +46,7 @@ TestWithMatch() /* Verify output */ - assert(jlm::rvsdg::region::Contains(*lambda->subregion(), true)); + assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); } static void @@ -82,7 +82,7 @@ TestWithoutMatch() /* Verify output */ - assert(jlm::rvsdg::region::Contains(*lambda->subregion(), true)); + assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); } static int diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index 1cd017666..aa7bf06b2 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp 
+++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -57,10 +57,10 @@ TestUnknownBoundaries() jlm::rvsdg::view(rm.Rvsdg(), stdout); // Assert - assert(jlm::rvsdg::region::Contains(*lambda->subregion(), true)); - assert(jlm::rvsdg::region::Contains(*lambda->subregion(), true)); - assert(jlm::rvsdg::region::Contains(*lambda->subregion(), true)); - assert(jlm::rvsdg::region::Contains(*lambda->subregion(), true)); + assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); + assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); + assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); + assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); } static int diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index db730e2a4..67a6573c3 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -188,7 +188,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto SetupFunctionF = [&](lambda::output * g) { - auto SetupOuterTheta = [](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * functionG) + auto SetupOuterTheta = [](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * functionG) { auto outerTheta = jlm::rvsdg::ThetaNode::create(region); auto otf = outerTheta->add_loopvar(functionG); @@ -281,13 +281,13 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() auto SetupFunctionF = [&](lambda::output * g) { - auto SetupOuterTheta = [&](jlm::rvsdg::region * region, + auto SetupOuterTheta = [&](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * g, jlm::rvsdg::output * value, jlm::rvsdg::output * iOState, jlm::rvsdg::output * memoryState) { - auto SetupInnerTheta = [&](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * g) + auto SetupInnerTheta = [&](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * g) { auto innerTheta = jlm::rvsdg::ThetaNode::create(region); auto thetaOutputG = innerTheta->add_loopvar(g); diff 
--git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index c52424431..e7499a7ab 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -29,7 +29,7 @@ TestPhiCreation() { vtype, iostatetype::Create(), MemoryStateType::Create() }, { vtype, iostatetype::Create(), MemoryStateType::Create() }); - auto SetupEmptyLambda = [&](jlm::rvsdg::region * region, const std::string & name) + auto SetupEmptyLambda = [&](jlm::rvsdg::Region * region, const std::string & name) { auto lambda = lambda::node::create(region, f0type, name, linkage::external_linkage); auto iOStateArgument = lambda->fctargument(1); @@ -38,7 +38,7 @@ TestPhiCreation() return lambda->finalize({ iOStateArgument, memoryStateArgument }); }; - auto SetupF2 = [&](jlm::rvsdg::region * region, jlm::rvsdg::RegionArgument * f2) + auto SetupF2 = [&](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * f2) { auto lambda = lambda::node::create(region, f1type, "f2", linkage::external_linkage); auto ctxVarF2 = lambda->add_ctxvar(f2); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 6a8bbecd0..3602280df 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -290,7 +290,7 @@ TestPhi() auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); auto setupF1 = - [&](jlm::rvsdg::region & region, phi::rvoutput & rv2, jlm::rvsdg::RegionArgument & dx) + [&](jlm::rvsdg::Region & region, phi::rvoutput & rv2, jlm::rvsdg::RegionArgument & dx) { auto lambda1 = lambda::node::create(®ion, functionType, "f1", linkage::external_linkage); auto f2Argument = lambda1->add_ctxvar(rv2.argument()); @@ -306,7 +306,7 @@ TestPhi() }; auto setupF2 = - [&](jlm::rvsdg::region & region, phi::rvoutput & rv1, jlm::rvsdg::RegionArgument & dy) + [&](jlm::rvsdg::Region & region, phi::rvoutput & rv1, jlm::rvsdg::RegionArgument & dy) { auto 
lambda2 = lambda::node::create(®ion, functionType, "f2", linkage::external_linkage); auto f1Argument = lambda2->add_ctxvar(rv1.argument()); @@ -321,7 +321,7 @@ TestPhi() return lambda2->finalize({ result }); }; - auto setupF3 = [&](jlm::rvsdg::region & region, jlm::rvsdg::RegionArgument & dz) + auto setupF3 = [&](jlm::rvsdg::Region & region, jlm::rvsdg::RegionArgument & dz) { auto lambda3 = lambda::node::create(®ion, functionType, "f3", linkage::external_linkage); auto zArgument = lambda3->add_ctxvar(&dz); @@ -335,7 +335,7 @@ TestPhi() return lambda3->finalize({ result }); }; - auto setupF4 = [&](jlm::rvsdg::region & region) + auto setupF4 = [&](jlm::rvsdg::Region & region) { auto lambda = lambda::node::create(®ion, functionType, "f4", linkage::external_linkage); return lambda->finalize({ lambda->fctargument(0) }); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp index a27c4375b..9418aca9f 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp @@ -45,7 +45,7 @@ class TestAnalysis final : public jlm::llvm::aa::AliasAnalysis private: void - AnalyzeRegion(jlm::rvsdg::region & region) + AnalyzeRegion(jlm::rvsdg::Region & region) { using namespace jlm::llvm; diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index 4403c7a03..dfb95f23c 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -97,7 +97,7 @@ test1() // jlm::rvsdg::view(graph.root(), stdout); // Assert - assert(!jlm::rvsdg::region::Contains(*graph.root(), true)); + assert(!jlm::rvsdg::Region::Contains(*graph.root(), true)); } static void diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index c614b7eb0..219cfc95a 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -21,7 +21,7 @@ static 
jlm::util::StatisticsCollector statisticsCollector; static size_t -nthetas(jlm::rvsdg::region * region) +nthetas(jlm::rvsdg::Region * region) { size_t n = 0; for (const auto & node : region->nodes) @@ -274,7 +274,7 @@ test_unknown_boundaries() } static std::vector -find_thetas(jlm::rvsdg::region * region) +find_thetas(jlm::rvsdg::Region * region) { std::vector thetas; for (auto & node : jlm::rvsdg::topdown_traverser(region)) diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index acee0ced5..f3880fde7 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -13,7 +13,7 @@ #include /** - * Test region::Contains(). + * Test Region::Contains(). */ static int Contains() @@ -39,10 +39,10 @@ Contains() binary_op::create(valueType, valueType, ®ionArgument2, ®ionArgument2); // Act & Assert - assert(jlm::rvsdg::region::Contains(*graph.root(), false)); - assert(jlm::rvsdg::region::Contains(*graph.root(), true)); - assert(jlm::rvsdg::region::Contains(*graph.root(), true)); - assert(!jlm::rvsdg::region::Contains(*graph.root(), true)); + assert(jlm::rvsdg::Region::Contains(*graph.root(), false)); + assert(jlm::rvsdg::Region::Contains(*graph.root(), true)); + assert(jlm::rvsdg::Region::Contains(*graph.root(), true)); + assert(!jlm::rvsdg::Region::Contains(*graph.root(), true)); return 0; } @@ -50,7 +50,7 @@ Contains() JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-Contains", Contains) /** - * Test region::IsRootRegion(). + * Test Region::IsRootRegion(). */ static int IsRootRegion() @@ -70,7 +70,7 @@ IsRootRegion() JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-IsRootRegion", IsRootRegion) /** - * Test region::NumRegions() with an empty Rvsdg. + * Test Region::NumRegions() with an empty Rvsdg. 
*/ static int NumRegions_EmptyRvsdg() @@ -81,7 +81,7 @@ NumRegions_EmptyRvsdg() jlm::rvsdg::graph graph; // Act & Assert - assert(region::NumRegions(*graph.root()) == 1); + assert(Region::NumRegions(*graph.root()) == 1); return 0; } @@ -89,7 +89,7 @@ NumRegions_EmptyRvsdg() JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-NumRegions_EmptyRvsdg", NumRegions_EmptyRvsdg) /** - * Test region::NumRegions() with non-empty Rvsdg. + * Test Region::NumRegions() with non-empty Rvsdg. */ static int NumRegions_NonEmptyRvsdg() @@ -103,7 +103,7 @@ NumRegions_NonEmptyRvsdg() jlm::tests::structural_node::create(structuralNode->subregion(3), 5); // Act & Assert - assert(region::NumRegions(*graph.root()) == 1 + 4 + 2 + 5); + assert(Region::NumRegions(*graph.root()) == 1 + 4 + 2 + 5); return 0; } @@ -111,7 +111,7 @@ NumRegions_NonEmptyRvsdg() JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-NumRegions_NonEmptyRvsdg", NumRegions_NonEmptyRvsdg) /** - * Test region::RemoveResultsWhere() + * Test Region::RemoveResultsWhere() */ static int RemoveResultsWhere() @@ -120,7 +120,7 @@ RemoveResultsWhere() // Arrange jlm::rvsdg::graph rvsdg; - jlm::rvsdg::region region(rvsdg.root(), &rvsdg); + jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); auto node = jlm::tests::test_op::Create(®ion, {}, {}, { valueType }); @@ -166,7 +166,7 @@ RemoveResultsWhere() JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-RemoveResultsWhere", RemoveResultsWhere) /** - * Test region::RemoveArgumentsWhere() + * Test Region::RemoveArgumentsWhere() */ static int RemoveArgumentsWhere() @@ -175,7 +175,7 @@ RemoveArgumentsWhere() // Arrange jlm::rvsdg::graph rvsdg; - jlm::rvsdg::region region(rvsdg.root(), &rvsdg); + jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); auto & argument0 = TestGraphArgument::Create(region, nullptr, valueType); @@ -220,7 +220,7 @@ RemoveArgumentsWhere() 
JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-RemoveArgumentsWhere", RemoveArgumentsWhere) /** - * Test region::PruneArguments() + * Test Region::PruneArguments() */ static int PruneArguments() @@ -229,7 +229,7 @@ PruneArguments() // Arrange jlm::rvsdg::graph rvsdg; - jlm::rvsdg::region region(rvsdg.root(), &rvsdg); + jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); auto & argument0 = TestGraphArgument::Create(region, nullptr, valueType); @@ -268,7 +268,7 @@ ToTree_EmptyRvsdg() graph rvsdg; // Act - auto tree = region::ToTree(*rvsdg.root()); + auto tree = Region::ToTree(*rvsdg.root()); std::cout << tree << std::flush; // Assert @@ -292,7 +292,7 @@ ToTree_EmptyRvsdgWithAnnotations() annotationMap.AddAnnotation(rvsdg.root(), Annotation("NumNodes", rvsdg.root()->nodes.size())); // Act - auto tree = region::ToTree(*rvsdg.root(), annotationMap); + auto tree = Region::ToTree(*rvsdg.root(), annotationMap); std::cout << tree << std::flush; // Assert @@ -317,7 +317,7 @@ ToTree_RvsdgWithStructuralNodes() jlm::tests::structural_node::create(structuralNode->subregion(1), 3); // Act - auto tree = region::ToTree(*rvsdg.root()); + auto tree = Region::ToTree(*rvsdg.root()); std::cout << tree << std::flush; // Assert @@ -354,7 +354,7 @@ ToTree_RvsdgWithStructuralNodesAndAnnotations() annotationMap.AddAnnotation(subregion2, Annotation("NumArguments", subregion2->narguments())); // Act - auto tree = region::ToTree(*rvsdg.root(), annotationMap); + auto tree = Region::ToTree(*rvsdg.root(), annotationMap); std::cout << tree << std::flush; // Assert diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 0383ab27c..1d18630ba 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -34,7 +34,7 @@ test_flattened_binary_reduction() jlm::rvsdg::view(graph, stdout); assert( - graph.root()->nnodes() == 1 && region::Contains(*graph.root(), false)); + graph.root()->nnodes() == 1 && 
Region::Contains(*graph.root(), false)); flattened_binary_op::reduce(&graph, jlm::rvsdg::flattened_binary_op::reduction::parallel); jlm::rvsdg::view(graph, stdout); @@ -68,7 +68,7 @@ test_flattened_binary_reduction() jlm::rvsdg::view(graph, stdout); assert( - graph.root()->nnodes() == 1 && region::Contains(*graph.root(), false)); + graph.root()->nnodes() == 1 && Region::Contains(*graph.root(), false)); flattened_binary_op::reduce(&graph, jlm::rvsdg::flattened_binary_op::reduction::linear); jlm::rvsdg::view(graph, stdout); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 7b655f5ae..ab35f8ad4 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -14,7 +14,7 @@ #include static bool -region_contains_node(const jlm::rvsdg::region * region, const jlm::rvsdg::node * n) +region_contains_node(const jlm::rvsdg::Region * region, const jlm::rvsdg::node * n) { for (const auto & node : region->nodes) { diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 7be44298e..dc89aed84 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -9,7 +9,7 @@ namespace jlm::tests { GraphImport & -GraphImport::Copy(rvsdg::region & region, rvsdg::structural_input * input) +GraphImport::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { return GraphImport::Create(*region.graph(), Type(), Name()); } @@ -166,7 +166,7 @@ structural_node::~structural_node() {} structural_node * -structural_node::copy(rvsdg::region * parent, rvsdg::substitution_map & smap) const +structural_node::copy(rvsdg::Region * parent, rvsdg::substitution_map & smap) const { graph()->mark_denormalized(); auto node = structural_node::create(parent, nsubregions()); @@ -244,7 +244,7 @@ StructuralNodeOutput::~StructuralNodeOutput() noexcept = default; StructuralNodeArgument::~StructuralNodeArgument() noexcept = default; StructuralNodeArgument & -StructuralNodeArgument::Copy(rvsdg::region & region, rvsdg::structural_input * 
input) +StructuralNodeArgument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) { auto structuralNodeInput = util::AssertedCast(input); return structuralNodeInput != nullptr ? Create(region, *structuralNodeInput) diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index a32939238..30169ee04 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -32,7 +32,7 @@ class GraphImport final : public rvsdg::GraphImport public: GraphImport & - Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; static GraphImport & Create(rvsdg::graph & graph, std::shared_ptr type, std::string name) @@ -96,7 +96,7 @@ class unary_op final : public rvsdg::unary_op static inline rvsdg::node * create( - rvsdg::region * region, + rvsdg::Region * region, std::shared_ptr srctype, rvsdg::output * operand, std::shared_ptr dsttype) @@ -215,7 +215,7 @@ class structural_node final : public rvsdg::structural_node ~structural_node() override; private: - structural_node(rvsdg::region * parent, size_t nsubregions) + structural_node(rvsdg::Region * parent, size_t nsubregions) : rvsdg::structural_node(structural_op(), parent, nsubregions) {} @@ -233,13 +233,13 @@ class structural_node final : public rvsdg::structural_node AddOutputWithResults(const std::vector & origins); static structural_node * - create(rvsdg::region * parent, size_t nsubregions) + create(rvsdg::Region * parent, size_t nsubregions) { return new structural_node(parent, nsubregions); } virtual structural_node * - copy(rvsdg::region * region, rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::substitution_map & smap) const override; }; class StructuralNodeInput final : public rvsdg::structural_input @@ -301,18 +301,18 @@ class StructuralNodeArgument final : public rvsdg::RegionArgument ~StructuralNodeArgument() noexcept override; StructuralNodeArgument & - 
Copy(rvsdg::region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; private: StructuralNodeArgument( - rvsdg::region & region, + rvsdg::Region & region, StructuralNodeInput * input, std::shared_ptr type) : rvsdg::RegionArgument(®ion, input, std::move(type)) {} static StructuralNodeArgument & - Create(rvsdg::region & region, StructuralNodeInput & input) + Create(rvsdg::Region & region, StructuralNodeInput & input) { auto argument = new StructuralNodeArgument(region, &input, input.Type()); region.append_argument(argument); @@ -320,7 +320,7 @@ class StructuralNodeArgument final : public rvsdg::RegionArgument } static StructuralNodeArgument & - Create(rvsdg::region & region, std::shared_ptr type) + Create(rvsdg::Region & region, std::shared_ptr type) { auto argument = new StructuralNodeArgument(region, nullptr, std::move(type)); region.append_argument(argument); @@ -384,7 +384,7 @@ class test_op final : public rvsdg::simple_op static rvsdg::simple_node * create( - rvsdg::region * region, + rvsdg::Region * region, const std::vector & operands, std::vector> result_types) { @@ -398,7 +398,7 @@ class test_op final : public rvsdg::simple_op static rvsdg::simple_node * Create( - rvsdg::region * region, + rvsdg::Region * region, std::vector> operandTypes, const std::vector & operands, std::vector> resultTypes) @@ -412,7 +412,7 @@ class SimpleNode final : public rvsdg::simple_node { private: SimpleNode( - rvsdg::region & region, + rvsdg::Region & region, const test_op & operation, const std::vector & operands) : simple_node(®ion, operation, operands) @@ -425,7 +425,7 @@ class SimpleNode final : public rvsdg::simple_node static SimpleNode & Create( - rvsdg::region & region, + rvsdg::Region & region, const std::vector & operands, std::vector> resultTypes) { @@ -466,7 +466,7 @@ create_testop_tac( static inline std::vector create_testop( - rvsdg::region * region, + rvsdg::Region * region, const 
std::vector & operands, std::vector> result_types) { @@ -482,7 +482,7 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument { private: TestGraphArgument( - jlm::rvsdg::region & region, + rvsdg::Region & region, jlm::rvsdg::structural_input * input, std::shared_ptr type) : jlm::rvsdg::RegionArgument(®ion, input, type) @@ -490,14 +490,14 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument public: TestGraphArgument & - Copy(jlm::rvsdg::region & region, jlm::rvsdg::structural_input * input) override + Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override { return Create(region, input, Type()); } static TestGraphArgument & Create( - jlm::rvsdg::region & region, + rvsdg::Region & region, jlm::rvsdg::structural_input * input, std::shared_ptr type) { @@ -511,7 +511,7 @@ class TestGraphResult final : public jlm::rvsdg::RegionResult { private: TestGraphResult( - jlm::rvsdg::region & region, + rvsdg::Region & region, jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) : jlm::rvsdg::RegionResult(®ion, &origin, output, origin.Type()) @@ -530,7 +530,7 @@ class TestGraphResult final : public jlm::rvsdg::RegionResult static TestGraphResult & Create( - jlm::rvsdg::region & region, + rvsdg::Region & region, jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) { From 5a675e38d0332145a4a75a0cd591cadae85f51ec Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 19 Sep 2024 17:55:40 +0200 Subject: [PATCH 086/170] Rename substitution_map class to SubstitutionMap (#629) --- .../backend/rvsdg2rhls/GammaConversion.cpp | 4 ++-- .../backend/rvsdg2rhls/ThetaConversion.cpp | 2 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 12 +++++----- jlm/hls/backend/rvsdg2rhls/mem-conv.hpp | 6 ++--- 
jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 6 ++--- .../rvsdg2rhls/remove-unused-state.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 8 +++---- jlm/hls/ir/hls.cpp | 2 +- jlm/hls/ir/hls.hpp | 2 +- jlm/llvm/ir/operators/Phi.cpp | 4 ++-- jlm/llvm/ir/operators/Phi.hpp | 2 +- jlm/llvm/ir/operators/delta.cpp | 4 ++-- jlm/llvm/ir/operators/delta.hpp | 2 +- jlm/llvm/ir/operators/lambda.cpp | 4 ++-- jlm/llvm/ir/operators/lambda.hpp | 2 +- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 8 +++---- jlm/llvm/opt/unroll.cpp | 22 +++++++++---------- jlm/rvsdg/gamma.cpp | 6 ++--- jlm/rvsdg/gamma.hpp | 2 +- jlm/rvsdg/graph.cpp | 2 +- jlm/rvsdg/node.cpp | 2 +- jlm/rvsdg/node.hpp | 4 ++-- jlm/rvsdg/region.cpp | 2 +- jlm/rvsdg/region.hpp | 4 ++-- jlm/rvsdg/simple-node.cpp | 2 +- jlm/rvsdg/simple-node.hpp | 2 +- jlm/rvsdg/substitution.hpp | 2 +- jlm/rvsdg/theta.cpp | 4 ++-- jlm/rvsdg/theta.hpp | 2 +- tests/jlm/rvsdg/test-nodes.cpp | 6 ++--- tests/test-operation.cpp | 2 +- tests/test-operation.hpp | 2 +- 37 files changed, 72 insertions(+), 74 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index cc36f8384..b1a83aac6 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -14,7 +14,7 @@ namespace jlm::hls static void ConvertGammaNodeWithoutSpeculation(rvsdg::GammaNode & gammaNode) { - rvsdg::substitution_map substitutionMap; + rvsdg::SubstitutionMap substitutionMap; // create a branch for each gamma input and map the corresponding argument of each subregion to an // output of the branch @@ -55,7 +55,7 @@ ConvertGammaNodeWithoutSpeculation(rvsdg::GammaNode & gammaNode) static void ConvertGammaNodeWithSpeculation(rvsdg::GammaNode & gammaNode) { - rvsdg::substitution_map substitutionMap; + rvsdg::SubstitutionMap substitutionMap; // Map arguments to origins of inputs. 
Forks will automatically be created later for (size_t i = 0; i < gammaNode.nentryvars(); i++) diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index dace17626..c01ec0f83 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -13,7 +13,7 @@ namespace jlm::hls static void ConvertThetaNode(rvsdg::ThetaNode & theta) { - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; auto loop = hls::loop_node::create(theta.region()); std::vector branches; diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 9d40330f7..6370e7373 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -70,7 +70,7 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) lambdaNode.linkage(), lambdaNode.attributes()); - jlm::rvsdg::substitution_map substitutionMap; + rvsdg::SubstitutionMap substitutionMap; for (size_t i = 0; i < lambdaNode.ncvarguments(); ++i) { auto oldArgument = lambdaNode.cvargument(i); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index ec14d7917..6492c7bc7 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -50,7 +50,7 @@ add_lambda_argument(llvm::lambda::node * ln, std::shared_ptrlinkage(), ln->attributes()); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (size_t i = 0; i < ln->ncvarguments(); ++i) { // copy over cvarguments diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 1bf88fc3f..0bd7102b4 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -224,7 +224,7 @@ decouple_load( { // loadNode is always a part of loop_slice due to state edges auto new_loop = 
loop_node::create(loopNode->region(), false); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; std::vector backedge_args; // create arguments for (size_t i = 0; i < loopNode->subregion()->narguments(); ++i) diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index ac2aab97d..9103b12a8 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -27,7 +27,7 @@ change_function_name(llvm::lambda::node * ln, const std::string & name) llvm::lambda::node::create(ln->region(), ln->Type(), name, ln->linkage(), ln->attributes()); /* add context variables */ - jlm::rvsdg::substitution_map subregionmap; + rvsdg::SubstitutionMap subregionmap; for (auto & cv : ln->ctxvars()) { auto origin = cv.origin(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 5a8945fe8..03b328d04 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -638,7 +638,7 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) lambda->linkage(), lambda->attributes()); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (size_t i = 0; i < lambda->ncvarguments(); ++i) { smap.insert( @@ -737,7 +737,7 @@ jlm::rvsdg::output * jlm::hls::ConnectRequestResponseMemPorts( const jlm::llvm::lambda::node * lambda, size_t argumentIndex, - jlm::rvsdg::substitution_map & smap, + rvsdg::SubstitutionMap & smap, const std::vector & originalLoadNodes, const std::vector & originalStoreNodes, const std::vector & originalDecoupledNodes) @@ -838,7 +838,7 @@ jlm::hls::ConnectRequestResponseMemPorts( jlm::rvsdg::simple_node * jlm::hls::ReplaceLoad( - jlm::rvsdg::substitution_map & smap, + rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_node * originalLoad, jlm::rvsdg::output * response) { @@ -876,9 +876,7 @@ jlm::hls::ReplaceLoad( } jlm::rvsdg::simple_node * -jlm::hls::ReplaceStore( - 
jlm::rvsdg::substitution_map & smap, - const jlm::rvsdg::simple_node * originalStore) +jlm::hls::ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_node * originalStore) { // We have the store from the original lambda since it is needed to update the smap // We need the store in the new lambda such that we can replace it with a store node with explicit @@ -906,7 +904,7 @@ jlm::hls::ReplaceStore( jlm::rvsdg::simple_node * ReplaceDecouple( - jlm::rvsdg::substitution_map & smap, + jlm::rvsdg::SubstitutionMap & smap, const jlm::llvm::lambda::node * lambda, jlm::rvsdg::simple_node * originalDecoupleRequest, jlm::rvsdg::output * response) diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp index c848eba21..51f819904 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp @@ -38,19 +38,19 @@ jlm::rvsdg::output * ConnectRequestResponseMemPorts( const llvm::lambda::node * lambda, size_t argumentIndex, - jlm::rvsdg::substitution_map & smap, + rvsdg::SubstitutionMap & smap, const std::vector & originalLoadNodes, const std::vector & originalStoreNodes, const std::vector & originalDecoupledNodes); jlm::rvsdg::simple_node * ReplaceLoad( - jlm::rvsdg::substitution_map & smap, + rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_node * originalLoad, jlm::rvsdg::output * response); jlm::rvsdg::simple_node * -ReplaceStore(jlm::rvsdg::substitution_map & smap, const jlm::rvsdg::simple_node * originalStore); +ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_node * originalStore); jlm::rvsdg::output * route_response(rvsdg::Region * target, jlm::rvsdg::output * response); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 95863b924..d1d15a41c 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -119,8 +119,8 @@ fix_match_inversion(rvsdg::GammaNode * 
old_gamma) op, { no->node()->input(0)->origin() })[0]; auto new_gamma = rvsdg::GammaNode::create(new_match, match->nalternatives()); - rvsdg::substitution_map rmap0; // subregion 0 of the new gamma - 1 of the old - rvsdg::substitution_map rmap1; + rvsdg::SubstitutionMap rmap0; // subregion 0 of the new gamma - 1 of the old + rvsdg::SubstitutionMap rmap1; for (auto oev = old_gamma->begin_entryvar(); oev != old_gamma->end_entryvar(); oev++) { auto nev = new_gamma->add_entryvar(oev->origin()); @@ -292,7 +292,7 @@ merge_gamma(rvsdg::GammaNode * gamma) } if (can_merge) { - std::vector rmap(gamma->nsubregions()); + std::vector rmap(gamma->nsubregions()); // populate argument mappings for (size_t i = 0; i < gamma->nentryvars(); ++i) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index bc4ebfb2b..e0f8122c0 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -204,7 +204,7 @@ remove_lambda_passthrough(llvm::lambda::node * ln) ln->linkage(), ln->attributes()); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (size_t i = 0; i < ln->ncvarguments(); ++i) { // copy over cvarguments diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 4d60fab56..641965121 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -260,7 +260,7 @@ rename_delta(llvm::delta::node * odn) "", odn->constant()); /* add dependencies */ - jlm::rvsdg::substitution_map rmap; + rvsdg::SubstitutionMap rmap; for (size_t i = 0; i < odn->ncvarguments(); i++) { auto input = odn->input(i); @@ -286,7 +286,7 @@ change_linkage(llvm::lambda::node * ln, llvm::linkage link) llvm::lambda::node::create(ln->region(), ln->Type(), ln->name(), link, ln->attributes()); /* add context variables */ - jlm::rvsdg::substitution_map subregionmap; + rvsdg::SubstitutionMap 
subregionmap; for (auto & cv : ln->ctxvars()) { auto origin = cv.origin(); @@ -337,7 +337,7 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) inline_calls(ln->subregion()); split_opt(rm); // convert_alloca(ln->subregion()); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (size_t i = 0; i < ln->ninputs(); ++i) { auto orig_node_output = dynamic_cast(ln->input(i)->origin()); @@ -445,7 +445,7 @@ dump_ref(llvm::RvsdgModule & rhls, std::string & path) { auto reference = llvm::RvsdgModule::Create(rhls.SourceFileName(), rhls.TargetTriple(), rhls.DataLayout()); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; rhls.Rvsdg().root()->copy(reference->Rvsdg().root(), smap, true, true); pre_opt(*reference); instrument_ref(*reference); diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index cf627fdfa..626554663 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -99,7 +99,7 @@ loop_node::add_loopconst(jlm::rvsdg::output * origin) } loop_node * -loop_node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const +loop_node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 001040623..e169a66df 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -793,7 +793,7 @@ class loop_node final : public jlm::rvsdg::structural_node add_loopconst(jlm::rvsdg::output * origin); virtual loop_node * - copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; }; class bundletype final : public jlm::rvsdg::valuetype diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 6a387d5c5..07a943088 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -55,13 +55,13 @@ 
node::add_ctxvar(jlm::rvsdg::output * origin) } phi::node * -node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const +node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { phi::builder pb; pb.begin(region); /* add context variables */ - jlm::rvsdg::substitution_map subregionmap; + rvsdg::SubstitutionMap subregionmap; for (auto it = begin_cv(); it != end_cv(); it++) { auto origin = smap.lookup(it->origin()); diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 50393066e..64679aed3 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -486,7 +486,7 @@ class node final : public jlm::rvsdg::structural_node output(size_t n) const noexcept; virtual phi::node * - copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; /** * Extracts all lambda nodes from a phi node. diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index 1af42804f..a7766840d 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -49,12 +49,12 @@ node::copy(rvsdg::Region * region, const std::vector & ope } delta::node * -node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const +node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { auto delta = Create(region, Type(), name(), linkage(), Section(), constant()); /* add context variables */ - jlm::rvsdg::substitution_map subregionmap; + rvsdg::SubstitutionMap subregionmap; for (auto & cv : ctxvars()) { auto origin = smap.lookup(cv.origin()); diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index a1ef4aa81..0e804de2e 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -262,7 +262,7 @@ class node final : public rvsdg::structural_node copy(rvsdg::Region * region, const std::vector & operands) const 
override; virtual delta::node * - copy(rvsdg::Region * region, rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; /** * Creates a delta node in the region \p parent with the pointer type \p type and name \p name. diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 32618d770..b0443ad06 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -245,12 +245,12 @@ node::copy(rvsdg::Region * region, const std::vector & ope } lambda::node * -node::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const +node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { auto lambda = create(region, Type(), name(), linkage(), attributes()); /* add context variables */ - jlm::rvsdg::substitution_map subregionmap; + rvsdg::SubstitutionMap subregionmap; for (auto & cv : ctxvars()) { auto origin = smap.lookup(cv.origin()); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index a4425970c..4a54a8b9c 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -300,7 +300,7 @@ class node final : public jlm::rvsdg::structural_node copy(rvsdg::Region * region, const std::vector & operands) const override; lambda::node * - copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; /** * @return The memory state argument of the lambda subregion. 
diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 44eddc2be..60f66b1c5 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -121,7 +121,7 @@ inlineCall(jlm::rvsdg::simple_node * call, const lambda::node * lambda) auto deps = route_dependencies(lambda, call); JLM_ASSERT(lambda->ncvarguments() == deps.size()); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (size_t n = 1; n < call->ninputs(); n++) { auto argument = lambda->fctargument(n - 1); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index eeced9554..deb49c718 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -114,7 +114,7 @@ collect_condition_nodes(jlm::rvsdg::structural_node * tnode, jlm::rvsdg::structu static void copy_condition_nodes( rvsdg::Region * target, - jlm::rvsdg::substitution_map & smap, + rvsdg::SubstitutionMap & smap, const std::vector> & nodes) { for (size_t n = 0; n < nodes.size(); n++) @@ -146,7 +146,7 @@ invert(rvsdg::ThetaNode * otheta) pullin(ogamma, otheta); /* copy condition nodes for new gamma node */ - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; auto cnodes = collect_condition_nodes(otheta, ogamma); for (const auto & olv : *otheta) smap.insert(olv->argument(), olv->input()->origin()); @@ -156,7 +156,7 @@ invert(rvsdg::ThetaNode * otheta) rvsdg::GammaNode::create(smap.lookup(ogamma->predicate()->origin()), ogamma->nsubregions()); /* handle subregion 0 */ - jlm::rvsdg::substitution_map r0map; + rvsdg::SubstitutionMap r0map; { /* setup substitution map for exit region copying */ auto osubregion0 = ogamma->subregion(0); @@ -188,7 +188,7 @@ invert(rvsdg::ThetaNode * otheta) } /* handle subregion 1 */ - jlm::rvsdg::substitution_map r1map; + rvsdg::SubstitutionMap r1map; { auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 96ee05e1b..47a3e4bcb 100644 --- a/jlm/llvm/opt/unroll.cpp 
+++ b/jlm/llvm/opt/unroll.cpp @@ -177,13 +177,13 @@ static void unroll_body( const rvsdg::ThetaNode * theta, rvsdg::Region * target, - jlm::rvsdg::substitution_map & smap, + rvsdg::SubstitutionMap & smap, size_t factor) { for (size_t n = 0; n < factor - 1; n++) { theta->subregion()->copy(target, smap, false, false); - jlm::rvsdg::substitution_map tmap; + rvsdg::SubstitutionMap tmap; for (const auto & olv : *theta) tmap.insert(olv->argument(), smap.lookup(olv->result()->origin())); smap = tmap; @@ -199,7 +199,7 @@ unroll_body( static void copy_body_and_unroll(const rvsdg::ThetaNode * theta, size_t factor) { - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (const auto & olv : *theta) smap.insert(olv->argument(), olv->input()->origin()); @@ -213,7 +213,7 @@ copy_body_and_unroll(const rvsdg::ThetaNode * theta, size_t factor) Unroll theta node by given factor. */ static void -unroll_theta(const unrollinfo & ui, jlm::rvsdg::substitution_map & smap, size_t factor) +unroll_theta(const unrollinfo & ui, rvsdg::SubstitutionMap & smap, size_t factor) { auto theta = ui.theta(); auto remainder = ui.remainder(factor); @@ -259,7 +259,7 @@ unroll_theta(const unrollinfo & ui, jlm::rvsdg::substitution_map & smap, size_t Adde the reminder for the lopp if any */ static void -add_remainder(const unrollinfo & ui, jlm::rvsdg::substitution_map & smap, size_t factor) +add_remainder(const unrollinfo & ui, rvsdg::SubstitutionMap & smap, size_t factor) { auto theta = ui.theta(); auto remainder = ui.remainder(factor); @@ -318,7 +318,7 @@ unroll_known_theta(const unrollinfo & ui, size_t factor) /* Unroll the theta */ - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; unroll_theta(ui, smap, factor); /* @@ -349,7 +349,7 @@ create_unrolled_gamma_predicate(const unrollinfo & ui, size_t factor) static jlm::rvsdg::output * create_unrolled_theta_predicate( rvsdg::Region * target, - const jlm::rvsdg::substitution_map & smap, + const rvsdg::SubstitutionMap & smap, 
const unrollinfo & ui, size_t factor) { @@ -377,7 +377,7 @@ create_unrolled_theta_predicate( } static jlm::rvsdg::output * -create_residual_gamma_predicate(const jlm::rvsdg::substitution_map & smap, const unrollinfo & ui) +create_residual_gamma_predicate(const rvsdg::SubstitutionMap & smap, const unrollinfo & ui) { auto region = ui.theta()->region(); auto idv = smap.lookup(ui.theta()->output(ui.idv()->input()->index())); @@ -396,13 +396,13 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) auto otheta = ui.theta(); /* handle gamma with unrolled loop */ - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; { auto pred = create_unrolled_gamma_predicate(ui, factor); auto ngamma = rvsdg::GammaNode::create(pred, 2); auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); - jlm::rvsdg::substitution_map rmap[2]; + rvsdg::SubstitutionMap rmap[2]; for (const auto & olv : *otheta) { auto ev = ngamma->add_entryvar(olv->input()->origin()); @@ -435,7 +435,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) auto ngamma = rvsdg::GammaNode::create(pred, 2); auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); - jlm::rvsdg::substitution_map rmap[2]; + rvsdg::SubstitutionMap rmap[2]; for (const auto & olv : *otheta) { auto ev = ngamma->add_entryvar(smap.lookup(olv)); diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 3c372d1cf..b33e0b526 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -28,7 +28,7 @@ perform_predicate_reduction(GammaNode * gamma) auto cop = static_cast(&constant->operation()); auto alternative = cop->value().alternative(); - jlm::rvsdg::substitution_map smap; + rvsdg::SubstitutionMap smap; for (auto it = gamma->begin_entryvar(); it != gamma->end_entryvar(); it++) smap.insert(it->argument(alternative), it->origin()); @@ -341,12 +341,12 @@ GammaNode::exitvar_iterator::operator++() noexcept } GammaNode * -GammaNode::copy(rvsdg::Region * region, substitution_map & smap) const 
+GammaNode::copy(rvsdg::Region * region, SubstitutionMap & smap) const { auto gamma = create(smap.lookup(predicate()->origin()), nsubregions()); /* add entry variables to new gamma */ - std::vector rmap(nsubregions()); + std::vector rmap(nsubregions()); for (auto oev = begin_entryvar(); oev != end_entryvar(); oev++) { auto nev = gamma->add_entryvar(smap.lookup(oev->origin())); diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 29234bb19..23a8b51a0 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -312,7 +312,7 @@ class GammaNode : public structural_node } virtual GammaNode * - copy(jlm::rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(jlm::rvsdg::Region * region, SubstitutionMap & smap) const override; }; /* gamma input */ diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 9dcdf41c3..4abd6a91a 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -42,7 +42,7 @@ graph::graph() std::unique_ptr graph::copy() const { - jlm::rvsdg::substitution_map smap; + SubstitutionMap smap; std::unique_ptr graph(new jlm::rvsdg::graph()); root()->copy(graph->root(), smap, true, true); return graph; diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index ae5734fa6..45d562f7f 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -290,7 +290,7 @@ node::recompute_depth() noexcept jlm::rvsdg::node * node::copy(rvsdg::Region * region, const std::vector & operands) const { - substitution_map smap; + SubstitutionMap smap; size_t noperands = std::min(operands.size(), ninputs()); for (size_t n = 0; n < noperands; n++) diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 16102d4ae..9022fcaab 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -28,7 +28,7 @@ class type; class graph; class node_normal_form; class output; -class substitution_map; +class SubstitutionMap; /* inputs */ @@ -809,7 +809,7 @@ class node subsequent \ref copy operations. 
*/ virtual jlm::rvsdg::node * - copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const = 0; + copy(rvsdg::Region * region, SubstitutionMap & smap) const = 0; inline size_t depth() const noexcept diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 6deca52b8..7cb3ab5d6 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -178,7 +178,7 @@ Region::remove_node(jlm::rvsdg::node * node) } void -Region::copy(Region * target, substitution_map & smap, bool copy_arguments, bool copy_results) const +Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool copy_results) const { smap.insert(this, target); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 215e7d240..1230126e4 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -29,7 +29,7 @@ class structural_input; class structural_node; class structural_op; class structural_output; -class substitution_map; +class SubstitutionMap; /** * \brief Represents the argument of a region. @@ -368,7 +368,7 @@ class Region map will be updated as nodes are copied. 
*/ void - copy(Region * target, substitution_map & smap, bool copy_arguments, bool copy_results) const; + copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool copy_results) const; void prune(bool recursive); diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index d6dc53253..4a742a97b 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -79,7 +79,7 @@ simple_node::copy(rvsdg::Region * region, const std::vector operands; for (size_t n = 0; n < ninputs(); n++) diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index d7b433571..1d7a41ae0 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -45,7 +45,7 @@ class simple_node : public node copy(rvsdg::Region * region, const std::vector & operands) const override; virtual jlm::rvsdg::node * - copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, SubstitutionMap & smap) const override; static inline jlm::rvsdg::simple_node * create( diff --git a/jlm/rvsdg/substitution.hpp b/jlm/rvsdg/substitution.hpp index 051c71f0a..a76627d04 100644 --- a/jlm/rvsdg/substitution.hpp +++ b/jlm/rvsdg/substitution.hpp @@ -18,7 +18,7 @@ class output; class Region; class structural_input; -class substitution_map final +class SubstitutionMap final { public: bool diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 6e9cedcf8..c1667d26e 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -114,12 +114,12 @@ ThetaNode::add_loopvar(jlm::rvsdg::output * origin) } ThetaNode * -ThetaNode::copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const +ThetaNode::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); - jlm::rvsdg::substitution_map rmap; + rvsdg::SubstitutionMap rmap; auto theta = create(region); /* add loop variables */ diff --git a/jlm/rvsdg/theta.hpp 
b/jlm/rvsdg/theta.hpp index 9fc2a7f22..1d00216ae 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -244,7 +244,7 @@ class ThetaNode final : public structural_node add_loopvar(jlm::rvsdg::output * origin); virtual ThetaNode * - copy(rvsdg::Region * region, jlm::rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; }; class ThetaInput final : public structural_input diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 900c2ae7b..cfbd20a03 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -40,7 +40,7 @@ test_node_copy(void) jlm::rvsdg::view(graph.root(), stdout); /* copy first into second region with arguments and results */ - substitution_map smap; + SubstitutionMap smap; smap.insert(i1, i1); smap.insert(i2, i2); smap.insert(o1, o1); @@ -61,7 +61,7 @@ test_node_copy(void) assert(r2->nnodes() == 2); /* copy second into third region only with arguments */ - jlm::rvsdg::substitution_map smap2; + jlm::rvsdg::SubstitutionMap smap2; auto & a3 = TestGraphArgument::Create(*n1->subregion(2), i1, stype); auto & a4 = TestGraphArgument::Create(*n1->subregion(2), i2, vtype); smap2.insert(r2->argument(0), &a3); @@ -81,7 +81,7 @@ test_node_copy(void) assert(r3->nnodes() == 2); /* copy structural node */ - jlm::rvsdg::substitution_map smap3; + jlm::rvsdg::SubstitutionMap smap3; smap3.insert(s, s); smap3.insert(v, v); n1->copy(graph.root(), smap3); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index dc89aed84..46c6f785f 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -166,7 +166,7 @@ structural_node::~structural_node() {} structural_node * -structural_node::copy(rvsdg::Region * parent, rvsdg::substitution_map & smap) const +structural_node::copy(rvsdg::Region * parent, rvsdg::SubstitutionMap & smap) const { graph()->mark_denormalized(); auto node = structural_node::create(parent, nsubregions()); 
diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 30169ee04..283d6d098 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -239,7 +239,7 @@ class structural_node final : public rvsdg::structural_node } virtual structural_node * - copy(rvsdg::Region * region, rvsdg::substitution_map & smap) const override; + copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; }; class StructuralNodeInput final : public rvsdg::structural_input From b395dc8825e8c8b09b5a2117f88e20bb94e50b92 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 28 Sep 2024 07:27:28 +0200 Subject: [PATCH 087/170] Avoid hoisting loads out of gammas (#635) Closes #632 --- jlm/llvm/opt/push.cpp | 8 -------- 1 file changed, 8 deletions(-) diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index bebdb9dff..695f98615 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -155,14 +155,6 @@ copy_from_theta(jlm::rvsdg::node * node) static bool is_gamma_top_pushable(const jlm::rvsdg::node * node) { - /* - FIXME: This is techically not fully correct. It is - only possible to push a load out of a gamma node, if - it is guaranteed to load from a valid address. - */ - if (is(node)) - return true; - return !has_side_effects(node); } From 54449da8f4d71e3d634550763274b5b2aec7ec22 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 28 Sep 2024 07:49:39 +0200 Subject: [PATCH 088/170] Move CFG conversion to ascii to cfg class (#631) The ascii output for the control flow graph is not easily readable. This is the first PR on improving it. It does the following: 1. Moves the functions for converting a CFG to ascii to the cfg class 2. Adds a unit test for the conversion of three address codes In order to add a unit test for the CFG conversion, I first need to make the output more deterministic. This will happen in a follow up PR. 
This PR is part of issue #586 --- jlm/llvm/Makefile.sub | 1 + jlm/llvm/ir/cfg.cpp | 105 ++++++++++++++ jlm/llvm/ir/cfg.hpp | 18 +++ jlm/llvm/ir/print.cpp | 133 +----------------- jlm/llvm/ir/print.hpp | 10 -- jlm/llvm/ir/tac.cpp | 26 ++++ jlm/llvm/ir/tac.hpp | 3 + .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 2 +- tests/jlm/llvm/ir/ThreeAddressCodeTests.cpp | 59 ++++++++ tests/jlm/llvm/ir/test-cfg-node.cpp | 2 +- tests/jlm/llvm/ir/test-cfg-prune.cpp | 4 +- tests/jlm/llvm/ir/test-cfg-purge.cpp | 2 +- tests/jlm/llvm/ir/test-cfg-structure.cpp | 2 +- tests/jlm/llvm/ir/test-cfg-validity.cpp | 2 +- tests/jlm/llvm/ir/test-cfg.cpp | 2 +- tests/jlm/llvm/ir/test-ssa-destruction.cpp | 4 +- 16 files changed, 225 insertions(+), 150 deletions(-) create mode 100644 tests/jlm/llvm/ir/ThreeAddressCodeTests.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index b88fef2d4..e5ee2244f 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -190,6 +190,7 @@ libllvm_TESTS += \ tests/jlm/llvm/ir/test-ssa-destruction \ tests/jlm/llvm/ir/TestTypes \ tests/jlm/llvm/ir/TestAnnotation \ + tests/jlm/llvm/ir/ThreeAddressCodeTests \ tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider \ tests/jlm/llvm/opt/alias-analyses/TestAndersen \ tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation \ diff --git a/jlm/llvm/ir/cfg.cpp b/jlm/llvm/ir/cfg.cpp index f2221a88f..aad3183d1 100644 --- a/jlm/llvm/ir/cfg.cpp +++ b/jlm/llvm/ir/cfg.cpp @@ -69,6 +69,111 @@ cfg::remove_node(basic_block * bb) return remove_node(it); } +std::string +cfg::ToAscii(const cfg & controlFlowGraph) +{ + std::string str; + auto nodes = breadth_first(controlFlowGraph); + for (const auto & node : nodes) + { + str += CreateLabel(*node) + ":"; + str += (is(node) ? 
"\n" : " "); + + if (auto entryNode = dynamic_cast(node)) + { + str += ToAscii(*entryNode); + } + else if (auto exitNode = dynamic_cast(node)) + { + str += ToAscii(*exitNode); + } + else if (auto basicBlock = dynamic_cast(node)) + { + str += ToAscii(*basicBlock); + } + else + { + JLM_UNREACHABLE("Unhandled control flow graph node type!"); + } + } + + return str; +} + +std::string +cfg::ToAscii(const entry_node & entryNode) +{ + std::string str; + for (size_t n = 0; n < entryNode.narguments(); n++) + { + str += entryNode.argument(n)->debug_string() + " "; + } + + return str + "\n"; +} + +std::string +cfg::ToAscii(const exit_node & exitNode) +{ + std::string str; + for (size_t n = 0; n < exitNode.nresults(); n++) + { + str += exitNode.result(n)->debug_string() + " "; + } + + return str; +} + +std::string +cfg::ToAscii(const basic_block & basicBlock) +{ + auto & threeAddressCodes = basicBlock.tacs(); + + std::string str; + for (const auto & tac : threeAddressCodes) + { + str += "\t" + tac::ToAscii(*tac); + if (tac != threeAddressCodes.last()) + str += "\n"; + } + + if (threeAddressCodes.last()) + { + if (is(threeAddressCodes.last()->operation())) + str += " " + CreateTargets(basicBlock); + else + str += "\n\t" + CreateTargets(basicBlock); + } + else + { + str += "\t" + CreateTargets(basicBlock); + } + + return str + "\n"; +} + +std::string +cfg::CreateTargets(const cfg_node & node) +{ + size_t n = 0; + std::string str("["); + for (auto it = node.begin_outedges(); it != node.end_outedges(); it++, n++) + { + str += CreateLabel(*it->sink()); + if (n != node.noutedges() - 1) + str += ", "; + } + str += "]"; + + return str; +} + +std::string +cfg::CreateLabel(const cfg_node & node) +{ + return util::strfmt(&node); +} + /* supporting functions */ std::vector diff --git a/jlm/llvm/ir/cfg.hpp b/jlm/llvm/ir/cfg.hpp index 27f6d8ee9..7e0e51c5e 100644 --- a/jlm/llvm/ir/cfg.hpp +++ b/jlm/llvm/ir/cfg.hpp @@ -376,7 +376,25 @@ class cfg final return std::unique_ptr(new cfg(im)); } + 
static std::string + ToAscii(const cfg & controlFlowGraph); + private: + static std::string + ToAscii(const entry_node & entryNode); + + static std::string + ToAscii(const exit_node & exitNode); + + static std::string + ToAscii(const basic_block & basicBlock); + + static std::string + CreateTargets(const cfg_node & node); + + static std::string + CreateLabel(const cfg_node & node); + ipgraph_module & module_; std::unique_ptr exit_; std::unique_ptr entry_; diff --git a/jlm/llvm/ir/print.cpp b/jlm/llvm/ir/print.cpp index 7875dc87b..926fcd732 100644 --- a/jlm/llvm/ir/print.cpp +++ b/jlm/llvm/ir/print.cpp @@ -18,143 +18,16 @@ namespace jlm::llvm /* string converters */ -static std::string -emit_tac(const llvm::tac &); - static std::string emit_tacs(const tacsvector_t & tacs) { std::string str; for (const auto & tac : tacs) - str += emit_tac(*tac) + ", "; + str += tac::ToAscii(*tac) + ", "; return "[" + str + "]"; } -static inline std::string -emit_entry(const cfg_node * node) -{ - JLM_ASSERT(is(node)); - auto & en = *static_cast(node); - - std::string str; - for (size_t n = 0; n < en.narguments(); n++) - str += en.argument(n)->debug_string() + " "; - - return str + "\n"; -} - -static inline std::string -emit_exit(const cfg_node * node) -{ - JLM_ASSERT(is(node)); - auto & xn = *static_cast(node); - - std::string str; - for (size_t n = 0; n < xn.nresults(); n++) - str += xn.result(n)->debug_string() + " "; - - return str; -} - -static inline std::string -emit_tac(const llvm::tac & tac) -{ - /* convert results */ - std::string results; - for (size_t n = 0; n < tac.nresults(); n++) - { - results += tac.result(n)->debug_string(); - if (n != tac.nresults() - 1) - results += ", "; - } - - /* convert operands */ - std::string operands; - for (size_t n = 0; n < tac.noperands(); n++) - { - operands += tac.operand(n)->debug_string(); - if (n != tac.noperands() - 1) - operands += ", "; - } - - std::string op = tac.operation().debug_string(); - return results + (results.empty() ? 
"" : " = ") + op + " " + operands; -} - -static inline std::string -emit_label(const cfg_node * node) -{ - return util::strfmt(node); -} - -static inline std::string -emit_targets(const cfg_node * node) -{ - size_t n = 0; - std::string str("["); - for (auto it = node->begin_outedges(); it != node->end_outedges(); it++, n++) - { - str += emit_label(it->sink()); - if (n != node->noutedges() - 1) - str += ", "; - } - str += "]"; - - return str; -} - -static inline std::string -emit_basic_block(const cfg_node * node) -{ - JLM_ASSERT(is(node)); - auto & tacs = static_cast(node)->tacs(); - - std::string str; - for (const auto & tac : tacs) - { - str += "\t" + emit_tac(*tac); - if (tac != tacs.last()) - str += "\n"; - } - - if (tacs.last()) - { - if (is(tacs.last()->operation())) - str += " " + emit_targets(node); - else - str += "\n\t" + emit_targets(node); - } - else - { - str += "\t" + emit_targets(node); - } - - return str + "\n"; -} - -std::string -to_str(const llvm::cfg & cfg) -{ - static std::unordered_map map( - { { typeid(entry_node), emit_entry }, - { typeid(exit_node), emit_exit }, - { typeid(basic_block), emit_basic_block } }); - - std::string str; - auto nodes = breadth_first(cfg); - for (const auto & node : nodes) - { - str += emit_label(node) + ":"; - str += (is(node) ? "\n" : " "); - - JLM_ASSERT(map.find(typeid(*node)) != map.end()); - str += map[typeid(*node)](node) + "\n"; - } - - return str; -} - static std::string emit_function_node(const ipgraph_node & clg_node) { @@ -183,7 +56,7 @@ emit_function_node(const ipgraph_node & clg_node) } operands += ">"; - std::string cfg = node.cfg() ? to_str(*node.cfg()) : ""; + std::string cfg = node.cfg() ? cfg::ToAscii(*node.cfg()) : ""; std::string exported = !is_externally_visible(node.linkage()) ? 
"static" : ""; return exported + results + " " + node.name() + " " + operands + "\n{\n" + cfg + "}\n"; @@ -261,7 +134,7 @@ emit_basic_block(const cfg_node & node) std::string str; for (const auto & tac : tacs) - str += emit_tac(*tac) + "\\n"; + str += tac::ToAscii(*tac) + "\\n"; return str; } diff --git a/jlm/llvm/ir/print.hpp b/jlm/llvm/ir/print.hpp index 9eab85731..22816c062 100644 --- a/jlm/llvm/ir/print.hpp +++ b/jlm/llvm/ir/print.hpp @@ -21,19 +21,9 @@ class ipgraph_module; /* control flow graph */ -std::string -to_str(const llvm::cfg & cfg); - std::string to_dot(const llvm::cfg & cfg); -static inline void -print_ascii(const llvm::cfg & cfg, FILE * out) -{ - fputs(to_str(cfg).c_str(), out); - fflush(out); -} - static inline void print_dot(const llvm::cfg & cfg, FILE * out) { diff --git a/jlm/llvm/ir/tac.cpp b/jlm/llvm/ir/tac.cpp index 38f2545fc..f00a5a194 100644 --- a/jlm/llvm/ir/tac.cpp +++ b/jlm/llvm/ir/tac.cpp @@ -119,4 +119,30 @@ tac::replace( operation_ = operation.copy(); } +std::string +tac::ToAscii(const jlm::llvm::tac & threeAddressCode) +{ + std::string resultString; + for (size_t n = 0; n < threeAddressCode.nresults(); n++) + { + resultString += threeAddressCode.result(n)->debug_string(); + if (n != threeAddressCode.nresults() - 1) + resultString += ", "; + } + + std::string operandString; + for (size_t n = 0; n < threeAddressCode.noperands(); n++) + { + operandString += threeAddressCode.operand(n)->debug_string(); + if (n != threeAddressCode.noperands() - 1) + operandString += ", "; + } + + std::string operationString = threeAddressCode.operation().debug_string(); + std::string resultOperationSeparator = resultString.empty() ? "" : " = "; + std::string operationOperandSeparator = operandString.empty() ? 
"" : " "; + return resultString + resultOperationSeparator + operationString + operationOperandSeparator + + operandString; +} + } diff --git a/jlm/llvm/ir/tac.hpp b/jlm/llvm/ir/tac.hpp index 29f7405b7..136f01160 100644 --- a/jlm/llvm/ir/tac.hpp +++ b/jlm/llvm/ir/tac.hpp @@ -126,6 +126,9 @@ class tac final void convert(const jlm::rvsdg::simple_op & operation, const std::vector & operands); + static std::string + ToAscii(const tac & threeAddressCode); + static std::unique_ptr create(const jlm::rvsdg::simple_op & operation, const std::vector & operands) { diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp index be4b9d403..43ece0a7b 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -221,7 +221,7 @@ PartialEmptyGamma() assert(ipg.nnodes() == 1); auto cfg = dynamic_cast(*ipg.begin()).cfg(); - print_ascii(*cfg, stdout); + std::cout << cfg::ToAscii(*cfg) << std::flush; assert(!is_proper_structured(*cfg)); assert(is_structured(*cfg)); diff --git a/tests/jlm/llvm/ir/ThreeAddressCodeTests.cpp b/tests/jlm/llvm/ir/ThreeAddressCodeTests.cpp new file mode 100644 index 000000000..4c42f5cd5 --- /dev/null +++ b/tests/jlm/llvm/ir/ThreeAddressCodeTests.cpp @@ -0,0 +1,59 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. 
+ */ + +#include +#include +#include + +static int +ToAscii() +{ + using namespace jlm::llvm; + using namespace jlm::tests; + + // Arrange + auto valueType = valuetype::Create(); + + variable v0(valueType, "v0"); + variable v1(valueType, "v1"); + + auto tac0 = create_testop_tac({}, {}); + auto tac1 = create_testop_tac({ &v0 }, {}); + auto tac2 = create_testop_tac({ &v0, &v1 }, {}); + auto tac3 = create_testop_tac({}, { valueType }); + auto tac4 = create_testop_tac({}, { valueType, valueType }); + auto tac5 = create_testop_tac({ &v0, &v1 }, { valueType, valueType }); + + // Act + auto tac0String = tac::ToAscii(*tac0); + std::cout << tac0String << "\n" << std::flush; + + auto tac1String = tac::ToAscii(*tac1); + std::cout << tac1String << "\n" << std::flush; + + auto tac2String = tac::ToAscii(*tac2); + std::cout << tac2String << "\n" << std::flush; + + auto tac3String = tac::ToAscii(*tac3); + std::cout << tac3String << "\n" << std::flush; + + auto tac4String = tac::ToAscii(*tac4); + std::cout << tac4String << "\n" << std::flush; + + auto tac5String = tac::ToAscii(*tac5); + std::cout << tac5String << "\n" << std::flush; + + // Assert + assert(tac0String == "test_op"); + assert(tac1String == "test_op v0"); + assert(tac2String == "test_op v0, v1"); + assert(tac3String == "tv0 = test_op"); + assert(tac4String == "tv1, tv2 = test_op"); + assert(tac5String == "tv3, tv4 = test_op v0, v1"); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/ThreeAddressCodeTests-ToAscii", ToAscii); diff --git a/tests/jlm/llvm/ir/test-cfg-node.cpp b/tests/jlm/llvm/ir/test-cfg-node.cpp index 3b0cad482..df13ac858 100644 --- a/tests/jlm/llvm/ir/test-cfg-node.cpp +++ b/tests/jlm/llvm/ir/test-cfg-node.cpp @@ -26,7 +26,7 @@ test_divert_inedges() bb0->add_outedge(bb0); bb0->add_outedge(cfg.exit()); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; /* verify inedge diversion */ diff --git a/tests/jlm/llvm/ir/test-cfg-prune.cpp b/tests/jlm/llvm/ir/test-cfg-prune.cpp index 
f4243174a..7f7b23101 100644 --- a/tests/jlm/llvm/ir/test-cfg-prune.cpp +++ b/tests/jlm/llvm/ir/test-cfg-prune.cpp @@ -38,12 +38,12 @@ test() bb1->add_outedge(cfg.exit()); cfg.exit()->append_result(bb1->last()->result(0)); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; /* verify pruning */ prune(cfg); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; assert(cfg.nnodes() == 1); diff --git a/tests/jlm/llvm/ir/test-cfg-purge.cpp b/tests/jlm/llvm/ir/test-cfg-purge.cpp index a2351e0f6..ca13ece01 100644 --- a/tests/jlm/llvm/ir/test-cfg-purge.cpp +++ b/tests/jlm/llvm/ir/test-cfg-purge.cpp @@ -30,7 +30,7 @@ test() bb0->add_outedge(cfg.exit()); bb1->add_outedge(bb1); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; purge(cfg); diff --git a/tests/jlm/llvm/ir/test-cfg-structure.cpp b/tests/jlm/llvm/ir/test-cfg-structure.cpp index 3e8aab4eb..c2be94a5e 100644 --- a/tests/jlm/llvm/ir/test-cfg-structure.cpp +++ b/tests/jlm/llvm/ir/test-cfg-structure.cpp @@ -67,7 +67,7 @@ test_is_structured() bb->add_outedge(join); join->add_outedge(cfg.exit()); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; assert(is_structured(cfg)); } diff --git a/tests/jlm/llvm/ir/test-cfg-validity.cpp b/tests/jlm/llvm/ir/test-cfg-validity.cpp index 292d40062..235986b28 100644 --- a/tests/jlm/llvm/ir/test-cfg-validity.cpp +++ b/tests/jlm/llvm/ir/test-cfg-validity.cpp @@ -31,7 +31,7 @@ test_single_operand_phi() bb0->add_outedge(cfg.exit()); cfg.exit()->append_result(bb0->last()->result(0)); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; assert(is_valid(cfg)); } diff --git a/tests/jlm/llvm/ir/test-cfg.cpp b/tests/jlm/llvm/ir/test-cfg.cpp index 44ed9905f..9316ea127 100644 --- a/tests/jlm/llvm/ir/test-cfg.cpp +++ b/tests/jlm/llvm/ir/test-cfg.cpp @@ -24,7 +24,7 @@ test_remove_node() bb0->add_outedge(bb0); bb0->add_outedge(cfg.exit()); - print_ascii(cfg, stdout); + std::cout << 
cfg::ToAscii(cfg) << std::flush; /* verify inedge diversion */ diff --git a/tests/jlm/llvm/ir/test-ssa-destruction.cpp b/tests/jlm/llvm/ir/test-ssa-destruction.cpp index 1a89bf8cf..9345e2128 100644 --- a/tests/jlm/llvm/ir/test-ssa-destruction.cpp +++ b/tests/jlm/llvm/ir/test-ssa-destruction.cpp @@ -49,11 +49,11 @@ test_two_phis() bb4->append_last(phi_op::create({ { v1, bb2 }, { v2, bb3 } }, vt)); bb4->append_last(phi_op::create({ { v3, bb2 }, { v4, bb3 } }, vt)); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; destruct_ssa(cfg); - print_ascii(cfg, stdout); + std::cout << cfg::ToAscii(cfg) << std::flush; } static int From 66af5765a4c1cc87a827e590f3241035fbaab937 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 29 Sep 2024 12:00:41 +0200 Subject: [PATCH 089/170] Remove obsolete @param in documentation of GraphWriter.hpp (#637) --- jlm/util/GraphWriter.hpp | 1 - 1 file changed, 1 deletion(-) diff --git a/jlm/util/GraphWriter.hpp b/jlm/util/GraphWriter.hpp index c872681b4..ab03914b3 100644 --- a/jlm/util/GraphWriter.hpp +++ b/jlm/util/GraphWriter.hpp @@ -42,7 +42,6 @@ class GraphElement /** * Constructs a graph element with no label, attributes or associated program object - * @param label */ GraphElement(); From 97249bd9b08d09d80862e4931d36b05edac81a75 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 29 Sep 2024 20:22:09 +0200 Subject: [PATCH 090/170] Add documentation to region class (#638) --- jlm/rvsdg/region.hpp | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 1230126e4..84b556e7d 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -146,6 +146,20 @@ class RegionResult : public input structural_output * output_; }; +/** + * \brief Represent acyclic RVSDG subgraphs + * + * Regions represent acyclic RVSDG subgraphs and are instantiated with an index in \ref + * structural_node%s. 
Each region has \ref RegionArgument%s and \ref RegionResult%s that represent + * the values at the beginning and end of the acyclic graph, respectively. In addition, each region + * keeps track of the following properties: + * + * 1. The nodes of the acyclic subgraph. They represent the computations performed in the region. + * 2. The top nodes of the acyclic subgraph. These are all nodes of the region that have no inputs, + * i.e., constants. + * 3. The bottom nodes of the acyclic subgraph. These are all nodes of the region that have no + * users, i.e. that are dead. See \ref output::IsDead() for more information. + */ class Region { typedef jlm::util::intrusive_list From 9156ef29895cfc5e5101f53959485a3f70455343 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Sun, 29 Sep 2024 21:21:26 +0200 Subject: [PATCH 091/170] rvsdg2dot use argument and result index as label (#633) In order to make it easier to read the graphs, arguments and results only get their index. The number in parenthesis is the index of the corresponding input/output of the structural node if any. ![image](https://github.com/user-attachments/assets/54b2c76a-9a79-49e0-9862-55fe9632764a) Let me know if you prefer a different label. 
Fixes #626 --- jlm/llvm/backend/dot/DotWriter.cpp | 14 ++++++++++++++ jlm/util/GraphWriter.cpp | 10 +++++----- scripts/build-circt.sh | 2 +- tests/jlm/llvm/backend/dot/DotWriterTests.cpp | 6 +++++- tests/jlm/util/TestGraphWriter.cpp | 6 +++--- 5 files changed, 28 insertions(+), 10 deletions(-) diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp index d3ea2a7fc..a20af9ec8 100644 --- a/jlm/llvm/backend/dot/DotWriter.cpp +++ b/jlm/llvm/backend/dot/DotWriter.cpp @@ -144,9 +144,16 @@ CreateGraphNodes(util::Graph & graph, rvsdg::Region & region, util::Graph * type auto & argument = *region.argument(n); AttachNodeOutput(node, argument, typeGraph); + // Give the argument a label using its local index, not the global argument index + node.SetLabel(util::strfmt("a", n)); + // If this argument corresponds to one of the structural node's inputs, reference it if (argument.input()) + { node.SetAttributeObject("input", *argument.input()); + // Include the local index of the node's input in the label + node.AppendToLabel(util::strfmt("<- i", argument.input()->index()), " "); + } } // Create a node for each node in the region in topological order. 
@@ -182,9 +189,16 @@ CreateGraphNodes(util::Graph & graph, rvsdg::Region & region, util::Graph * type auto & result = *region.result(n); AttachNodeInput(node, result); + // Use the result's local index as the label + node.SetLabel(util::strfmt("r", n)); + // If this result corresponds to one of the structural node's outputs, reference it if (result.output()) + { node.SetAttributeObject("output", *result.output()); + // Include the local index of the node's output in the label + node.AppendToLabel(util::strfmt("-> o", result.output()->index()), " "); + } } } diff --git a/jlm/util/GraphWriter.cpp b/jlm/util/GraphWriter.cpp index a00209994..3fd3dcf6f 100644 --- a/jlm/util/GraphWriter.cpp +++ b/jlm/util/GraphWriter.cpp @@ -597,7 +597,7 @@ InputPort::InputPort(jlm::util::InOutNode & node) const char * InputPort::GetIdPrefix() const { - return "i"; + return "in"; } Node & @@ -634,7 +634,7 @@ OutputPort::OutputPort(jlm::util::InOutNode & node) const char * OutputPort::GetIdPrefix() const { - return "o"; + return "out"; } Node & @@ -889,7 +889,7 @@ InOutNode::OutputDot(std::ostream & out, size_t indent) const for (auto & graph : SubGraphs_) { out << "\t\t\t\t\tGetFullId() << "\">"; + out << "_SUBGRAPH=\"" << graph->GetFullId() << "\">"; PrintStringAsHtmlText(out, graph->GetFullId(), true); out << "" << std::endl; } @@ -917,7 +917,7 @@ ArgumentNode::ArgumentNode(jlm::util::Graph & graph) const char * ArgumentNode::GetIdPrefix() const { - return "a"; + return "arg"; } bool @@ -958,7 +958,7 @@ ResultNode::ResultNode(jlm::util::Graph & graph) const char * ResultNode::GetIdPrefix() const { - return "r"; + return "res"; } bool diff --git a/scripts/build-circt.sh b/scripts/build-circt.sh index e6b8d5150..8fc1ad0f7 100755 --- a/scripts/build-circt.sh +++ b/scripts/build-circt.sh @@ -8,7 +8,7 @@ SCRIPT_DIR="$(dirname "$(realpath "$0")")" JLM_ROOT_DIR="$(realpath "${SCRIPT_DIR}/..")" CIRCT_BUILD=${JLM_ROOT_DIR}/build-circt CIRCT_INSTALL=${JLM_ROOT_DIR}/usr 
-LLVM_LIT_PATH=/usr/local/bin/lit +LLVM_LIT_PATH=`which lit` LLVM_VERSION=18 LLVM_CONFIG_BIN=llvm-config-${LLVM_VERSION} diff --git a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp index 2ec459639..8a664a6da 100644 --- a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp +++ b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp @@ -57,11 +57,15 @@ TestWriteGraphs() auto & argument = gammaNode.GetSubgraph(0).GetArgumentNode(0); auto & input = gammaNode.GetInputPort(1); assert(argument.GetAttributeGraphElement("input") == &input); + // The label also includes the attribute index and input index + assert(argument.GetLabel() == "a0 <- i1"); + auto & result = argument.GetConnections().front()->GetOtherEnd(argument); + assert(result.GetLabel() == "r0 -> o0"); // Check that the last argument is colored red to represent the memory state type auto & stateConnections = fctBody.GetArgumentNode(5).GetConnections(); assert(stateConnections.size() == 1); - assert(stateConnections[0]->GetAttributeString("color") == "#FF0000"); + assert(stateConnections.front()->GetAttributeString("color") == "#FF0000"); return 0; } diff --git a/tests/jlm/util/TestGraphWriter.cpp b/tests/jlm/util/TestGraphWriter.cpp index 61ab303f6..84420bf49 100644 --- a/tests/jlm/util/TestGraphWriter.cpp +++ b/tests/jlm/util/TestGraphWriter.cpp @@ -184,11 +184,11 @@ TestInOutNode() std::ostringstream out; node.Output(out, GraphOutputFormat::ASCII, 0); auto string = out.str(); - assert(StringContains(string, "o0, o1, o2 := \"My\\nInOutNode\" o2, []")); + assert(StringContains(string, "out0, out1, out2 := \"My\\nInOutNode\" out2, []")); // Check that the subgraph is also printed - assert(StringContains(string, "ARG a0:CTX <= o2")); - assert(StringContains(string, "RES a0:RETURN => o0")); + assert(StringContains(string, "ARG arg0:CTX <= out2")); + assert(StringContains(string, "RES arg0:RETURN => out0")); // Check that HTML labels with newlines turn into
std::ostringstream out2; From 403c348aa5e4aad46c8294a90350d945da320ff8 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 29 Sep 2024 22:51:01 +0200 Subject: [PATCH 092/170] Add iterator ranges to Region class (#630) Co-authored-by: HKrogstie --- jlm/rvsdg/region.hpp | 117 ++++++++++++++++++++++++++++++++ tests/jlm/rvsdg/RegionTests.cpp | 68 +++++++++++++++++++ 2 files changed, 185 insertions(+) diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 84b556e7d..851c293cc 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -12,6 +12,7 @@ #include #include +#include namespace jlm::util { @@ -173,6 +174,31 @@ class Region intrusive_list region_bottom_node_list; + using RegionArgumentIterator = std::vector::iterator; + using RegionArgumentConstIterator = std::vector::const_iterator; + using RegionArgumentRange = util::iterator_range; + using RegionArgumentConstRange = util::iterator_range; + + using RegionResultIterator = std::vector::iterator; + using RegionResultConstIterator = std::vector::const_iterator; + using RegionResultRange = util::iterator_range; + using RegionResultConstRange = util::iterator_range; + + using TopNodeIterator = region_top_node_list::iterator; + using TopNodeConstIterator = region_top_node_list::const_iterator; + using TopNodeRange = util::iterator_range; + using TopNodeConstRange = util::iterator_range; + + using NodeIterator = region_nodes_list::iterator; + using NodeConstIterator = region_nodes_list::const_iterator; + using NodeRange = util::iterator_range; + using NodeConstRange = util::iterator_range; + + using BottomNodeIterator = region_bottom_node_list::iterator; + using BottomNodeConstIterator = region_bottom_node_list::const_iterator; + using BottomNodeRange = util::iterator_range; + using BottomNodeConstRange = util::iterator_range; + public: ~Region() noexcept; @@ -180,6 +206,97 @@ class Region Region(rvsdg::structural_node * node, size_t index); + /** + * @return Returns an iterator range for iterating 
through the arguments of the region. + */ + [[nodiscard]] RegionArgumentRange + Arguments() noexcept + { + return { arguments_.begin(), arguments_.end() }; + } + + /** + * @return Returns an iterator range for iterating through the arguments of the region. + */ + [[nodiscard]] RegionArgumentConstRange + Arguments() const noexcept + { + return { arguments_.begin(), arguments_.end() }; + } + + /** + * @return Returns an iterator range for iterating through the results of the region. + */ + [[nodiscard]] RegionResultRange + Results() noexcept + { + return { results_.begin(), results_.end() }; + } + + /** + * @return Returns an iterator range for iterating through the results of the region. + */ + [[nodiscard]] RegionResultConstRange + Results() const noexcept + { + return { results_.begin(), results_.end() }; + } + + /** + * @return Returns an iterator range for iterating through the top nodes of the region. + */ + [[nodiscard]] TopNodeRange + TopNodes() noexcept + { + return { top_nodes.begin(), top_nodes.end() }; + } + + /** + * @return Returns an iterator range for iterating through the top nodes of the region. + */ + [[nodiscard]] TopNodeConstRange + TopNodes() const noexcept + { + return { top_nodes.begin(), top_nodes.end() }; + } + + /** + * @return Returns an iterator range for iterating through the nodes of the region. + */ + [[nodiscard]] NodeRange + Nodes() noexcept + { + return { nodes.begin(), nodes.end() }; + } + + /** + * @return Returns an iterator range for iterating through the nodes of the region. + */ + [[nodiscard]] NodeConstRange + Nodes() const noexcept + { + return { nodes.begin(), nodes.end() }; + } + + /** + * @return Returns an iterator range for iterating through the bottom nodes of the region. + */ + [[nodiscard]] BottomNodeRange + BottomNodes() noexcept + { + return { bottom_nodes.begin(), bottom_nodes.end() }; + } + + /** + * @return Returns an iterator range for iterating through the bottom nodes of the + * region. 
+ */ + [[nodiscard]] BottomNodeConstRange + BottomNodes() const noexcept + { + return { bottom_nodes.begin(), bottom_nodes.end() }; + } + inline region_nodes_list::iterator begin() { diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index f3880fde7..1b1b4330d 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -12,6 +12,74 @@ #include #include +static int +IteratorRanges() +{ + using namespace jlm::tests; + + // Arrange + auto valueType = valuetype::Create(); + + jlm::rvsdg::graph graph; + + auto structuralNode = structural_node::create(graph.root(), 1); + auto & subregion = *structuralNode->subregion(0); + auto & constSubregion = *static_cast(structuralNode->subregion(0)); + + auto & argument0 = TestGraphArgument::Create(subregion, nullptr, valueType); + auto & argument1 = TestGraphArgument::Create(subregion, nullptr, valueType); + + auto topNode0 = test_op::create(&subregion, {}, { valueType }); + auto node0 = test_op::create(&subregion, { &argument0 }, { valueType }); + auto node1 = test_op::create(&subregion, { &argument1 }, { valueType }); + auto bottomNode0 = test_op::create(&subregion, { &argument0, &argument1 }, { valueType }); + + auto & result0 = TestGraphResult::Create(*topNode0->output(0), nullptr); + auto & result1 = TestGraphResult::Create(*node0->output(0), nullptr); + auto & result2 = TestGraphResult::Create(*node1->output(0), nullptr); + + // Act & Assert + auto numArguments = std::distance(subregion.Arguments().begin(), subregion.Arguments().end()); + assert(numArguments == 2); + for (auto & argument : constSubregion.Arguments()) + { + assert(argument == &argument0 || argument == &argument1); + } + + auto numTopNodes = std::distance(subregion.TopNodes().begin(), subregion.TopNodes().end()); + assert(numTopNodes == 1); + for (auto & topNode : constSubregion.TopNodes()) + { + assert(&topNode == topNode0); + } + + auto numNodes = std::distance(subregion.Nodes().begin(), 
subregion.Nodes().end()); + assert(numNodes == 4); + for (auto & node : constSubregion.Nodes()) + { + assert(&node == topNode0 || &node == node0 || &node == node1 || &node == bottomNode0); + } + + auto numBottomNodes = + std::distance(subregion.BottomNodes().begin(), subregion.BottomNodes().end()); + assert(numBottomNodes == 1); + for (auto & bottomNode : constSubregion.BottomNodes()) + { + assert(&bottomNode == bottomNode0); + } + + auto numResults = std::distance(subregion.Results().begin(), subregion.Results().end()); + assert(numResults == 3); + for (auto & result : constSubregion.Results()) + { + assert(result == &result0 || result == &result1 || result == &result2); + } + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-IteratorRanges", IteratorRanges) + /** * Test Region::Contains(). */ From 2b85b4c1f98b168cbeb55cbfe01418b7a5bfd6c0 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 30 Sep 2024 10:33:54 +0200 Subject: [PATCH 093/170] Rename type class to Type (#640) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 8 +- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 4 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.hpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.hpp | 2 +- .../rhls2firrtl/verilator-harness-hls.cpp | 4 +- .../rhls2firrtl/verilator-harness-hls.hpp | 4 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/add-triggers.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 +- .../rvsdg2rhls/remove-unused-state.cpp | 4 +- jlm/hls/ir/hls.cpp | 6 +- jlm/hls/ir/hls.hpp | 150 +++++++-------- jlm/llvm/backend/dot/DotWriter.cpp | 2 +- jlm/llvm/backend/jlm2llvm/type.cpp | 6 +- jlm/llvm/backend/jlm2llvm/type.hpp | 18 +- .../frontend/LlvmInstructionConversion.cpp | 6 +- jlm/llvm/frontend/LlvmTypeConversion.cpp | 4 +- jlm/llvm/ir/cfg.hpp | 14 +- jlm/llvm/ir/ipgraph-module.hpp | 
4 +- jlm/llvm/ir/ipgraph.cpp | 6 +- jlm/llvm/ir/ipgraph.hpp | 10 +- jlm/llvm/ir/operators/GetElementPtr.hpp | 10 +- jlm/llvm/ir/operators/Load.hpp | 28 +-- jlm/llvm/ir/operators/MemCpy.hpp | 26 +-- jlm/llvm/ir/operators/Phi.cpp | 2 +- jlm/llvm/ir/operators/Phi.hpp | 24 +-- jlm/llvm/ir/operators/Store.hpp | 30 +-- jlm/llvm/ir/operators/call.hpp | 8 +- jlm/llvm/ir/operators/delta.hpp | 4 +- jlm/llvm/ir/operators/lambda.hpp | 8 +- jlm/llvm/ir/operators/operators.hpp | 174 +++++++++--------- jlm/llvm/ir/operators/sext.hpp | 6 +- jlm/llvm/ir/tac.hpp | 4 +- jlm/llvm/ir/types.cpp | 34 ++-- jlm/llvm/ir/types.hpp | 60 +++--- jlm/llvm/ir/variable.hpp | 10 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 2 +- .../TopDownMemoryNodeEliminator.cpp | 2 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 2 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 2 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 6 +- jlm/mlir/frontend/MlirToJlmConverter.hpp | 2 +- jlm/rvsdg/binary.hpp | 4 +- jlm/rvsdg/bitstring/slice.hpp | 2 +- jlm/rvsdg/bitstring/type.cpp | 2 +- jlm/rvsdg/bitstring/type.hpp | 2 +- jlm/rvsdg/control.cpp | 2 +- jlm/rvsdg/control.hpp | 6 +- jlm/rvsdg/gamma.hpp | 6 +- jlm/rvsdg/graph.cpp | 2 +- jlm/rvsdg/graph.hpp | 2 +- jlm/rvsdg/node.cpp | 8 +- jlm/rvsdg/node.hpp | 20 +- jlm/rvsdg/nullary.hpp | 2 +- jlm/rvsdg/operation.cpp | 4 +- jlm/rvsdg/operation.hpp | 12 +- jlm/rvsdg/region.cpp | 4 +- jlm/rvsdg/region.hpp | 4 +- jlm/rvsdg/simple-node.cpp | 4 +- jlm/rvsdg/simple-node.hpp | 4 +- jlm/rvsdg/statemux.hpp | 6 +- jlm/rvsdg/structural-node.cpp | 4 +- jlm/rvsdg/structural-node.hpp | 8 +- jlm/rvsdg/theta.hpp | 4 +- jlm/rvsdg/type.cpp | 2 +- jlm/rvsdg/type.hpp | 30 +-- jlm/rvsdg/unary.hpp | 4 +- .../llvm/ThreeAddressCodeConversionTests.cpp | 4 +- tests/test-operation.cpp | 2 +- tests/test-operation.hpp | 68 +++---- tests/test-types.cpp | 4 +- tests/test-types.hpp | 4 +- 75 files changed, 474 insertions(+), 474 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp 
b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index ad37a6d26..2e719b4ec 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -320,7 +320,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) mlir::Value result = AddCvtOp(body, input0); // TODO: support structs - const jlm::rvsdg::type * pointeeType = &op->GetPointeeType(); + const jlm::rvsdg::Type * pointeeType = &op->GetPointeeType(); for (size_t i = 1; i < node->ninputs(); i++) { int bits = JlmSize(pointeeType); @@ -3888,13 +3888,13 @@ RhlsToFirrtlConverter::GetIntType(int size) // which is useful for, e.g., additions where the result has to be 1 // larger than the operands to accommodate for the carry. circt::firrtl::IntType -RhlsToFirrtlConverter::GetIntType(const jlm::rvsdg::type * type, int extend) +RhlsToFirrtlConverter::GetIntType(const jlm::rvsdg::Type * type, int extend) { return circt::firrtl::IntType::get(Builder_->getContext(), false, JlmSize(type) + extend); } circt::firrtl::FIRRTLBaseType -RhlsToFirrtlConverter::GetFirrtlType(const jlm::rvsdg::type * type) +RhlsToFirrtlConverter::GetFirrtlType(const jlm::rvsdg::Type * type) { if (auto bt = dynamic_cast(type)) { @@ -3933,7 +3933,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) } if (auto op = dynamic_cast(&node->operation())) { - const jlm::rvsdg::type * pointeeType = &op->GetPointeeType(); + const jlm::rvsdg::Type * pointeeType = &op->GetPointeeType(); for (size_t i = 1; i < node->ninputs(); i++) { int bits = JlmSize(pointeeType); diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 63bd27aa5..237739bff 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -266,9 +266,9 @@ class RhlsToFirrtlConverter : public BaseHLS circt::firrtl::IntType GetIntType(int size); 
circt::firrtl::IntType - GetIntType(const jlm::rvsdg::type * type, int extend = 0); + GetIntType(const jlm::rvsdg::Type * type, int extend = 0); circt::firrtl::FIRRTLBaseType - GetFirrtlType(const jlm::rvsdg::type * type); + GetFirrtlType(const jlm::rvsdg::Type * type); std::string GetModuleName(const jlm::rvsdg::node * node); bool diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 7064e2336..a4c25860a 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -103,7 +103,7 @@ BaseHLS::get_port_name(jlm::rvsdg::output * port) } int -BaseHLS::JlmSize(const jlm::rvsdg::type * type) +BaseHLS::JlmSize(const jlm::rvsdg::Type * type) { if (auto bt = dynamic_cast(type)) { diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index 8c8769492..471bc2d7d 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -32,7 +32,7 @@ class BaseHLS } static int - JlmSize(const jlm::rvsdg::type * type); + JlmSize(const jlm::rvsdg::Type * type); /** * @return The size of a pointer in bits. 
diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index bb426c605..2f3c5dc20 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -167,7 +167,7 @@ DotHLS::node_to_dot(const jlm::rvsdg::node * node) } std::string -DotHLS::edge(std::string src, std::string snk, const jlm::rvsdg::type & type, bool back) +DotHLS::edge(std::string src, std::string snk, const jlm::rvsdg::Type & type, bool back) { auto color = "black"; JLM_ASSERT(src != "" && snk != ""); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp index 24130563c..0934fa548 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp @@ -31,7 +31,7 @@ class DotHLS : public BaseHLS node_to_dot(const jlm::rvsdg::node * node); std::string - edge(std::string src, std::string snk, const jlm::rvsdg::type & type, bool back = false); + edge(std::string src, std::string snk, const jlm::rvsdg::Type & type, bool back = false); std::string loop_to_dot(hls::loop_node * ln); diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index 45bf5a6f8..510531909 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -710,7 +710,7 @@ VerilatorHarnessHLS::get_function_header( } std::string -VerilatorHarnessHLS::convert_to_c_type(const jlm::rvsdg::type * type) +VerilatorHarnessHLS::convert_to_c_type(const jlm::rvsdg::Type * type) { if (auto t = dynamic_cast(type)) { @@ -731,7 +731,7 @@ VerilatorHarnessHLS::convert_to_c_type(const jlm::rvsdg::type * type) } std::string -VerilatorHarnessHLS::convert_to_c_type_postfix(const jlm::rvsdg::type * type) +VerilatorHarnessHLS::convert_to_c_type_postfix(const jlm::rvsdg::Type * type) { if (auto t = dynamic_cast(type)) { diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp 
b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp index b93041ed9..cfe24aae7 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp @@ -48,10 +48,10 @@ class VerilatorHarnessHLS : public BaseHLS } std::string - convert_to_c_type(const jlm::rvsdg::type * type); + convert_to_c_type(const jlm::rvsdg::Type * type); std::string - convert_to_c_type_postfix(const jlm::rvsdg::type * type); + convert_to_c_type_postfix(const jlm::rvsdg::Type * type); void get_function_header( diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 6370e7373..0505b22fc 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -36,7 +36,7 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) { auto & oldFunctionType = lambdaNode.type(); - std::vector> newArgumentTypes; + std::vector> newArgumentTypes; for (size_t i = 0; i < oldFunctionType.NumArguments(); ++i) { auto argument = lambdaNode.subregion()->argument(i); @@ -49,7 +49,7 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) } } - std::vector> newResultTypes; + std::vector> newResultTypes; for (size_t i = 0; i < oldFunctionType.NumResults(); ++i) { auto result = lambdaNode.subregion()->result(i); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 6492c7bc7..a964486ad 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -28,16 +28,16 @@ get_trigger(rvsdg::Region * region) } jlm::llvm::lambda::node * -add_lambda_argument(llvm::lambda::node * ln, std::shared_ptr type) +add_lambda_argument(llvm::lambda::node * ln, std::shared_ptr type) { auto old_fcttype = ln->type(); - std::vector> new_argument_types; + std::vector> new_argument_types; for (size_t i = 0; i < old_fcttype.NumArguments(); ++i) { 
new_argument_types.push_back(old_fcttype.Arguments()[i]); } new_argument_types.push_back(std::move(type)); - std::vector> new_result_types; + std::vector> new_result_types; for (size_t i = 0; i < old_fcttype.NumResults(); ++i) { new_result_types.push_back(old_fcttype.Results()[i]); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.hpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.hpp index c48c2248c..2027398a3 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.hpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.hpp @@ -17,7 +17,7 @@ rvsdg::output * get_trigger(rvsdg::Region * region); llvm::lambda::node * -add_lambda_argument(llvm::lambda::node * ln, const rvsdg::type * type); +add_lambda_argument(llvm::lambda::node * ln, const rvsdg::Type * type); void add_triggers(rvsdg::Region * region); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 03b328d04..a912d8edf 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -566,12 +566,12 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) // This modifies the function signature so we create a new lambda node to replace the old one // auto oldFunctionType = lambda->type(); - std::vector> newArgumentTypes; + std::vector> newArgumentTypes; for (size_t i = 0; i < oldFunctionType.NumArguments(); ++i) { newArgumentTypes.push_back(oldFunctionType.Arguments()[i]); } - std::vector> newResultTypes; + std::vector> newResultTypes; for (size_t i = 0; i < oldFunctionType.NumResults(); ++i) { newResultTypes.push_back(oldFunctionType.Results()[i]); diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index e0f8122c0..aaba59a54 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -174,7 +174,7 @@ jlm::llvm::lambda::node * remove_lambda_passthrough(llvm::lambda::node * ln) { auto old_fcttype = ln->type(); - 
std::vector> new_argument_types; + std::vector> new_argument_types; for (size_t i = 0; i < old_fcttype.NumArguments(); ++i) { auto arg = ln->subregion()->argument(i); @@ -185,7 +185,7 @@ remove_lambda_passthrough(llvm::lambda::node * ln) new_argument_types.push_back(argtype); } } - std::vector> new_result_types; + std::vector> new_result_types; for (size_t i = 0; i < old_fcttype.NumResults(); ++i) { auto res = ln->subregion()->result(i); diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 626554663..2be0b8c4d 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -155,7 +155,7 @@ loop_node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const } backedge_argument * -loop_node::add_backedge(std::shared_ptr type) +loop_node::add_backedge(std::shared_ptr type) { auto argument_loop = backedge_argument::create(subregion(), std::move(type)); auto result_loop = backedge_result::create(argument_loop); @@ -194,7 +194,7 @@ loop_node::set_predicate(jlm::rvsdg::output * p) std::shared_ptr get_mem_req_type(std::shared_ptr elementType, bool write) { - std::vector>> elements; + std::vector>> elements; elements.emplace_back("addr", llvm::PointerType::Create()); elements.emplace_back("size", jlm::rvsdg::bittype::Create(4)); elements.emplace_back("id", jlm::rvsdg::bittype::Create(8)); @@ -209,7 +209,7 @@ get_mem_req_type(std::shared_ptr elementType, bool write std::shared_ptr get_mem_res_type(std::shared_ptr dataType) { - std::vector>> elements; + std::vector>> elements; elements.emplace_back("data", std::move(dataType)); elements.emplace_back("id", jlm::rvsdg::bittype::Create(8)); return std::make_shared(std::move(elements)); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index e169a66df..edf71b3c7 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -23,7 +23,7 @@ namespace jlm::hls class branch_op final : public jlm::rvsdg::simple_op { private: - branch_op(size_t nalternatives, const std::shared_ptr & type, bool loop) + branch_op(size_t 
nalternatives, const std::shared_ptr & type, bool loop) : jlm::rvsdg::simple_op( { jlm::rvsdg::ctltype::Create(nalternatives), type }, { nalternatives, type }), @@ -94,7 +94,7 @@ class fork_op final : public jlm::rvsdg::simple_op * /param nalternatives Number of outputs. * /param value The signal type, which is the same for the input and all outputs. */ - fork_op(size_t nalternatives, const std::shared_ptr & type) + fork_op(size_t nalternatives, const std::shared_ptr & type) : jlm::rvsdg::simple_op({ type }, { nalternatives, type }) {} @@ -107,7 +107,7 @@ class fork_op final : public jlm::rvsdg::simple_op */ fork_op( size_t nalternatives, - const std::shared_ptr & type, + const std::shared_ptr & type, bool isConstant) : rvsdg::simple_op({ type }, { nalternatives, type }), IsConstant_(isConstant) @@ -178,7 +178,7 @@ class merge_op final : public jlm::rvsdg::simple_op virtual ~merge_op() {} - merge_op(size_t nalternatives, const std::shared_ptr & type) + merge_op(size_t nalternatives, const std::shared_ptr & type) : jlm::rvsdg::simple_op({ nalternatives, type }, { type }) {} @@ -221,7 +221,7 @@ class mux_op final : public jlm::rvsdg::simple_op mux_op( size_t nalternatives, - const std::shared_ptr & type, + const std::shared_ptr & type, bool discarding, bool loop) : jlm::rvsdg::simple_op(create_typevector(nalternatives, type), { type }), @@ -276,11 +276,11 @@ class mux_op final : public jlm::rvsdg::simple_op bool discarding; bool loop; // used only for dot output private: - static std::vector> - create_typevector(size_t nalternatives, std::shared_ptr type) + static std::vector> + create_typevector(size_t nalternatives, std::shared_ptr type) { auto vec = - std::vector>(nalternatives + 1, std::move(type)); + std::vector>(nalternatives + 1, std::move(type)); vec[0] = jlm::rvsdg::ctltype::Create(nalternatives); return vec; } @@ -292,7 +292,7 @@ class sink_op final : public jlm::rvsdg::simple_op virtual ~sink_op() {} - explicit sink_op(const std::shared_ptr & type) + 
explicit sink_op(const std::shared_ptr & type) : jlm::rvsdg::simple_op({ type }, {}) {} @@ -373,7 +373,7 @@ class loop_constant_buffer_op final : public jlm::rvsdg::simple_op loop_constant_buffer_op( const std::shared_ptr & ctltype, - const std::shared_ptr & type) + const std::shared_ptr & type) : jlm::rvsdg::simple_op({ ctltype, type }, { type }) {} @@ -415,7 +415,7 @@ class buffer_op final : public jlm::rvsdg::simple_op {} buffer_op( - const std::shared_ptr & type, + const std::shared_ptr & type, size_t capacity, bool pass_through) : jlm::rvsdg::simple_op({ type }, { type }), @@ -474,7 +474,7 @@ class triggertype final : public jlm::rvsdg::statetype }; bool - operator==(const jlm::rvsdg::type & other) const noexcept override + operator==(const jlm::rvsdg::Type & other) const noexcept override { auto type = dynamic_cast(&other); return type; @@ -493,7 +493,7 @@ class trigger_op final : public jlm::rvsdg::simple_op virtual ~trigger_op() {} - explicit trigger_op(const std::shared_ptr & type) + explicit trigger_op(const std::shared_ptr & type) : jlm::rvsdg::simple_op({ triggertype::Create(), type }, { type }) {} @@ -538,7 +538,7 @@ class print_op final : public jlm::rvsdg::simple_op virtual ~print_op() {} - explicit print_op(const std::shared_ptr & type) + explicit print_op(const std::shared_ptr & type) : jlm::rvsdg::simple_op({ type }, { type }) { static size_t common_id{ 0 }; @@ -621,7 +621,7 @@ class EntryArgument : public rvsdg::RegionArgument EntryArgument( rvsdg::Region & region, rvsdg::structural_input & input, - const std::shared_ptr type) + const std::shared_ptr type) : rvsdg::RegionArgument(®ion, &input, std::move(type)) {} @@ -635,7 +635,7 @@ class EntryArgument : public rvsdg::RegionArgument Create( rvsdg::Region & region, rvsdg::structural_input & input, - const std::shared_ptr type) + const std::shared_ptr type) { auto argument = new EntryArgument(region, input, std::move(type)); region.append_argument(argument); @@ -661,13 +661,13 @@ class 
backedge_argument : public rvsdg::RegionArgument Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override; private: - backedge_argument(rvsdg::Region * region, const std::shared_ptr & type) + backedge_argument(rvsdg::Region * region, const std::shared_ptr & type) : rvsdg::RegionArgument(region, nullptr, type), result_(nullptr) {} static backedge_argument * - create(rvsdg::Region * region, std::shared_ptr type) + create(rvsdg::Region * region, std::shared_ptr type) { auto argument = new backedge_argument(region, std::move(type)); region->append_argument(argument); @@ -784,7 +784,7 @@ class loop_node final : public jlm::rvsdg::structural_node set_predicate(jlm::rvsdg::output * p); backedge_argument * - add_backedge(std::shared_ptr type); + add_backedge(std::shared_ptr type); jlm::rvsdg::structural_output * add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer = nullptr); @@ -803,7 +803,7 @@ class bundletype final : public jlm::rvsdg::valuetype {} bundletype( - const std::vector>> elements) + const std::vector>> elements) : jlm::rvsdg::valuetype(), elements_(std::move(elements)) {} @@ -819,7 +819,7 @@ class bundletype final : public jlm::rvsdg::valuetype operator=(bundletype &&) = delete; virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override + operator==(const jlm::rvsdg::Type & other) const noexcept override { auto type = dynamic_cast(&other); // TODO: better comparison? 
@@ -841,7 +841,7 @@ class bundletype final : public jlm::rvsdg::valuetype [[nodiscard]] std::size_t ComputeHash() const noexcept override; - std::shared_ptr + std::shared_ptr get_element_type(std::string element) const { for (size_t i = 0; i < elements_.size(); ++i) @@ -863,7 +863,7 @@ class bundletype final : public jlm::rvsdg::valuetype // private: // TODO: fix memory leak - const std::vector>> elements_; + const std::vector>> elements_; }; std::shared_ptr @@ -891,13 +891,13 @@ class load_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(std::shared_ptr pointeeType, size_t numStates) { - std::vector> types( + std::vector> types( 1, llvm::PointerType::Create()); // addr - std::vector> states( + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -905,11 +905,11 @@ class load_op final : public jlm::rvsdg::simple_op return types; } - static std::vector> + static std::vector> CreateOutTypes(std::shared_ptr pointeeType, size_t numStates) { - std::vector> types(1, std::move(pointeeType)); - std::vector> states( + std::vector> types(1, std::move(pointeeType)); + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -983,16 +983,16 @@ class addr_queue_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(std::shared_ptr pointerType) { // check, enq - std::vector> types(2, std::move(pointerType)); + std::vector> types(2, std::move(pointerType)); types.emplace_back(llvm::MemoryStateType::Create()); // deq return types; } - static std::vector> + static std::vector> CreateOutTypes(std::shared_ptr pointerType) { return { std::move(pointerType) }; @@ -1038,7 +1038,7 @@ class 
state_gate_op final : public jlm::rvsdg::simple_op virtual ~state_gate_op() {} - state_gate_op(const std::shared_ptr & type, size_t numStates) + state_gate_op(const std::shared_ptr & type, size_t numStates) : simple_op(CreateInOutTypes(type, numStates), CreateInOutTypes(type, numStates)) {} @@ -1050,11 +1050,11 @@ class state_gate_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> - CreateInOutTypes(const std::shared_ptr & type, size_t numStates) + static std::vector> + CreateInOutTypes(const std::shared_ptr & type, size_t numStates) { - std::vector> types(1, type); - std::vector> states( + std::vector> types(1, type); + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -1103,18 +1103,18 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(std::shared_ptr pointeeType) { - std::vector> types(1, llvm::PointerType::Create()); + std::vector> types(1, llvm::PointerType::Create()); types.emplace_back(std::move(pointeeType)); // result return types; } - static std::vector> + static std::vector> CreateOutTypes(std::shared_ptr pointeeType) { - std::vector> types(1, std::move(pointeeType)); + std::vector> types(1, std::move(pointeeType)); types.emplace_back(llvm::PointerType::Create()); // addr return types; } @@ -1173,7 +1173,7 @@ class mem_resp_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(const std::vector> & output_types) { size_t max_width = 64; @@ -1183,15 +1183,15 @@ class mem_resp_op final : public jlm::rvsdg::simple_op // auto sz = jlm::hls::BaseHLS::JlmSize(tp); // max_width = sz>max_width?sz:max_width; // } - 
std::vector> types; + std::vector> types; types.emplace_back(get_mem_res_type(jlm::rvsdg::bittype::Create(max_width))); return types; } - static std::vector> + static std::vector> CreateOutTypes(const std::vector> & output_types) { - std::vector> types; + std::vector> types; types.reserve(output_types.size()); for (auto outputType : output_types) { @@ -1260,12 +1260,12 @@ class mem_req_op final : public jlm::rvsdg::simple_op && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes( const std::vector> & load_types, const std::vector> & store_types) { - std::vector> types; + std::vector> types; for (size_t i = 0; i < load_types.size(); i++) { types.emplace_back(llvm::PointerType::Create()); // addr @@ -1278,7 +1278,7 @@ class mem_req_op final : public jlm::rvsdg::simple_op return types; } - static std::vector> + static std::vector> CreateOutTypes( const std::vector> & load_types, const std::vector> & store_types) @@ -1294,7 +1294,7 @@ class mem_req_op final : public jlm::rvsdg::simple_op // auto sz = jlm::hls::BaseHLS::JlmSize(tp); // max_width = sz>max_width?sz:max_width; // } - std::vector> types; + std::vector> types; types.emplace_back( get_mem_req_type(jlm::rvsdg::bittype::Create(max_width), !store_types.empty())); return types; @@ -1340,21 +1340,21 @@ class mem_req_op final : public jlm::rvsdg::simple_op return LoadTypes_.size(); } - const std::vector> * + const std::vector> * GetLoadTypes() const { return &LoadTypes_; } - const std::vector> * + const std::vector> * GetStoreTypes() const { return &StoreTypes_; } private: - std::vector> LoadTypes_; - std::vector> StoreTypes_; + std::vector> LoadTypes_; + std::vector> StoreTypes_; }; class store_op final : public jlm::rvsdg::simple_op @@ -1376,22 +1376,22 @@ class store_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(const std::shared_ptr & 
pointeeType, size_t numStates) { - std::vector> types( + std::vector> types( { llvm::PointerType::Create(), pointeeType }); - std::vector> states( + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); return types; } - static std::vector> + static std::vector> CreateOutTypes(const std::shared_ptr & pointeeType, size_t numStates) { - std::vector> types( + std::vector> types( numStates, llvm::MemoryStateType::Create()); types.emplace_back(llvm::PointerType::Create()); // addr @@ -1457,10 +1457,10 @@ class local_mem_op final : public jlm::rvsdg::simple_op return false; } - static std::vector> + static std::vector> CreateOutTypes(std::shared_ptr at) { - std::vector> types(2, std::move(at)); + std::vector> types(2, std::move(at)); return types; } @@ -1503,10 +1503,10 @@ class local_mem_resp_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateOutTypes(const std::shared_ptr & at, size_t resp_count) { - std::vector> types(resp_count, at->GetElementType()); + std::vector> types(resp_count, at->GetElementType()); return types; } @@ -1551,11 +1551,11 @@ class local_load_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(const std::shared_ptr & valuetype, size_t numStates) { - std::vector> types(1, jlm::rvsdg::bittype::Create(64)); - std::vector> states( + std::vector> types(1, jlm::rvsdg::bittype::Create(64)); + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -1563,11 +1563,11 @@ class local_load_op final : public jlm::rvsdg::simple_op return types; } - static std::vector> + static std::vector> CreateOutTypes(const std::shared_ptr & valuetype, size_t numStates) { - std::vector> 
types(1, valuetype); - std::vector> states( + std::vector> types(1, valuetype); + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -1629,22 +1629,22 @@ class local_store_op final : public jlm::rvsdg::simple_op return ot && *ot->argument(1) == *argument(1) && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes(const std::shared_ptr & valuetype, size_t numStates) { - std::vector> types( + std::vector> types( { jlm::rvsdg::bittype::Create(64), valuetype }); - std::vector> states( + std::vector> states( numStates, llvm::MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); return types; } - static std::vector> + static std::vector> CreateOutTypes(const std::shared_ptr & valuetype, size_t numStates) { - std::vector> types( + std::vector> types( numStates, llvm::MemoryStateType::Create()); types.emplace_back(jlm::rvsdg::bittype::Create(64)); // addr @@ -1711,13 +1711,13 @@ class local_mem_req_op final : public jlm::rvsdg::simple_op && ot->narguments() == narguments(); } - static std::vector> + static std::vector> CreateInTypes( const std::shared_ptr & at, size_t load_cnt, size_t store_cnt) { - std::vector> types(1, at); + std::vector> types(1, at); for (size_t i = 0; i < load_cnt; ++i) { types.emplace_back(jlm::rvsdg::bittype::Create(64)); // addr diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp index a20af9ec8..30aaec6bc 100644 --- a/jlm/llvm/backend/dot/DotWriter.cpp +++ b/jlm/llvm/backend/dot/DotWriter.cpp @@ -21,7 +21,7 @@ namespace jlm::llvm::dot * The function is recursive, and will create nodes for subtypes of aggregate types. 
*/ static util::Node & -GetOrCreateTypeGraphNode(const rvsdg::type & type, util::Graph & typeGraph) +GetOrCreateTypeGraphNode(const rvsdg::Type & type, util::Graph & typeGraph) { // If the type already has a corresponding node, return it if (auto * graphElement = typeGraph.GetElementFromProgramObject(type)) diff --git a/jlm/llvm/backend/jlm2llvm/type.cpp b/jlm/llvm/backend/jlm2llvm/type.cpp index f93469242..fdb9250f0 100644 --- a/jlm/llvm/backend/jlm2llvm/type.cpp +++ b/jlm/llvm/backend/jlm2llvm/type.cpp @@ -128,17 +128,17 @@ convert(const scalablevectortype & type, context & ctx) template static ::llvm::Type * -convert(const rvsdg::type & type, context & ctx) +convert(const rvsdg::Type & type, context & ctx) { JLM_ASSERT(rvsdg::is(type)); return convert(*static_cast(&type), ctx); } ::llvm::Type * -convert_type(const rvsdg::type & type, context & ctx) +convert_type(const rvsdg::Type & type, context & ctx) { static std:: - unordered_map> + unordered_map> map({ { typeid(rvsdg::bittype), convert }, { typeid(FunctionType), convert }, { typeid(PointerType), convert }, diff --git a/jlm/llvm/backend/jlm2llvm/type.hpp b/jlm/llvm/backend/jlm2llvm/type.hpp index 1aaffbb08..1baf4354b 100644 --- a/jlm/llvm/backend/jlm2llvm/type.hpp +++ b/jlm/llvm/backend/jlm2llvm/type.hpp @@ -27,12 +27,12 @@ namespace jlm::llvm::jlm2llvm class context; ::llvm::Type * -convert_type(const rvsdg::type & type, context & ctx); +convert_type(const rvsdg::Type & type, context & ctx); static inline ::llvm::IntegerType * convert_type(const rvsdg::bittype & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->getTypeID() == ::llvm::Type::IntegerTyID); return ::llvm::cast<::llvm::IntegerType>(t); } @@ -40,7 +40,7 @@ convert_type(const rvsdg::bittype & type, context & ctx) static inline ::llvm::FunctionType * convert_type(const FunctionType & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + 
auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->getTypeID() == ::llvm::Type::FunctionTyID); return ::llvm::cast<::llvm::FunctionType>(t); } @@ -48,7 +48,7 @@ convert_type(const FunctionType & type, context & ctx) static inline ::llvm::PointerType * convert_type(const PointerType & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->getTypeID() == ::llvm::Type::PointerTyID); return ::llvm::cast<::llvm::PointerType>(t); } @@ -56,7 +56,7 @@ convert_type(const PointerType & type, context & ctx) static inline ::llvm::ArrayType * convert_type(const arraytype & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->getTypeID() == ::llvm::Type::ArrayTyID); return ::llvm::cast<::llvm::ArrayType>(t); } @@ -64,7 +64,7 @@ convert_type(const arraytype & type, context & ctx) static inline ::llvm::IntegerType * convert_type(const rvsdg::ctltype & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->getTypeID() == ::llvm::Type::IntegerTyID); return ::llvm::cast<::llvm::IntegerType>(t); } @@ -72,7 +72,7 @@ convert_type(const rvsdg::ctltype & type, context & ctx) static inline ::llvm::Type * convert_type(const fptype & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->isHalfTy() || t->isFloatTy() || t->isDoubleTy()); return t; } @@ -80,7 +80,7 @@ convert_type(const fptype & type, context & ctx) static inline ::llvm::StructType * convert_type(const StructType & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->isStructTy()); return ::llvm::cast<::llvm::StructType>(t); } @@ -88,7 +88,7 @@ convert_type(const StructType & type, context & ctx) 
static inline ::llvm::VectorType * convert_type(const vectortype & type, context & ctx) { - auto t = convert_type(*static_cast(&type), ctx); + auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->isVectorTy()); return ::llvm::cast<::llvm::VectorType>(t); } diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index 78609ec51..60c8647c8 100644 --- a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -1159,7 +1159,7 @@ convert(::llvm::UnaryOperator * unaryOperator, tacsvector_t & threeAddressCodeVe template static std::unique_ptr -create_unop(std::shared_ptr st, std::shared_ptr dt) +create_unop(std::shared_ptr st, std::shared_ptr dt) { return std::unique_ptr(new OP(std::move(st), std::move(dt))); } @@ -1174,8 +1174,8 @@ convert_cast_instruction(::llvm::Instruction * i, tacsvector_t & tacs, context & static std::unordered_map< unsigned, std::unique_ptr (*)( - std::shared_ptr, - std::shared_ptr)> + std::shared_ptr, + std::shared_ptr)> map({ { ::llvm::Instruction::Trunc, create_unop }, { ::llvm::Instruction::ZExt, create_unop }, { ::llvm::Instruction::UIToFP, create_unop }, diff --git a/jlm/llvm/frontend/LlvmTypeConversion.cpp b/jlm/llvm/frontend/LlvmTypeConversion.cpp index 8ace3b345..97b7a2b7b 100644 --- a/jlm/llvm/frontend/LlvmTypeConversion.cpp +++ b/jlm/llvm/frontend/LlvmTypeConversion.cpp @@ -54,7 +54,7 @@ convert_function_type(const ::llvm::Type * t, context & ctx) auto type = ::llvm::cast(t); /* arguments */ - std::vector> argumentTypes; + std::vector> argumentTypes; for (size_t n = 0; n < type->getNumParams(); n++) argumentTypes.push_back(ConvertType(type->getParamType(n), ctx)); if (type->isVarArg()) @@ -63,7 +63,7 @@ convert_function_type(const ::llvm::Type * t, context & ctx) argumentTypes.push_back(MemoryStateType::Create()); /* results */ - std::vector> resultTypes; + std::vector> resultTypes; if (type->getReturnType()->getTypeID() != 
::llvm::Type::VoidTyID) resultTypes.push_back(ConvertType(type->getReturnType(), ctx)); resultTypes.push_back(iostatetype::Create()); diff --git a/jlm/llvm/ir/cfg.hpp b/jlm/llvm/ir/cfg.hpp index 7e0e51c5e..8e4b5cefc 100644 --- a/jlm/llvm/ir/cfg.hpp +++ b/jlm/llvm/ir/cfg.hpp @@ -30,19 +30,19 @@ class argument final : public variable argument( const std::string & name, - std::shared_ptr type, + std::shared_ptr type, const attributeset & attributes) : variable(std::move(type), name), attributes_(attributes) {} - argument(const std::string & name, std::shared_ptr type) + argument(const std::string & name, std::shared_ptr type) : variable(std::move(type), name) {} argument( const std::string & name, - std::unique_ptr type, + std::unique_ptr type, const attributeset & attributes) : variable(std::move(type), name), attributes_(attributes) @@ -57,14 +57,14 @@ class argument final : public variable static std::unique_ptr create( const std::string & name, - std::shared_ptr type, + std::shared_ptr type, const attributeset & attributes) { return std::make_unique(name, std::move(type), attributes); } static std::unique_ptr - create(const std::string & name, std::shared_ptr type) + create(const std::string & name, std::shared_ptr type) { return create(name, std::move(type), {}); } @@ -359,11 +359,11 @@ class cfg final FunctionType fcttype() const { - std::vector> arguments; + std::vector> arguments; for (size_t n = 0; n < entry()->narguments(); n++) arguments.push_back(entry()->argument(n)->Type()); - std::vector> results; + std::vector> results; for (size_t n = 0; n < exit()->nresults(); n++) results.push_back(exit()->result(n)->Type()); diff --git a/jlm/llvm/ir/ipgraph-module.hpp b/jlm/llvm/ir/ipgraph-module.hpp index 49b8db1df..0f9436559 100644 --- a/jlm/llvm/ir/ipgraph-module.hpp +++ b/jlm/llvm/ir/ipgraph-module.hpp @@ -117,7 +117,7 @@ class ipgraph_module final } inline llvm::variable * - create_variable(std::shared_ptr type, const std::string & name) + 
create_variable(std::shared_ptr type, const std::string & name) { auto v = std::make_unique(std::move(type), name); auto pv = v.get(); @@ -126,7 +126,7 @@ class ipgraph_module final } inline llvm::variable * - create_variable(std::shared_ptr type) + create_variable(std::shared_ptr type) { static uint64_t c = 0; auto v = std::make_unique(std::move(type), jlm::util::strfmt("v", c++)); diff --git a/jlm/llvm/ir/ipgraph.cpp b/jlm/llvm/ir/ipgraph.cpp index fa1fe0702..47dcddf05 100644 --- a/jlm/llvm/ir/ipgraph.cpp +++ b/jlm/llvm/ir/ipgraph.cpp @@ -108,14 +108,14 @@ function_node::name() const noexcept return name_; } -const jlm::rvsdg::type & +const jlm::rvsdg::Type & function_node::type() const noexcept { static PointerType pointerType; return pointerType; } -std::shared_ptr +std::shared_ptr function_node::Type() const { return PointerType::Create(); @@ -165,7 +165,7 @@ data_node::type() const noexcept return pointerType; } -std::shared_ptr +std::shared_ptr data_node::Type() const { return PointerType::Create(); diff --git a/jlm/llvm/ir/ipgraph.hpp b/jlm/llvm/ir/ipgraph.hpp index a21bf3999..054eb5324 100644 --- a/jlm/llvm/ir/ipgraph.hpp +++ b/jlm/llvm/ir/ipgraph.hpp @@ -180,10 +180,10 @@ class ipgraph_node virtual const std::string & name() const noexcept = 0; - virtual const jlm::rvsdg::type & + virtual const jlm::rvsdg::Type & type() const noexcept = 0; - virtual std::shared_ptr + virtual std::shared_ptr Type() const = 0; virtual const llvm::linkage & @@ -223,10 +223,10 @@ class function_node final : public ipgraph_node return cfg_.get(); } - virtual const jlm::rvsdg::type & + virtual const jlm::rvsdg::Type & type() const noexcept override; - std::shared_ptr + std::shared_ptr Type() const override; const FunctionType & @@ -393,7 +393,7 @@ class data_node final : public ipgraph_node virtual const PointerType & type() const noexcept override; - std::shared_ptr + std::shared_ptr Type() const override; [[nodiscard]] const std::shared_ptr & diff --git 
a/jlm/llvm/ir/operators/GetElementPtr.hpp b/jlm/llvm/ir/operators/GetElementPtr.hpp index 8ff755890..bf371b709 100644 --- a/jlm/llvm/ir/operators/GetElementPtr.hpp +++ b/jlm/llvm/ir/operators/GetElementPtr.hpp @@ -70,7 +70,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op const variable * baseAddress, const std::vector & offsets, std::shared_ptr pointeeType, - std::shared_ptr resultType) + std::shared_ptr resultType) { CheckPointerType(baseAddress->type()); auto offsetTypes = CheckAndExtractOffsetTypes(offsets); @@ -101,7 +101,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op rvsdg::output * baseAddress, const std::vector & offsets, std::shared_ptr pointeeType, - std::shared_ptr resultType) + std::shared_ptr resultType) { CheckPointerType(baseAddress->type()); auto offsetTypes = CheckAndExtractOffsetTypes(offsets); @@ -116,7 +116,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op private: static void - CheckPointerType(const rvsdg::type & type) + CheckPointerType(const rvsdg::Type & type) { if (!is(type)) { @@ -143,10 +143,10 @@ class GetElementPtrOperation final : public rvsdg::simple_op return offsetTypes; } - static std::vector> + static std::vector> CreateOperandTypes(const std::vector> & indexTypes) { - std::vector> types({ PointerType::Create() }); + std::vector> types({ PointerType::Create() }); types.insert(types.end(), indexTypes.begin(), indexTypes.end()); return types; diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index 73da8730d..b49766929 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -127,8 +127,8 @@ class LoadOperation : public rvsdg::simple_op { protected: LoadOperation( - const std::vector> & operandTypes, - const std::vector> & resultTypes, + const std::vector> & operandTypes, + const std::vector> & resultTypes, size_t alignment) : simple_op(operandTypes, resultTypes), Alignment_(alignment) @@ -224,24 +224,24 @@ class 
LoadVolatileOperation final : public LoadOperation } private: - static std::vector> + static std::vector> CreateOperandTypes(size_t numMemoryStates) { - std::vector> types( + std::vector> types( { PointerType::Create(), iostatetype::Create() }); - std::vector> states( + std::vector> states( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); return types; } - static std::vector> + static std::vector> CreateResultTypes(std::shared_ptr loadedType, size_t numMemoryStates) { - std::vector> types( + std::vector> types( { std::move(loadedType), iostatetype::Create() }); - std::vector> states( + std::vector> states( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -480,22 +480,22 @@ class LoadNonVolatileOperation final : public LoadOperation } private: - static std::vector> + static std::vector> CreateOperandTypes(size_t numMemoryStates) { - std::vector> types(1, PointerType::Create()); - std::vector> states( + std::vector> types(1, PointerType::Create()); + std::vector> states( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); return types; } - static std::vector> + static std::vector> CreateResultTypes(std::shared_ptr loadedType, size_t numMemoryStates) { - std::vector> types(1, std::move(loadedType)); - std::vector> states( + std::vector> types(1, std::move(loadedType)); + std::vector> states( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); diff --git a/jlm/llvm/ir/operators/MemCpy.hpp b/jlm/llvm/ir/operators/MemCpy.hpp index 28b49af37..418a0f99c 100644 --- a/jlm/llvm/ir/operators/MemCpy.hpp +++ b/jlm/llvm/ir/operators/MemCpy.hpp @@ -24,8 +24,8 @@ class MemCpyOperation : public rvsdg::simple_op { protected: MemCpyOperation( - const std::vector> & operandTypes, - const std::vector> & resultTypes) + const std::vector> & operandTypes, + const std::vector> & 
resultTypes) : simple_op(operandTypes, resultTypes) { JLM_ASSERT(operandTypes.size() >= 4); @@ -72,7 +72,7 @@ class MemCpyNonVolatileOperation final : public MemCpyOperation public: ~MemCpyNonVolatileOperation() override; - MemCpyNonVolatileOperation(std::shared_ptr lengthType, size_t numMemoryStates) + MemCpyNonVolatileOperation(std::shared_ptr lengthType, size_t numMemoryStates) : MemCpyOperation( CreateOperandTypes(std::move(lengthType), numMemoryStates), CreateResultTypes(numMemoryStates)) @@ -119,16 +119,16 @@ class MemCpyNonVolatileOperation final : public MemCpyOperation } private: - static std::vector> - CreateOperandTypes(std::shared_ptr length, size_t numMemoryStates) + static std::vector> + CreateOperandTypes(std::shared_ptr length, size_t numMemoryStates) { auto pointerType = PointerType::Create(); - std::vector> types = { pointerType, pointerType, length }; + std::vector> types = { pointerType, pointerType, length }; types.insert(types.end(), numMemoryStates, MemoryStateType::Create()); return types; } - static std::vector> + static std::vector> CreateResultTypes(size_t numMemoryStates) { return { numMemoryStates, MemoryStateType::Create() }; @@ -151,7 +151,7 @@ class MemCpyVolatileOperation final : public MemCpyOperation public: ~MemCpyVolatileOperation() noexcept override; - MemCpyVolatileOperation(std::shared_ptr lengthType, size_t numMemoryStates) + MemCpyVolatileOperation(std::shared_ptr lengthType, size_t numMemoryStates) : MemCpyOperation( CreateOperandTypes(std::move(lengthType), numMemoryStates), CreateResultTypes(numMemoryStates)) @@ -200,11 +200,11 @@ class MemCpyVolatileOperation final : public MemCpyOperation } private: - static std::vector> - CreateOperandTypes(std::shared_ptr lengthType, size_t numMemoryStates) + static std::vector> + CreateOperandTypes(std::shared_ptr lengthType, size_t numMemoryStates) { auto pointerType = PointerType::Create(); - std::vector> types = { pointerType, + std::vector> types = { pointerType, pointerType, 
std::move(lengthType), iostatetype::Create() }; @@ -212,10 +212,10 @@ class MemCpyVolatileOperation final : public MemCpyOperation return types; } - static std::vector> + static std::vector> CreateResultTypes(size_t numMemoryStates) { - std::vector> types(1, iostatetype::Create()); + std::vector> types(1, iostatetype::Create()); types.insert(types.end(), numMemoryStates, MemoryStateType::Create()); return types; } diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 07a943088..dc58eab70 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -122,7 +122,7 @@ node::ExtractLambdaNodes(const phi::node & phiNode) /* phi builder class */ rvoutput * -builder::add_recvar(std::shared_ptr type) +builder::add_recvar(std::shared_ptr type) { if (!node_) return nullptr; diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 64679aed3..cd40a0c24 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -536,7 +536,7 @@ class builder final } phi::rvoutput * - add_recvar(std::shared_ptr type); + add_recvar(std::shared_ptr type); phi::node * end(); @@ -557,7 +557,7 @@ class cvinput final : public jlm::rvsdg::structural_input cvinput( phi::node * node, jlm::rvsdg::output * origin, - std::shared_ptr type) + std::shared_ptr type) : structural_input(node, origin, std::move(type)) {} @@ -576,7 +576,7 @@ class cvinput final : public jlm::rvsdg::structural_input create( phi::node * node, jlm::rvsdg::output * origin, - std::shared_ptr type) + std::shared_ptr type) { auto input = std::make_unique(node, origin, std::move(type)); return static_cast(node->append_input(std::move(input))); @@ -606,7 +606,7 @@ class rvoutput final : public jlm::rvsdg::structural_output ~rvoutput() override; private: - rvoutput(phi::node * node, rvargument * argument, std::shared_ptr type) + rvoutput(phi::node * node, rvargument * argument, std::shared_ptr type) : structural_output(node, std::move(type)), 
argument_(argument) {} @@ -622,7 +622,7 @@ class rvoutput final : public jlm::rvsdg::structural_output operator=(rvoutput &&) = delete; static rvoutput * - create(phi::node * node, rvargument * argument, std::shared_ptr type); + create(phi::node * node, rvargument * argument, std::shared_ptr type); public: rvargument * @@ -660,7 +660,7 @@ class rvargument final : public rvsdg::RegionArgument ~rvargument() override; private: - rvargument(rvsdg::Region * region, const std::shared_ptr type) + rvargument(rvsdg::Region * region, const std::shared_ptr type) : RegionArgument(region, nullptr, std::move(type)), output_(nullptr) {} @@ -676,7 +676,7 @@ class rvargument final : public rvsdg::RegionArgument operator=(rvargument &&) = delete; static rvargument * - create(rvsdg::Region * region, std::shared_ptr type) + create(rvsdg::Region * region, std::shared_ptr type) { auto argument = new rvargument(region, std::move(type)); region->append_argument(argument); @@ -716,7 +716,7 @@ class cvargument final : public rvsdg::RegionArgument public: ~cvargument() override; - cvargument(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) + cvargument(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) : rvsdg::RegionArgument(region, input, std::move(type)) {} @@ -735,7 +735,7 @@ class cvargument final : public rvsdg::RegionArgument Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; static cvargument * - create(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) + create(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) { auto argument = new cvargument(region, input, std::move(type)); region->append_argument(argument); @@ -764,7 +764,7 @@ class rvresult final : public rvsdg::RegionResult rvsdg::Region * region, jlm::rvsdg::output * origin, rvoutput * output, - std::shared_ptr type) + std::shared_ptr type) : RegionResult(region, origin, output, std::move(type)) {} @@ -786,7 +786,7 @@ class rvresult 
final : public rvsdg::RegionResult rvsdg::Region * region, jlm::rvsdg::output * origin, rvoutput * output, - std::shared_ptr type) + std::shared_ptr type) { auto result = new rvresult(region, origin, output, type); region->append_result(result); @@ -873,7 +873,7 @@ cvinput::argument() const noexcept } inline rvoutput * -rvoutput::create(phi::node * node, rvargument * argument, std::shared_ptr type) +rvoutput::create(phi::node * node, rvargument * argument, std::shared_ptr type) { JLM_ASSERT(argument->type() == *type); auto output = std::unique_ptr(new rvoutput(node, argument, std::move(type))); diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 4699fb10c..839771143 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -89,8 +89,8 @@ class StoreOperation : public rvsdg::simple_op { protected: StoreOperation( - const std::vector> & operandTypes, - const std::vector> & resultTypes, + const std::vector> & operandTypes, + const std::vector> & resultTypes, size_t alignment) : simple_op(operandTypes, resultTypes), Alignment_(alignment) @@ -183,7 +183,7 @@ class StoreNonVolatileOperation final : public StoreOperation private: static const std::shared_ptr - CheckAndExtractStoredType(const std::shared_ptr & type) + CheckAndExtractStoredType(const std::shared_ptr & type) { if (auto storedType = std::dynamic_pointer_cast(type)) { @@ -193,12 +193,12 @@ class StoreNonVolatileOperation final : public StoreOperation throw util::error("Expected value type"); } - static std::vector> + static std::vector> CreateOperandTypes(std::shared_ptr storedType, size_t numMemoryStates) { - std::vector> types( + std::vector> types( { PointerType::Create(), std::move(storedType) }); - std::vector> states( + std::vector> states( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); @@ -382,7 +382,7 @@ class StoreNonVolatileNode final : public StoreNode private: static std::shared_ptr - 
CheckAndExtractStoredType(const std::shared_ptr & type) + CheckAndExtractStoredType(const std::shared_ptr & type) { if (auto storedType = std::dynamic_pointer_cast(type)) { @@ -447,7 +447,7 @@ class StoreVolatileOperation final : public StoreOperation private: static std::shared_ptr - CheckAndExtractStoredType(const std::shared_ptr & type) + CheckAndExtractStoredType(const std::shared_ptr & type) { if (auto storedType = std::dynamic_pointer_cast(type)) return storedType; @@ -455,23 +455,23 @@ class StoreVolatileOperation final : public StoreOperation throw jlm::util::error("Expected value type"); } - static std::vector> + static std::vector> CreateOperandTypes(std::shared_ptr storedType, size_t numMemoryStates) { - std::vector> types( + std::vector> types( { PointerType::Create(), std::move(storedType), iostatetype::Create() }); - std::vector> states( + std::vector> states( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), states.begin(), states.end()); return types; } - static std::vector> + static std::vector> CreateResultTypes(size_t numMemoryStates) { - std::vector> types({ iostatetype::Create() }); - std::vector> memoryStates( + std::vector> types({ iostatetype::Create() }); + std::vector> memoryStates( numMemoryStates, MemoryStateType::Create()); types.insert(types.end(), memoryStates.begin(), memoryStates.end()); @@ -560,7 +560,7 @@ class StoreVolatileNode final : public StoreNode private: static std::shared_ptr - CheckAndExtractStoredType(const std::shared_ptr & type) + CheckAndExtractStoredType(const std::shared_ptr & type) { if (auto storedType = std::dynamic_pointer_cast(type)) return storedType; diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index f4d047811..cce8410e6 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -59,10 +59,10 @@ class CallOperation final : public jlm::rvsdg::simple_op } private: - static inline std::vector> + static inline std::vector> 
create_srctypes(const FunctionType & functionType) { - std::vector> types({ PointerType::Create() }); + std::vector> types({ PointerType::Create() }); for (auto & argumentType : functionType.Arguments()) types.emplace_back(argumentType); @@ -70,7 +70,7 @@ class CallOperation final : public jlm::rvsdg::simple_op } static void - CheckFunctionInputType(const jlm::rvsdg::type & type) + CheckFunctionInputType(const jlm::rvsdg::Type & type) { if (!is(type)) throw jlm::util::error("Expected pointer type."); @@ -488,7 +488,7 @@ class CallNode final : public jlm::rvsdg::simple_node private: static void - CheckFunctionInputType(const jlm::rvsdg::type & type) + CheckFunctionInputType(const jlm::rvsdg::Type & type) { if (!is(type)) throw jlm::util::error("Expected pointer type."); diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 0e804de2e..eb1f222c1 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -384,13 +384,13 @@ class output final : public rvsdg::structural_output public: ~output() override; - output(delta::node * node, std::shared_ptr type) + output(delta::node * node, std::shared_ptr type) : structural_output(node, std::move(type)) {} private: static output * - create(delta::node * node, std::shared_ptr type) + create(delta::node * node, std::shared_ptr type) { auto output = std::make_unique(node, std::move(type)); return static_cast(node->append_output(std::move(output))); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 4a54a8b9c..31ba510d5 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -484,13 +484,13 @@ class output final : public jlm::rvsdg::structural_output public: ~output() override; - output(lambda::node * node, std::shared_ptr type) + output(lambda::node * node, std::shared_ptr type) : structural_output(node, std::move(type)) {} private: static output * - create(lambda::node * node, std::shared_ptr type) + 
create(lambda::node * node, std::shared_ptr type) { auto output = std::make_unique(node, std::move(type)); return jlm::util::AssertedCast(node->append_output(std::move(output))); @@ -529,12 +529,12 @@ class fctargument final : public rvsdg::RegionArgument Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; private: - fctargument(rvsdg::Region * region, std::shared_ptr type) + fctargument(rvsdg::Region * region, std::shared_ptr type) : rvsdg::RegionArgument(region, nullptr, std::move(type)) {} static fctargument * - create(rvsdg::Region * region, std::shared_ptr type) + create(rvsdg::Region * region, std::shared_ptr type) { auto argument = new fctargument(region, std::move(type)); region->append_argument(argument); diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index 1c0e948f3..5b87fdc89 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -33,7 +33,7 @@ class phi_op final : public jlm::rvsdg::simple_op inline phi_op( const std::vector & nodes, - const std::shared_ptr & type) + const std::shared_ptr & type) : jlm::rvsdg::simple_op({ nodes.size(), type }, { type }), nodes_(nodes) {} @@ -55,13 +55,13 @@ class phi_op final : public jlm::rvsdg::simple_op virtual std::unique_ptr copy() const override; - inline const jlm::rvsdg::type & + inline const jlm::rvsdg::Type & type() const noexcept { return *result(0); } - inline const std::shared_ptr & + inline const std::shared_ptr & Type() const noexcept { return result(0); @@ -77,7 +77,7 @@ class phi_op final : public jlm::rvsdg::simple_op static std::unique_ptr create( const std::vector> & arguments, - std::shared_ptr type) + std::shared_ptr type) { std::vector nodes; std::vector operands; @@ -102,7 +102,7 @@ class assignment_op final : public jlm::rvsdg::simple_op public: virtual ~assignment_op() noexcept; - explicit inline assignment_op(const std::shared_ptr & type) + explicit inline assignment_op(const std::shared_ptr & 
type) : simple_op({ type, type }, {}) {} @@ -136,7 +136,7 @@ class select_op final : public jlm::rvsdg::simple_op public: virtual ~select_op() noexcept; - explicit select_op(const std::shared_ptr & type) + explicit select_op(const std::shared_ptr & type) : jlm::rvsdg::simple_op({ jlm::rvsdg::bittype::Create(1), type, type }, { type }) {} @@ -149,13 +149,13 @@ class select_op final : public jlm::rvsdg::simple_op virtual std::unique_ptr copy() const override; - [[nodiscard]] const jlm::rvsdg::type & + [[nodiscard]] const jlm::rvsdg::Type & type() const noexcept { return *result(0); } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return result(0); @@ -193,13 +193,13 @@ class vectorselect_op final : public jlm::rvsdg::simple_op virtual std::unique_ptr copy() const override; - [[nodiscard]] const rvsdg::type & + [[nodiscard]] const rvsdg::Type & type() const noexcept { return *result(0); } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return result(0); @@ -254,8 +254,8 @@ class fp2ui_op final : public jlm::rvsdg::unary_op {} inline fp2ui_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto st = dynamic_cast(srctype.get()); @@ -284,7 +284,7 @@ class fp2ui_op final : public jlm::rvsdg::unary_op const override; static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -317,8 +317,8 @@ class fp2si_op final : public jlm::rvsdg::unary_op {} inline fp2si_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : jlm::rvsdg::unary_op(srctype, dsttype) { auto st = dynamic_cast(srctype.get()); @@ -347,7 +347,7 @@ class fp2si_op final : public 
jlm::rvsdg::unary_op const override; static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -385,7 +385,7 @@ class ctl2bits_op final : public jlm::rvsdg::simple_op copy() const override; static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -463,14 +463,14 @@ class ConstantPointerNullOperation final : public jlm::rvsdg::simple_op } static std::unique_ptr - Create(std::shared_ptr type) + Create(std::shared_ptr type) { ConstantPointerNullOperation operation(CheckAndExtractType(type)); return tac::create(operation, {}); } static jlm::rvsdg::output * - Create(rvsdg::Region * region, std::shared_ptr type) + Create(rvsdg::Region * region, std::shared_ptr type) { ConstantPointerNullOperation operation(CheckAndExtractType(type)); return jlm::rvsdg::simple_node::create_normalized(region, operation, {})[0]; @@ -478,7 +478,7 @@ class ConstantPointerNullOperation final : public jlm::rvsdg::simple_op private: static const std::shared_ptr - CheckAndExtractType(std::shared_ptr type) + CheckAndExtractType(std::shared_ptr type) { if (auto pointerType = std::dynamic_pointer_cast(type)) return pointerType; @@ -501,8 +501,8 @@ class bits2ptr_op final : public jlm::rvsdg::unary_op {} inline bits2ptr_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto at = dynamic_cast(srctype.get()); @@ -537,7 +537,7 @@ class bits2ptr_op final : public jlm::rvsdg::unary_op } static std::unique_ptr - create(const variable * argument, std::shared_ptr type) + create(const variable * argument, std::shared_ptr type) { auto at = std::dynamic_pointer_cast(argument->Type()); if (!at) @@ 
-552,7 +552,7 @@ class bits2ptr_op final : public jlm::rvsdg::unary_op } static jlm::rvsdg::output * - create(jlm::rvsdg::output * operand, std::shared_ptr type) + create(jlm::rvsdg::output * operand, std::shared_ptr type) { auto ot = std::dynamic_pointer_cast(operand->Type()); if (!ot) @@ -581,8 +581,8 @@ class ptr2bits_op final : public jlm::rvsdg::unary_op {} inline ptr2bits_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto pt = dynamic_cast(srctype.get()); @@ -617,7 +617,7 @@ class ptr2bits_op final : public jlm::rvsdg::unary_op } static std::unique_ptr - create(const variable * argument, const std::shared_ptr & type) + create(const variable * argument, const std::shared_ptr & type) { auto pt = std::dynamic_pointer_cast(argument->Type()); if (!pt) @@ -787,8 +787,8 @@ class zext_op final : public jlm::rvsdg::unary_op } inline zext_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto st = dynamic_cast(srctype.get()); @@ -832,7 +832,7 @@ class zext_op final : public jlm::rvsdg::unary_op } static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto operandBitType = CheckAndExtractBitType(operand->Type()); auto resultBitType = CheckAndExtractBitType(type); @@ -842,7 +842,7 @@ class zext_op final : public jlm::rvsdg::unary_op } static rvsdg::output & - Create(rvsdg::output & operand, const std::shared_ptr & resultType) + Create(rvsdg::output & operand, const std::shared_ptr & resultType) { auto operandBitType = CheckAndExtractBitType(operand.Type()); auto resultBitType = CheckAndExtractBitType(resultType); @@ -853,7 +853,7 @@ class zext_op final : public jlm::rvsdg::unary_op private: static std::shared_ptr - CheckAndExtractBitType(const std::shared_ptr & type) + 
CheckAndExtractBitType(const std::shared_ptr & type) { if (auto bitType = std::dynamic_pointer_cast(type)) { @@ -903,7 +903,7 @@ class ConstantFP final : public jlm::rvsdg::simple_op } static std::unique_ptr - create(const ::llvm::APFloat & constant, const std::shared_ptr & type) + create(const ::llvm::APFloat & constant, const std::shared_ptr & type) { auto ft = std::dynamic_pointer_cast(type); if (!ft) @@ -1011,7 +1011,7 @@ class UndefValueOperation final : public jlm::rvsdg::simple_op public: ~UndefValueOperation() noexcept override; - explicit UndefValueOperation(std::shared_ptr type) + explicit UndefValueOperation(std::shared_ptr type) : simple_op({}, { std::move(type) }) {} @@ -1032,28 +1032,28 @@ class UndefValueOperation final : public jlm::rvsdg::simple_op [[nodiscard]] std::unique_ptr copy() const override; - [[nodiscard]] const rvsdg::type & + [[nodiscard]] const rvsdg::Type & GetType() const noexcept { return *result(0); } static jlm::rvsdg::output * - Create(rvsdg::Region & region, std::shared_ptr type) + Create(rvsdg::Region & region, std::shared_ptr type) { UndefValueOperation operation(std::move(type)); return jlm::rvsdg::simple_node::create_normalized(®ion, operation, {})[0]; } static std::unique_ptr - Create(std::shared_ptr type) + Create(std::shared_ptr type) { UndefValueOperation operation(std::move(type)); return tac::create(operation, {}); } static std::unique_ptr - Create(std::shared_ptr type, const std::string & name) + Create(std::shared_ptr type, const std::string & name) { UndefValueOperation operation(std::move(type)); return tac::create(operation, {}, { name }); @@ -1111,7 +1111,7 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op } static std::unique_ptr - Create(const std::shared_ptr & type) + Create(const std::shared_ptr & type) { auto valueType = CheckAndConvertType(type); @@ -1120,7 +1120,7 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op } static jlm::rvsdg::output * - Create(rvsdg::Region * 
region, const std::shared_ptr & type) + Create(rvsdg::Region * region, const std::shared_ptr & type) { auto valueType = CheckAndConvertType(type); @@ -1130,7 +1130,7 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op private: static std::shared_ptr - CheckAndConvertType(const std::shared_ptr & type) + CheckAndConvertType(const std::shared_ptr & type) { if (auto valueType = std::dynamic_pointer_cast(type)) return valueType; @@ -1235,8 +1235,8 @@ class fpext_op final : public jlm::rvsdg::unary_op } inline fpext_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto st = dynamic_cast(srctype.get()); @@ -1280,7 +1280,7 @@ class fpext_op final : public jlm::rvsdg::unary_op } static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -1369,8 +1369,8 @@ class fptrunc_op final : public jlm::rvsdg::unary_op } inline fptrunc_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto st = dynamic_cast(srctype.get()); @@ -1383,7 +1383,7 @@ class fptrunc_op final : public jlm::rvsdg::unary_op if (st->size() == fpsize::half || (st->size() == fpsize::flt && dt->size() != fpsize::half) || (st->size() == fpsize::dbl && dt->size() == fpsize::dbl)) - throw jlm::util::error("destination tpye size must be smaller than source size type."); + throw jlm::util::error("destination type size must be smaller than source size type."); } virtual bool @@ -1415,7 +1415,7 @@ class fptrunc_op final : public jlm::rvsdg::unary_op } static std::unique_ptr - create(const variable * operand, std::shared_ptr type) + create(const variable * operand, std::shared_ptr type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -1437,7 
+1437,7 @@ class valist_op final : public jlm::rvsdg::simple_op public: virtual ~valist_op(); - explicit valist_op(std::vector> types) + explicit valist_op(std::vector> types) : simple_op(std::move(types), { varargtype::Create() }) {} @@ -1461,7 +1461,7 @@ class valist_op final : public jlm::rvsdg::simple_op static std::unique_ptr create(const std::vector & arguments) { - std::vector> operands; + std::vector> operands; for (const auto & argument : arguments) operands.push_back(argument->Type()); @@ -1472,7 +1472,7 @@ class valist_op final : public jlm::rvsdg::simple_op static rvsdg::output * Create(rvsdg::Region & region, const std::vector & operands) { - std::vector> operandTypes; + std::vector> operandTypes; operandTypes.reserve(operands.size()); for (auto & operand : operands) operandTypes.emplace_back(operand->Type()); @@ -1496,8 +1496,8 @@ class bitcast_op final : public jlm::rvsdg::unary_op {} inline bitcast_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { check_types(srctype, dsttype); @@ -1530,7 +1530,7 @@ class bitcast_op final : public jlm::rvsdg::unary_op const override; static std::unique_ptr - create(const variable * operand, std::shared_ptr type) + create(const variable * operand, std::shared_ptr type) { auto pair = check_types(operand->Type(), type); @@ -1539,7 +1539,7 @@ class bitcast_op final : public jlm::rvsdg::unary_op } static jlm::rvsdg::output * - create(jlm::rvsdg::output * operand, std::shared_ptr rtype) + create(jlm::rvsdg::output * operand, std::shared_ptr rtype) { auto pair = check_types(operand->Type(), rtype); @@ -1552,8 +1552,8 @@ class bitcast_op final : public jlm::rvsdg::unary_op std::shared_ptr, std::shared_ptr> check_types( - const std::shared_ptr & otype, - const std::shared_ptr & rtype) + const std::shared_ptr & otype, + const std::shared_ptr & rtype) { auto ot = std::dynamic_pointer_cast(otype); if (!ot) @@ -1596,7 +1596,7 @@ class 
ConstantStruct final : public jlm::rvsdg::simple_op static std::unique_ptr create( const std::vector & elements, - const std::shared_ptr & type) + const std::shared_ptr & type) { auto structType = CheckAndExtractStructType(type); @@ -1608,7 +1608,7 @@ class ConstantStruct final : public jlm::rvsdg::simple_op Create( rvsdg::Region & region, const std::vector & operands, - std::shared_ptr resultType) + std::shared_ptr resultType) { auto structType = CheckAndExtractStructType(std::move(resultType)); @@ -1617,10 +1617,10 @@ class ConstantStruct final : public jlm::rvsdg::simple_op } private: - static inline std::vector> + static inline std::vector> create_srctypes(const StructType & type) { - std::vector> types; + std::vector> types; for (size_t n = 0; n < type.GetDeclaration().NumElements(); n++) types.push_back(type.GetDeclaration().GetElementType(n)); @@ -1628,7 +1628,7 @@ class ConstantStruct final : public jlm::rvsdg::simple_op } static std::shared_ptr - CheckAndExtractStructType(std::shared_ptr type) + CheckAndExtractStructType(std::shared_ptr type) { if (auto structType = std::dynamic_pointer_cast(type)) { @@ -1656,8 +1656,8 @@ class trunc_op final : public jlm::rvsdg::unary_op } inline trunc_op( - std::shared_ptr optype, - std::shared_ptr restype) + std::shared_ptr optype, + std::shared_ptr restype) : unary_op(optype, restype) { auto ot = dynamic_cast(optype.get()); @@ -1701,7 +1701,7 @@ class trunc_op final : public jlm::rvsdg::unary_op } static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto ot = std::dynamic_pointer_cast(operand->Type()); if (!ot) @@ -1741,8 +1741,8 @@ class uitofp_op final : public jlm::rvsdg::unary_op {} inline uitofp_op( - std::shared_ptr optype, - std::shared_ptr restype) + std::shared_ptr optype, + std::shared_ptr restype) : unary_op(optype, restype) { auto st = dynamic_cast(optype.get()); @@ -1771,7 +1771,7 @@ class uitofp_op final 
: public jlm::rvsdg::unary_op const override; static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -1800,8 +1800,8 @@ class sitofp_op final : public jlm::rvsdg::unary_op {} inline sitofp_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto st = dynamic_cast(srctype.get()); @@ -1830,7 +1830,7 @@ class sitofp_op final : public jlm::rvsdg::unary_op const override; static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto st = std::dynamic_pointer_cast(operand->Type()); if (!st) @@ -1888,7 +1888,7 @@ class ConstantArray final : public jlm::rvsdg::simple_op auto vt = std::dynamic_pointer_cast(elements[0]->Type()); if (!vt) - throw jlm::util::error("expected value type.\n"); + throw jlm::util::error("expected value Type.\n"); ConstantArray op(vt, elements.size()); return tac::create(op, elements); @@ -1918,7 +1918,7 @@ class ConstantAggregateZero final : public jlm::rvsdg::simple_op public: virtual ~ConstantAggregateZero(); - ConstantAggregateZero(std::shared_ptr type) + ConstantAggregateZero(std::shared_ptr type) : simple_op({}, { type }) { auto st = dynamic_cast(type.get()); @@ -1938,14 +1938,14 @@ class ConstantAggregateZero final : public jlm::rvsdg::simple_op copy() const override; static std::unique_ptr - create(std::shared_ptr type) + create(std::shared_ptr type) { ConstantAggregateZero op(std::move(type)); return tac::create(op, {}); } static jlm::rvsdg::output * - Create(rvsdg::Region & region, std::shared_ptr type) + Create(rvsdg::Region & region, std::shared_ptr type) { ConstantAggregateZero operation(std::move(type)); return jlm::rvsdg::simple_node::create_normalized(®ion, operation, {})[0]; @@ 
-2072,7 +2072,7 @@ class constantvector_op final : public jlm::rvsdg::simple_op static inline std::unique_ptr create( const std::vector & operands, - const std::shared_ptr & type) + const std::shared_ptr & type) { auto vt = std::dynamic_pointer_cast(type); if (!vt) @@ -2209,7 +2209,7 @@ class vectorunary_op final : public jlm::rvsdg::simple_op create( const jlm::rvsdg::unary_op & unop, const llvm::variable * operand, - const std::shared_ptr & type) + const std::shared_ptr & type) { auto vct1 = std::dynamic_pointer_cast(operand->Type()); auto vct2 = std::dynamic_pointer_cast(type); @@ -2305,7 +2305,7 @@ class vectorbinary_op final : public jlm::rvsdg::simple_op const jlm::rvsdg::binary_op & binop, const llvm::variable * op1, const llvm::variable * op2, - const std::shared_ptr & type) + const std::shared_ptr & type) { auto vct1 = std::dynamic_pointer_cast(op1->Type()); auto vct2 = std::dynamic_pointer_cast(op2->Type()); @@ -2380,7 +2380,7 @@ class ExtractValue final : public jlm::rvsdg::simple_op virtual ~ExtractValue(); inline ExtractValue( - const std::shared_ptr & aggtype, + const std::shared_ptr & aggtype, const std::vector & indices) : simple_op({ aggtype }, { dsttype(aggtype, indices) }), indices_(indices) @@ -2424,12 +2424,12 @@ class ExtractValue final : public jlm::rvsdg::simple_op } private: - static inline std::vector> + static inline std::vector> dsttype( - const std::shared_ptr & aggtype, + const std::shared_ptr & aggtype, const std::vector & indices) { - std::shared_ptr type = aggtype; + std::shared_ptr type = aggtype; for (const auto & index : indices) { if (auto st = std::dynamic_pointer_cast(type)) @@ -2566,24 +2566,24 @@ class FreeOperation final : public jlm::rvsdg::simple_op } private: - static std::vector> + static std::vector> CreateOperandTypes(size_t numMemoryStates) { - std::vector> memoryStates( + std::vector> memoryStates( numMemoryStates, MemoryStateType::Create()); - std::vector> types({ PointerType::Create() }); + std::vector> types({ 
PointerType::Create() }); types.insert(types.end(), memoryStates.begin(), memoryStates.end()); types.emplace_back(iostatetype::Create()); return types; } - static std::vector> + static std::vector> CreateResultTypes(size_t numMemoryStates) { - std::vector> types( + std::vector> types( numMemoryStates, MemoryStateType::Create()); types.emplace_back(iostatetype::Create()); diff --git a/jlm/llvm/ir/operators/sext.hpp b/jlm/llvm/ir/operators/sext.hpp index 555ff49b1..1e623666c 100644 --- a/jlm/llvm/ir/operators/sext.hpp +++ b/jlm/llvm/ir/operators/sext.hpp @@ -30,8 +30,8 @@ class sext_op final : public rvsdg::unary_op } inline sext_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(srctype, dsttype) { auto ot = std::dynamic_pointer_cast(srctype); @@ -74,7 +74,7 @@ class sext_op final : public rvsdg::unary_op } static std::unique_ptr - create(const variable * operand, const std::shared_ptr & type) + create(const variable * operand, const std::shared_ptr & type) { auto ot = std::dynamic_pointer_cast(operand->Type()); if (!ot) diff --git a/jlm/llvm/ir/tac.hpp b/jlm/llvm/ir/tac.hpp index 136f01160..da0b0c227 100644 --- a/jlm/llvm/ir/tac.hpp +++ b/jlm/llvm/ir/tac.hpp @@ -28,7 +28,7 @@ class tacvariable final : public variable tacvariable( llvm::tac * tac, - std::shared_ptr type, + std::shared_ptr type, const std::string & name) : variable(std::move(type), name), tac_(tac) @@ -41,7 +41,7 @@ class tacvariable final : public variable } static std::unique_ptr - create(llvm::tac * tac, std::shared_ptr type, const std::string & name) + create(llvm::tac * tac, std::shared_ptr type, const std::string & name) { return std::make_unique(tac, std::move(type), name); } diff --git a/jlm/llvm/ir/types.cpp b/jlm/llvm/ir/types.cpp index 5b7217986..9b1b1d305 100644 --- a/jlm/llvm/ir/types.cpp +++ b/jlm/llvm/ir/types.cpp @@ -18,8 +18,8 @@ namespace jlm::llvm FunctionType::~FunctionType() noexcept = default; 
FunctionType::FunctionType( - std::vector> argumentTypes, - std::vector> resultTypes) + std::vector> argumentTypes, + std::vector> resultTypes) : jlm::rvsdg::valuetype(), ResultTypes_(std::move(resultTypes)), ArgumentTypes_(std::move(argumentTypes)) @@ -33,13 +33,13 @@ FunctionType::FunctionType(FunctionType && other) noexcept ArgumentTypes_(std::move(other.ArgumentTypes_)) {} -const std::vector> & +const std::vector> & FunctionType::Arguments() const noexcept { return ArgumentTypes_; } -const std::vector> & +const std::vector> & FunctionType::Results() const noexcept { return ResultTypes_; @@ -52,7 +52,7 @@ FunctionType::debug_string() const } bool -FunctionType::operator==(const jlm::rvsdg::type & _other) const noexcept +FunctionType::operator==(const jlm::rvsdg::Type & _other) const noexcept { auto other = dynamic_cast(&_other); if (other == nullptr) @@ -112,8 +112,8 @@ FunctionType::operator=(FunctionType && rhs) noexcept std::shared_ptr FunctionType::Create( - std::vector> argumentTypes, - std::vector> resultTypes) + std::vector> argumentTypes, + std::vector> resultTypes) { return std::make_shared(std::move(argumentTypes), std::move(resultTypes)); } @@ -127,7 +127,7 @@ PointerType::debug_string() const } bool -PointerType::operator==(const jlm::rvsdg::type & other) const noexcept +PointerType::operator==(const jlm::rvsdg::Type & other) const noexcept { return jlm::rvsdg::is(other); } @@ -157,7 +157,7 @@ arraytype::debug_string() const } bool -arraytype::operator==(const jlm::rvsdg::type & other) const noexcept +arraytype::operator==(const jlm::rvsdg::Type & other) const noexcept { auto type = dynamic_cast(&other); return type && type->element_type() == element_type() && type->nelements() == nelements(); @@ -190,7 +190,7 @@ fptype::debug_string() const } bool -fptype::operator==(const jlm::rvsdg::type & other) const noexcept +fptype::operator==(const jlm::rvsdg::Type & other) const noexcept { auto type = dynamic_cast(&other); return type && type->size() == 
size(); @@ -248,7 +248,7 @@ varargtype::~varargtype() {} bool -varargtype::operator==(const jlm::rvsdg::type & other) const noexcept +varargtype::operator==(const jlm::rvsdg::Type & other) const noexcept { return dynamic_cast(&other) != nullptr; } @@ -275,7 +275,7 @@ varargtype::Create() StructType::~StructType() = default; bool -StructType::operator==(const jlm::rvsdg::type & other) const noexcept +StructType::operator==(const jlm::rvsdg::Type & other) const noexcept { auto type = dynamic_cast(&other); return type && type->IsPacked_ == IsPacked_ && type->Name_ == Name_ @@ -301,7 +301,7 @@ StructType::debug_string() const /* vectortype */ bool -vectortype::operator==(const jlm::rvsdg::type & other) const noexcept +vectortype::operator==(const jlm::rvsdg::Type & other) const noexcept { auto type = dynamic_cast(&other); return type && type->size_ == size_ && *type->type_ == *type_; @@ -313,7 +313,7 @@ fixedvectortype::~fixedvectortype() {} bool -fixedvectortype::operator==(const jlm::rvsdg::type & other) const noexcept +fixedvectortype::operator==(const jlm::rvsdg::Type & other) const noexcept { return vectortype::operator==(other); } @@ -338,7 +338,7 @@ scalablevectortype::~scalablevectortype() {} bool -scalablevectortype::operator==(const jlm::rvsdg::type & other) const noexcept +scalablevectortype::operator==(const jlm::rvsdg::Type & other) const noexcept { return vectortype::operator==(other); } @@ -363,7 +363,7 @@ iostatetype::~iostatetype() {} bool -iostatetype::operator==(const jlm::rvsdg::type & other) const noexcept +iostatetype::operator==(const jlm::rvsdg::Type & other) const noexcept { return jlm::rvsdg::is(other); } @@ -399,7 +399,7 @@ MemoryStateType::debug_string() const } bool -MemoryStateType::operator==(const jlm::rvsdg::type & other) const noexcept +MemoryStateType::operator==(const jlm::rvsdg::Type & other) const noexcept { return jlm::rvsdg::is(other); } diff --git a/jlm/llvm/ir/types.hpp b/jlm/llvm/ir/types.hpp index 104cac2f1..24dcdf8df 100644 
--- a/jlm/llvm/ir/types.hpp +++ b/jlm/llvm/ir/types.hpp @@ -25,8 +25,8 @@ class FunctionType final : public jlm::rvsdg::valuetype ~FunctionType() noexcept override; FunctionType( - std::vector> argumentTypes, - std::vector> resultTypes); + std::vector> argumentTypes, + std::vector> resultTypes); FunctionType(const FunctionType & other); @@ -38,10 +38,10 @@ class FunctionType final : public jlm::rvsdg::valuetype FunctionType & operator=(FunctionType && other) noexcept; - const std::vector> & + const std::vector> & Arguments() const noexcept; - const std::vector> & + const std::vector> & Results() const noexcept; size_t @@ -56,14 +56,14 @@ class FunctionType final : public jlm::rvsdg::valuetype return ArgumentTypes_.size(); } - const jlm::rvsdg::type & + const jlm::rvsdg::Type & ResultType(size_t index) const noexcept { JLM_ASSERT(index < ResultTypes_.size()); return *ResultTypes_[index]; } - const jlm::rvsdg::type & + const jlm::rvsdg::Type & ArgumentType(size_t index) const noexcept { JLM_ASSERT(index < ArgumentTypes_.size()); @@ -74,19 +74,19 @@ class FunctionType final : public jlm::rvsdg::valuetype debug_string() const override; bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; static std::shared_ptr Create( - std::vector> argumentTypes, - std::vector> resultTypes); + std::vector> argumentTypes, + std::vector> resultTypes); private: - std::vector> ResultTypes_; - std::vector> ArgumentTypes_; + std::vector> ResultTypes_; + std::vector> ArgumentTypes_; }; /** \brief PointerType class @@ -104,7 +104,7 @@ class PointerType final : public jlm::rvsdg::valuetype debug_string() const override; bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -140,7 
+140,7 @@ class arraytype final : public jlm::rvsdg::valuetype debug_string() const override; virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -199,7 +199,7 @@ class fptype final : public jlm::rvsdg::valuetype debug_string() const override; virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -229,7 +229,7 @@ class varargtype final : public jlm::rvsdg::statetype {} virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -242,15 +242,15 @@ class varargtype final : public jlm::rvsdg::statetype }; static inline bool -is_varargtype(const jlm::rvsdg::type & type) +is_varargtype(const jlm::rvsdg::Type & type) { return dynamic_cast(&type) != nullptr; } -static inline std::unique_ptr +static inline std::unique_ptr create_varargtype() { - return std::unique_ptr(new varargtype()); + return std::unique_ptr(new varargtype()); } /** \brief StructType class @@ -312,7 +312,7 @@ class StructType final : public jlm::rvsdg::valuetype } bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -343,7 +343,7 @@ class StructType::Declaration final public: ~Declaration() = default; - Declaration(std::vector> types) + Declaration(std::vector> types) : Types_(std::move(types)) {} @@ -389,13 +389,13 @@ class StructType::Declaration final } static std::unique_ptr - Create(std::vector> types) + Create(std::vector> types) { return 
std::make_unique(std::move(types)); } private: - std::vector> Types_; + std::vector> Types_; }; /* vector type */ @@ -419,7 +419,7 @@ class vectortype : public jlm::rvsdg::valuetype operator=(vectortype && other) = default; virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; size_t size() const noexcept @@ -454,7 +454,7 @@ class fixedvectortype final : public vectortype {} virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -479,7 +479,7 @@ class scalablevectortype final : public vectortype {} virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -507,7 +507,7 @@ class iostatetype final : public jlm::rvsdg::statetype {} virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -537,7 +537,7 @@ class MemoryStateType final : public jlm::rvsdg::statetype debug_string() const override; bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -548,7 +548,7 @@ class MemoryStateType final : public jlm::rvsdg::statetype template inline bool -IsOrContains(const jlm::rvsdg::type & type) +IsOrContains(const jlm::rvsdg::Type & type) { if (jlm::rvsdg::is(type)) return true; @@ -576,10 +576,10 @@ IsOrContains(const jlm::rvsdg::type & type) * Given a type, determines if it is one of LLVM's aggregate types. 
* Vectors are not considered to be aggregate types, despite being based on a subtype. * @param type the type to check - * @return true if the type is an aggreate type, false otherwise + * @return true if the type is an aggregate type, false otherwise */ inline bool -IsAggregateType(const jlm::rvsdg::type & type) +IsAggregateType(const jlm::rvsdg::Type & type) { return jlm::rvsdg::is(type) || jlm::rvsdg::is(type); } diff --git a/jlm/llvm/ir/variable.hpp b/jlm/llvm/ir/variable.hpp index 51009edc0..48efebc09 100644 --- a/jlm/llvm/ir/variable.hpp +++ b/jlm/llvm/ir/variable.hpp @@ -23,7 +23,7 @@ class variable public: virtual ~variable() noexcept; - variable(std::shared_ptr type, const std::string & name) + variable(std::shared_ptr type, const std::string & name) : name_(name), type_(std::move(type)) {} @@ -54,13 +54,13 @@ class variable return name_; } - inline const jlm::rvsdg::type & + inline const jlm::rvsdg::Type & type() const noexcept { return *type_; } - inline const std::shared_ptr + inline const std::shared_ptr Type() const noexcept { return type_; @@ -68,7 +68,7 @@ class variable private: std::string name_; - std::shared_ptr type_; + std::shared_ptr type_; }; template @@ -89,7 +89,7 @@ class gblvariable : public variable public: virtual ~gblvariable(); - inline gblvariable(std::shared_ptr type, const std::string & name) + inline gblvariable(std::shared_ptr type, const std::string & name) : variable(std::move(type), name) {} }; diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index e439d1d05..8587cd7ae 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -17,7 +17,7 @@ namespace jlm::llvm::aa * @return true if pointees should be tracked for all values of the given type, otherwise false */ bool -IsOrContainsPointerType(const rvsdg::type & type) +IsOrContainsPointerType(const rvsdg::Type & type) { return IsOrContains(type); } diff --git 
a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 8309bd82c..f2aeae8f3 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -634,7 +634,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownPhi(const phi::node & phiNode) } else { - JLM_UNREACHABLE("Unhandled node type!"); + JLM_UNREACHABLE("Unhandled node Type!"); } } diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index df6781d0d..2e9f132f8 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -465,7 +465,7 @@ JlmToMlirConverter::ConvertGamma( } ::mlir::Type -JlmToMlirConverter::ConvertType(const rvsdg::type & type) +JlmToMlirConverter::ConvertType(const rvsdg::Type & type) { if (auto bt = dynamic_cast(&type)) { diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index 47aaf09e8..c4c0d6f7a 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -170,7 +170,7 @@ class JlmToMlirConverter final * \result The corresponding MLIR RVSDG type. 
*/ ::mlir::Type - ConvertType(const rvsdg::type & type); + ConvertType(const rvsdg::Type & type); std::unique_ptr<::mlir::OpBuilder> Builder_; std::unique_ptr<::mlir::MLIRContext> Context_; diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index 2787cf64b..c49a35873 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -419,12 +419,12 @@ MlirToJlmConverter::ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::Region // Create the RVSDG function signature auto lambdaRefType = ::mlir::cast<::mlir::rvsdg::LambdaRefType>(result); - std::vector> argumentTypes; + std::vector> argumentTypes; for (auto argumentType : lambdaRefType.getParameterTypes()) { argumentTypes.push_back(ConvertType(argumentType)); } - std::vector> resultTypes; + std::vector> resultTypes; for (auto returnType : lambdaRefType.getReturnTypes()) { resultTypes.push_back(ConvertType(returnType)); @@ -447,7 +447,7 @@ MlirToJlmConverter::ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::Region return rvsdgLambda; } -std::unique_ptr +std::unique_ptr MlirToJlmConverter::ConvertType(::mlir::Type & type) { if (auto ctrlType = ::mlir::dyn_cast<::mlir::rvsdg::RVSDG_CTRLType>(type)) diff --git a/jlm/mlir/frontend/MlirToJlmConverter.hpp b/jlm/mlir/frontend/MlirToJlmConverter.hpp index fb8d36e5e..c4a9b6fa7 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.hpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.hpp @@ -170,7 +170,7 @@ class MlirToJlmConverter final * \param type The MLIR type to be converted. * \result The converted RVSDG type. 
*/ - static std::unique_ptr + static std::unique_ptr ConvertType(::mlir::Type & type); std::unique_ptr<::mlir::MLIRContext> Context_; diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index 888be2bcd..234e0c266 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -135,8 +135,8 @@ class binary_op : public simple_op virtual ~binary_op() noexcept; inline binary_op( - const std::vector> operands, - std::shared_ptr result) + const std::vector> operands, + std::shared_ptr result) : simple_op(std::move(operands), { std::move(result) }) {} diff --git a/jlm/rvsdg/bitstring/slice.hpp b/jlm/rvsdg/bitstring/slice.hpp index dcbace9ba..4c5e5734f 100644 --- a/jlm/rvsdg/bitstring/slice.hpp +++ b/jlm/rvsdg/bitstring/slice.hpp @@ -54,7 +54,7 @@ class bitslice_op : public jlm::rvsdg::unary_op virtual std::unique_ptr copy() const override; - inline const type & + inline const Type & argument_type() const noexcept { return *std::static_pointer_cast(argument(0)); diff --git a/jlm/rvsdg/bitstring/type.cpp b/jlm/rvsdg/bitstring/type.cpp index 5bc2483e8..7a8a9cb67 100644 --- a/jlm/rvsdg/bitstring/type.cpp +++ b/jlm/rvsdg/bitstring/type.cpp @@ -23,7 +23,7 @@ bittype::debug_string() const } bool -bittype::operator==(const jlm::rvsdg::type & other) const noexcept +bittype::operator==(const jlm::rvsdg::Type & other) const noexcept { auto type = dynamic_cast(&other); return type != nullptr && this->nbits() == type->nbits(); diff --git a/jlm/rvsdg/bitstring/type.hpp b/jlm/rvsdg/bitstring/type.hpp index 22be1f218..8091ff45b 100644 --- a/jlm/rvsdg/bitstring/type.hpp +++ b/jlm/rvsdg/bitstring/type.hpp @@ -34,7 +34,7 @@ class bittype final : public jlm::rvsdg::valuetype debug_string() const override; virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index 
e2a639877..3272e79d0 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -33,7 +33,7 @@ ctltype::debug_string() const } bool -ctltype::operator==(const jlm::rvsdg::type & other) const noexcept +ctltype::operator==(const jlm::rvsdg::Type & other) const noexcept { auto type = dynamic_cast(&other); return type && type->nalternatives_ == nalternatives_; diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index 26c9f52c7..9c27174da 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -33,7 +33,7 @@ class ctltype final : public jlm::rvsdg::statetype debug_string() const override; virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept override; + operator==(const jlm::rvsdg::Type & other) const noexcept override; std::size_t ComputeHash() const noexcept override; @@ -63,7 +63,7 @@ class ctltype final : public jlm::rvsdg::statetype }; static inline bool -is_ctltype(const jlm::rvsdg::type & type) noexcept +is_ctltype(const jlm::rvsdg::Type & type) noexcept { return dynamic_cast(&type) != nullptr; } @@ -224,7 +224,7 @@ class match_op final : public jlm::rvsdg::unary_op private: static const bittype & - CheckAndExtractBitType(const rvsdg::type & type) + CheckAndExtractBitType(const rvsdg::Type & type) { if (auto bitType = dynamic_cast(&type)) { diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 23a8b51a0..2f79215f5 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -66,7 +66,7 @@ class gamma_normal_form final : public structural_normal_form /* gamma operation */ class output; -class type; +class Type; class GammaOperation final : public structural_op { @@ -325,7 +325,7 @@ class GammaInput final : public structural_input ~GammaInput() noexcept override; private: - GammaInput(GammaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) + GammaInput(GammaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : structural_input(node, origin, std::move(type)) {} @@ -385,7 +385,7 @@ class 
GammaOutput final : public structural_output public: ~GammaOutput() noexcept override; - GammaOutput(GammaNode * node, std::shared_ptr type) + GammaOutput(GammaNode * node, std::shared_ptr type) : structural_output(node, std::move(type)) {} diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 4abd6a91a..9391d029f 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -16,7 +16,7 @@ namespace jlm::rvsdg GraphImport::GraphImport( rvsdg::graph & graph, - std::shared_ptr type, + std::shared_ptr type, std::string name) : RegionArgument(graph.root(), nullptr, std::move(type)), Name_(std::move(name)) diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index 0e87f377d..fb46355c7 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -28,7 +28,7 @@ namespace jlm::rvsdg class GraphImport : public RegionArgument { protected: - GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name); + GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name); public: [[nodiscard]] const std::string & diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 45d562f7f..7f795d232 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -24,7 +24,7 @@ input::~input() noexcept input::input( jlm::rvsdg::output * origin, rvsdg::Region * region, - std::shared_ptr type) + std::shared_ptr type) : index_(0), origin_(origin), region_(region), @@ -83,7 +83,7 @@ output::~output() noexcept JLM_ASSERT(nusers() == 0); } -output::output(rvsdg::Region * region, std::shared_ptr type) +output::output(rvsdg::Region * region, std::shared_ptr type) : index_(0), region_(region), Type_(std::move(type)) @@ -149,14 +149,14 @@ namespace jlm::rvsdg node_input::node_input( jlm::rvsdg::output * origin, jlm::rvsdg::node * node, - std::shared_ptr type) + std::shared_ptr type) : jlm::rvsdg::input(origin, node->region(), std::move(type)), node_(node) {} /* node_output class */ -node_output::node_output(jlm::rvsdg::node * node, std::shared_ptr type) 
+node_output::node_output(jlm::rvsdg::node * node, std::shared_ptr type) : jlm::rvsdg::output(node->region(), std::move(type)), node_(node) {} diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 9022fcaab..d6c7a94c6 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -43,7 +43,7 @@ class input input( jlm::rvsdg::output * origin, rvsdg::Region * region, - std::shared_ptr type); + std::shared_ptr type); input(const input &) = delete; @@ -70,13 +70,13 @@ class input void divert_to(jlm::rvsdg::output * new_origin); - [[nodiscard]] const rvsdg::type & + [[nodiscard]] const rvsdg::Type & type() const noexcept { return *Type(); } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return Type_; @@ -265,7 +265,7 @@ class input size_t index_; jlm::rvsdg::output * origin_; rvsdg::Region * region_; - std::shared_ptr Type_; + std::shared_ptr Type_; }; template @@ -292,7 +292,7 @@ class output public: virtual ~output() noexcept; - output(rvsdg::Region * region, std::shared_ptr type); + output(rvsdg::Region * region, std::shared_ptr type); output(const output &) = delete; @@ -353,13 +353,13 @@ class output return users_.end(); } - [[nodiscard]] const rvsdg::type & + [[nodiscard]] const rvsdg::Type & type() const noexcept { return *Type(); } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return Type_; @@ -543,7 +543,7 @@ class output size_t index_; rvsdg::Region * region_; - std::shared_ptr Type_; + std::shared_ptr Type_; std::unordered_set users_; }; @@ -566,7 +566,7 @@ class node_input : public jlm::rvsdg::input node_input( jlm::rvsdg::output * origin, jlm::rvsdg::node * node, - std::shared_ptr type); + std::shared_ptr type); jlm::rvsdg::node * node() const noexcept @@ -583,7 +583,7 @@ class node_input : public jlm::rvsdg::input class node_output : public jlm::rvsdg::output { public: - node_output(jlm::rvsdg::node * node, std::shared_ptr type); + 
node_output(jlm::rvsdg::node * node, std::shared_ptr type); jlm::rvsdg::node * node() const noexcept diff --git a/jlm/rvsdg/nullary.hpp b/jlm/rvsdg/nullary.hpp index bcd07d670..c5202a503 100644 --- a/jlm/rvsdg/nullary.hpp +++ b/jlm/rvsdg/nullary.hpp @@ -25,7 +25,7 @@ class nullary_op : public simple_op public: virtual ~nullary_op() noexcept; - inline explicit nullary_op(std::shared_ptr result) + inline explicit nullary_op(std::shared_ptr result) : simple_op({}, { std::move(result) }) {} }; diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index 778d20090..348b4da2f 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -31,7 +31,7 @@ simple_op::narguments() const noexcept return operands_.size(); } -const std::shared_ptr & +const std::shared_ptr & simple_op::argument(size_t index) const noexcept { JLM_ASSERT(index < narguments()); @@ -44,7 +44,7 @@ simple_op::nresults() const noexcept return results_.size(); } -const std::shared_ptr & +const std::shared_ptr & simple_op::result(size_t index) const noexcept { JLM_ASSERT(index < nresults()); diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index 9dd28f62c..9427bf65e 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -67,8 +67,8 @@ class simple_op : public operation virtual ~simple_op(); simple_op( - std::vector> operands, - std::vector> results) + std::vector> operands, + std::vector> results) : operands_(std::move(operands)), results_(std::move(results)) {} @@ -76,21 +76,21 @@ class simple_op : public operation size_t narguments() const noexcept; - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & argument(size_t index) const noexcept; size_t nresults() const noexcept; - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & result(size_t index) const noexcept; static jlm::rvsdg::simple_normal_form * normal_form(jlm::rvsdg::graph * graph) noexcept; private: - std::vector> operands_; - std::vector> results_; 
+ std::vector> operands_; + std::vector> results_; }; /* structural operation */ diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 7cb3ab5d6..064e5d122 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -25,7 +25,7 @@ RegionArgument::~RegionArgument() noexcept RegionArgument::RegionArgument( rvsdg::Region * region, jlm::rvsdg::structural_input * input, - std::shared_ptr type) + std::shared_ptr type) : output(region, std::move(type)), input_(input) { @@ -55,7 +55,7 @@ RegionResult::RegionResult( rvsdg::Region * region, jlm::rvsdg::output * origin, jlm::rvsdg::structural_output * output, - std::shared_ptr type) + std::shared_ptr type) : input(origin, region, std::move(type)), output_(output) { diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 851c293cc..18cbad422 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -54,7 +54,7 @@ class RegionArgument : public output RegionArgument( rvsdg::Region * region, structural_input * input, - std::shared_ptr type); + std::shared_ptr type); public: RegionArgument(const RegionArgument &) = delete; @@ -112,7 +112,7 @@ class RegionResult : public input rvsdg::Region * region, rvsdg::output * origin, structural_output * output, - std::shared_ptr type); + std::shared_ptr type); public: RegionResult(const RegionResult &) = delete; diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 4a742a97b..0fe1cb826 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -21,7 +21,7 @@ simple_input::~simple_input() noexcept simple_input::simple_input( jlm::rvsdg::simple_node * node, jlm::rvsdg::output * origin, - std::shared_ptr type) + std::shared_ptr type) : node_input(origin, node, std::move(type)) {} @@ -29,7 +29,7 @@ simple_input::simple_input( simple_output::simple_output( jlm::rvsdg::simple_node * node, - std::shared_ptr type) + std::shared_ptr type) : node_output(node, std::move(type)) {} diff --git a/jlm/rvsdg/simple-node.hpp 
b/jlm/rvsdg/simple-node.hpp index 1d7a41ae0..40de0070f 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -79,7 +79,7 @@ class simple_input final : public node_input simple_input( simple_node * node, jlm::rvsdg::output * origin, - std::shared_ptr type); + std::shared_ptr type); public: simple_node * @@ -98,7 +98,7 @@ class simple_output final : public node_output public: virtual ~simple_output() noexcept; - simple_output(jlm::rvsdg::simple_node * node, std::shared_ptr type); + simple_output(jlm::rvsdg::simple_node * node, std::shared_ptr type); public: simple_node * diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index 52d4f9695..a1ce62f89 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -93,7 +93,7 @@ is_mux_op(const jlm::rvsdg::operation & op) static inline std::vector create_state_mux( - std::shared_ptr type, + std::shared_ptr type, const std::vector & operands, size_t nresults) { @@ -111,7 +111,7 @@ create_state_mux( static inline jlm::rvsdg::output * create_state_merge( - std::shared_ptr type, + std::shared_ptr type, const std::vector & operands) { return create_state_mux(std::move(type), operands, 1)[0]; @@ -119,7 +119,7 @@ create_state_merge( static inline std::vector create_state_split( - std::shared_ptr type, + std::shared_ptr type, jlm::rvsdg::output * operand, size_t nresults) { diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index 99ac1518d..99913151f 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -23,7 +23,7 @@ structural_input::~structural_input() noexcept structural_input::structural_input( jlm::rvsdg::structural_node * node, jlm::rvsdg::output * origin, - std::shared_ptr type) + std::shared_ptr type) : node_input(origin, node, std::move(type)) { on_input_create(this); @@ -40,7 +40,7 @@ structural_output::~structural_output() noexcept structural_output::structural_output( jlm::rvsdg::structural_node * node, - std::shared_ptr type) + 
std::shared_ptr type) : node_output(node, std::move(type)) { on_output_create(this); diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index ce0d6c937..77c0c8fa3 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -79,13 +79,13 @@ class structural_input : public node_input structural_input( jlm::rvsdg::structural_node * node, jlm::rvsdg::output * origin, - std::shared_ptr type); + std::shared_ptr type); static structural_input * create( structural_node * node, jlm::rvsdg::output * origin, - std::shared_ptr type) + std::shared_ptr type) { auto input = std::make_unique(node, origin, std::move(type)); return node->append_input(std::move(input)); @@ -112,10 +112,10 @@ class structural_output : public node_output public: virtual ~structural_output() noexcept; - structural_output(jlm::rvsdg::structural_node * node, std::shared_ptr type); + structural_output(jlm::rvsdg::structural_node * node, std::shared_ptr type); static structural_output * - create(structural_node * node, std::shared_ptr type) + create(structural_node * node, std::shared_ptr type) { auto output = std::make_unique(node, std::move(type)); return node->append_output(std::move(output)); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 1d00216ae..41e62a003 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -255,7 +255,7 @@ class ThetaInput final : public structural_input public: ~ThetaInput() noexcept override; - ThetaInput(ThetaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) + ThetaInput(ThetaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : structural_input(node, origin, std::move(type)), output_(nullptr) {} @@ -300,7 +300,7 @@ class ThetaOutput final : public structural_output public: ~ThetaOutput() noexcept override; - ThetaOutput(ThetaNode * node, const std::shared_ptr type) + ThetaOutput(ThetaNode * node, const std::shared_ptr type) : structural_output(node, std::move(type)), input_(nullptr) 
{} diff --git a/jlm/rvsdg/type.cpp b/jlm/rvsdg/type.cpp index e7816d7f8..760b920fa 100644 --- a/jlm/rvsdg/type.cpp +++ b/jlm/rvsdg/type.cpp @@ -9,7 +9,7 @@ namespace jlm::rvsdg { -type::~type() noexcept +Type::~Type() noexcept {} valuetype::~valuetype() noexcept diff --git a/jlm/rvsdg/type.hpp b/jlm/rvsdg/type.hpp index 06e452095..6cc3774ac 100644 --- a/jlm/rvsdg/type.hpp +++ b/jlm/rvsdg/type.hpp @@ -13,21 +13,21 @@ namespace jlm::rvsdg { -class type +class Type { public: - virtual ~type() noexcept; + virtual ~Type() noexcept; protected: - inline constexpr type() noexcept + inline constexpr Type() noexcept {} public: virtual bool - operator==(const jlm::rvsdg::type & other) const noexcept = 0; + operator==(const jlm::rvsdg::Type & other) const noexcept = 0; inline bool - operator!=(const jlm::rvsdg::type & other) const noexcept + operator!=(const jlm::rvsdg::Type & other) const noexcept { return !(*this == other); } @@ -44,46 +44,46 @@ class type ComputeHash() const noexcept = 0; }; -class valuetype : public jlm::rvsdg::type +class valuetype : public jlm::rvsdg::Type { public: virtual ~valuetype() noexcept; protected: inline constexpr valuetype() noexcept - : jlm::rvsdg::type() + : jlm::rvsdg::Type() {} }; -class statetype : public jlm::rvsdg::type +class statetype : public jlm::rvsdg::Type { public: virtual ~statetype() noexcept; protected: inline constexpr statetype() noexcept - : jlm::rvsdg::type() + : jlm::rvsdg::Type() {} }; template static inline bool -is(const jlm::rvsdg::type & type) noexcept +is(const jlm::rvsdg::Type & type) noexcept { static_assert( - std::is_base_of::value, - "Template parameter T must be derived from jlm::rvsdg::type."); + std::is_base_of::value, + "Template parameter T must be derived from jlm::rvsdg::Type."); return dynamic_cast(&type) != nullptr; } template static inline bool -is(const std::shared_ptr & type) noexcept +is(const std::shared_ptr & type) noexcept { static_assert( - std::is_base_of::value, - "Template parameter T must 
be derived from jlm::rvsdg::type."); + std::is_base_of::value, + "Template parameter T must be derived from jlm::rvsdg::Type."); return dynamic_cast(type.get()) != nullptr; } diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index 02fb00c48..f2cb3f39b 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -60,8 +60,8 @@ class unary_op : public simple_op virtual ~unary_op() noexcept; inline unary_op( - std::shared_ptr operand, - std::shared_ptr result) + std::shared_ptr operand, + std::shared_ptr result) : simple_op({ std::move(operand) }, { std::move(result) }) {} diff --git a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp index f797bfc87..5d5255afa 100644 --- a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp @@ -57,13 +57,13 @@ SetupFunctionWithThreeAddressCode(const jlm::rvsdg::simple_op & operation) auto ipgModule = ipgraph_module::create(jlm::util::filepath(""), "", ""); auto & ipgraph = ipgModule->ipgraph(); - std::vector> operandTypes; + std::vector> operandTypes; for (size_t n = 0; n < operation.narguments(); n++) { operandTypes.emplace_back(operation.argument(n)); } - std::vector> resultTypes; + std::vector> resultTypes; for (size_t n = 0; n < operation.nresults(); n++) { resultTypes.emplace_back(operation.result(n)); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 46c6f785f..7ff50a3b3 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -215,7 +215,7 @@ structural_node::AddInputWithArguments(rvsdg::output & origin) } StructuralNodeOutput & -structural_node::AddOutput(std::shared_ptr type) +structural_node::AddOutput(std::shared_ptr type) { auto output = std::unique_ptr(new StructuralNodeOutput(*this, std::move(type))); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 283d6d098..6a031f5e3 100644 --- 
a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -26,7 +26,7 @@ namespace jlm::tests */ class GraphImport final : public rvsdg::GraphImport { - GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name) + GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name) : rvsdg::GraphImport(graph, std::move(type), std::move(name)) {} @@ -35,7 +35,7 @@ class GraphImport final : public rvsdg::GraphImport Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; static GraphImport & - Create(rvsdg::graph & graph, std::shared_ptr type, std::string name) + Create(rvsdg::graph & graph, std::shared_ptr type, std::string name) { auto graphImport = new GraphImport(graph, std::move(type), std::move(name)); graph.root()->append_argument(graphImport); @@ -74,8 +74,8 @@ class unary_op final : public rvsdg::unary_op virtual ~unary_op() noexcept; inline unary_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) noexcept + std::shared_ptr srctype, + std::shared_ptr dsttype) noexcept : rvsdg::unary_op(std::move(srctype), std::move(dsttype)) {} @@ -97,9 +97,9 @@ class unary_op final : public rvsdg::unary_op static inline rvsdg::node * create( rvsdg::Region * region, - std::shared_ptr srctype, + std::shared_ptr srctype, rvsdg::output * operand, - std::shared_ptr dsttype) + std::shared_ptr dsttype) { return rvsdg::simple_node::create( region, @@ -109,9 +109,9 @@ class unary_op final : public rvsdg::unary_op static inline rvsdg::output * create_normalized( - std::shared_ptr srctype, + std::shared_ptr srctype, rvsdg::output * operand, - std::shared_ptr dsttype) + std::shared_ptr dsttype) { unary_op op(std::move(srctype), std::move(dsttype)); return rvsdg::simple_node::create_normalized(operand->region(), op, { operand })[0]; @@ -138,8 +138,8 @@ class binary_op final : public rvsdg::binary_op virtual ~binary_op() noexcept; inline binary_op( - const std::shared_ptr & srctype, - std::shared_ptr dsttype, + const std::shared_ptr & 
srctype, + std::shared_ptr dsttype, const enum rvsdg::binary_op::flags & flags) noexcept : rvsdg::binary_op({ srctype, srctype }, std::move(dsttype)), flags_(flags) @@ -167,8 +167,8 @@ class binary_op final : public rvsdg::binary_op static inline rvsdg::node * create( - const std::shared_ptr & srctype, - std::shared_ptr dsttype, + const std::shared_ptr & srctype, + std::shared_ptr dsttype, rvsdg::output * op1, rvsdg::output * op2) { @@ -178,8 +178,8 @@ class binary_op final : public rvsdg::binary_op static inline rvsdg::output * create_normalized( - const std::shared_ptr srctype, - std::shared_ptr dsttype, + const std::shared_ptr srctype, + std::shared_ptr dsttype, rvsdg::output * op1, rvsdg::output * op2) { @@ -227,7 +227,7 @@ class structural_node final : public rvsdg::structural_node AddInputWithArguments(rvsdg::output & origin); StructuralNodeOutput & - AddOutput(std::shared_ptr type); + AddOutput(std::shared_ptr type); StructuralNodeOutput & AddOutputWithResults(const std::vector & origins); @@ -253,7 +253,7 @@ class StructuralNodeInput final : public rvsdg::structural_input StructuralNodeInput( structural_node & node, rvsdg::output & origin, - std::shared_ptr type) + std::shared_ptr type) : rvsdg::structural_input(&node, &origin, std::move(type)) {} @@ -288,7 +288,7 @@ class StructuralNodeOutput final : public rvsdg::structural_output ~StructuralNodeOutput() noexcept override; private: - StructuralNodeOutput(structural_node & node, std::shared_ptr type) + StructuralNodeOutput(structural_node & node, std::shared_ptr type) : rvsdg::structural_output(&node, std::move(type)) {} }; @@ -307,7 +307,7 @@ class StructuralNodeArgument final : public rvsdg::RegionArgument StructuralNodeArgument( rvsdg::Region & region, StructuralNodeInput * input, - std::shared_ptr type) + std::shared_ptr type) : rvsdg::RegionArgument(®ion, input, std::move(type)) {} @@ -320,7 +320,7 @@ class StructuralNodeArgument final : public rvsdg::RegionArgument } static StructuralNodeArgument & - 
Create(rvsdg::Region & region, std::shared_ptr type) + Create(rvsdg::Region & region, std::shared_ptr type) { auto argument = new StructuralNodeArgument(region, nullptr, std::move(type)); region.append_argument(argument); @@ -366,8 +366,8 @@ class test_op final : public rvsdg::simple_op virtual ~test_op(); inline test_op( - std::vector> arguments, - std::vector> results) + std::vector> arguments, + std::vector> results) : simple_op(std::move(arguments), std::move(results)) {} @@ -386,9 +386,9 @@ class test_op final : public rvsdg::simple_op create( rvsdg::Region * region, const std::vector & operands, - std::vector> result_types) + std::vector> result_types) { - std::vector> operand_types; + std::vector> operand_types; for (const auto & operand : operands) operand_types.push_back(operand->Type()); @@ -399,9 +399,9 @@ class test_op final : public rvsdg::simple_op static rvsdg::simple_node * Create( rvsdg::Region * region, - std::vector> operandTypes, + std::vector> operandTypes, const std::vector & operands, - std::vector> resultTypes) + std::vector> resultTypes) { test_op op(std::move(operandTypes), std::move(resultTypes)); return rvsdg::simple_node::create(region, op, { operands }); @@ -427,7 +427,7 @@ class SimpleNode final : public rvsdg::simple_node Create( rvsdg::Region & region, const std::vector & operands, - std::vector> resultTypes) + std::vector> resultTypes) { auto operandTypes = ExtractTypes(operands); test_op operation(std::move(operandTypes), std::move(resultTypes)); @@ -437,10 +437,10 @@ class SimpleNode final : public rvsdg::simple_node } private: - static std::vector> + static std::vector> ExtractTypes(const std::vector & outputs) { - std::vector> types; + std::vector> types; types.reserve(outputs.size()); for (auto output : outputs) { @@ -454,9 +454,9 @@ class SimpleNode final : public rvsdg::simple_node static inline std::unique_ptr create_testop_tac( const std::vector & arguments, - std::vector> result_types) + std::vector> result_types) { - 
std::vector> argument_types; + std::vector> argument_types; for (const auto & arg : arguments) argument_types.push_back(arg->Type()); @@ -468,9 +468,9 @@ static inline std::vector create_testop( rvsdg::Region * region, const std::vector & operands, - std::vector> result_types) + std::vector> result_types) { - std::vector> operand_types; + std::vector> operand_types; for (const auto & operand : operands) operand_types.push_back(operand->Type()); @@ -484,7 +484,7 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument TestGraphArgument( rvsdg::Region & region, jlm::rvsdg::structural_input * input, - std::shared_ptr type) + std::shared_ptr type) : jlm::rvsdg::RegionArgument(®ion, input, type) {} @@ -499,7 +499,7 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument Create( rvsdg::Region & region, jlm::rvsdg::structural_input * input, - std::shared_ptr type) + std::shared_ptr type) { auto graphArgument = new TestGraphArgument(region, input, std::move(type)); region.append_argument(graphArgument); diff --git a/tests/test-types.cpp b/tests/test-types.cpp index 9ef6319b0..16487b222 100644 --- a/tests/test-types.cpp +++ b/tests/test-types.cpp @@ -22,7 +22,7 @@ valuetype::debug_string() const } bool -valuetype::operator==(const rvsdg::type & other) const noexcept +valuetype::operator==(const rvsdg::Type & other) const noexcept { return dynamic_cast(&other) != nullptr; } @@ -52,7 +52,7 @@ statetype::debug_string() const } bool -statetype::operator==(const rvsdg::type & other) const noexcept +statetype::operator==(const rvsdg::Type & other) const noexcept { return dynamic_cast(&other) != nullptr; } diff --git a/tests/test-types.hpp b/tests/test-types.hpp index 7fbb02dab..b45db75a6 100644 --- a/tests/test-types.hpp +++ b/tests/test-types.hpp @@ -24,7 +24,7 @@ class valuetype final : public rvsdg::valuetype debug_string() const override; virtual bool - operator==(const rvsdg::type & other) const noexcept override; + operator==(const rvsdg::Type & 
other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; @@ -46,7 +46,7 @@ class statetype final : public rvsdg::statetype debug_string() const override; virtual bool - operator==(const rvsdg::type & other) const noexcept override; + operator==(const rvsdg::Type & other) const noexcept override; [[nodiscard]] std::size_t ComputeHash() const noexcept override; From 726c6309a5cc5c24255577be663555fd14c8dee9 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 30 Sep 2024 19:15:05 +0200 Subject: [PATCH 094/170] Bumps hls-suite that now checks cycle count (#639) --- scripts/run-hls-test.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index 0d84012a5..f9278f2c5 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=51d327e20c42eebe3578d4b1ad0950e4ab389c2d +GIT_COMMIT=d0bb58feb2432aefbc65364e10fded264c024fd8 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" From 99fe50ba5c67f455559c9167a1a3c3d2599850b7 Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Tue, 1 Oct 2024 21:25:05 +0200 Subject: [PATCH 095/170] Speed up make rule for clang-format (#643) --- Makefile.rules | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/Makefile.rules b/Makefile.rules index 3c3151e7e..04c7572a7 100644 --- a/Makefile.rules +++ b/Makefile.rules @@ -179,11 +179,7 @@ docs: .PHONY # Clang format rules format: - @for FILE in $(SOURCES) $(HEADERS) ; do \ - clang-format-$(LLVM_VERSION) --Werror --style="file:.clang-format" --verbose -i $$FILE ;\ - done + clang-format-$(LLVM_VERSION) --Werror --style="file:.clang-format" --verbose -i $(SOURCES) $(HEADERS) format-dry-run: - @for FILE in $(SOURCES) 
$(HEADERS) ; do \ - clang-format-$(LLVM_VERSION) --dry-run --Werror --style="file:.clang-format" --verbose -i $$FILE || exit 1 ;\ - done + clang-format-$(LLVM_VERSION) --dry-run --Werror --style="file:.clang-format" --verbose -i $(SOURCES) $(HEADERS) From 8e11f8c9f43becb13e3bab93b97ad23e2dd59356 Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Tue, 1 Oct 2024 22:02:06 +0200 Subject: [PATCH 096/170] add GetNode method to output (symmetric to input) (#644) --- jlm/rvsdg/node.cpp | 7 +++++++ jlm/rvsdg/node.hpp | 11 +++++++++++ 2 files changed, 18 insertions(+) diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 7f795d232..2093efee8 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -95,6 +95,13 @@ output::debug_string() const return jlm::util::strfmt(index()); } +rvsdg::node * +output::GetNode(const rvsdg::output & output) noexcept +{ + auto nodeOutput = dynamic_cast(&output); + return nodeOutput ? nodeOutput->node() : nullptr; +} + void output::remove_user(jlm::rvsdg::input * user) { diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index d6c7a94c6..971da855f 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -374,6 +374,17 @@ class output virtual std::string debug_string() const; + /** + * Retrieve the associated node from \p output if \p output is derived from + * jlm::rvsdg::node_output. + * + * @param output The output from which to retrieve the node. + * @return The node associated with \p output if output is derived from jlm::rvsdg::node_output, + * otherwise nullptr. 
+ */ + [[nodiscard]] static rvsdg::node * + GetNode(const rvsdg::output & output) noexcept; + template class iterator { From f1b2f15fcb5dbe3ca652748c17e3ff735f8ac47f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 2 Oct 2024 19:18:17 +0200 Subject: [PATCH 097/170] Rename valuetype class to ValueType (#642) Co-authored-by: HKrogstie --- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 +- jlm/hls/ir/hls.cpp | 4 +- jlm/hls/ir/hls.hpp | 94 +++++++++++------------ jlm/llvm/backend/jlm2llvm/instruction.cpp | 4 +- jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 4 +- jlm/llvm/backend/jlm2llvm/type.cpp | 4 +- jlm/llvm/frontend/LlvmTypeConversion.cpp | 20 ++--- jlm/llvm/frontend/LlvmTypeConversion.hpp | 2 +- jlm/llvm/ir/RvsdgModule.hpp | 8 +- jlm/llvm/ir/attribute.hpp | 6 +- jlm/llvm/ir/ipgraph.hpp | 8 +- jlm/llvm/ir/operators/GetElementPtr.hpp | 12 +-- jlm/llvm/ir/operators/Load.hpp | 26 +++---- jlm/llvm/ir/operators/Store.hpp | 32 ++++---- jlm/llvm/ir/operators/alloca.hpp | 12 +-- jlm/llvm/ir/operators/delta.hpp | 14 ++-- jlm/llvm/ir/operators/operators.hpp | 50 ++++++------ jlm/llvm/ir/types.cpp | 4 +- jlm/llvm/ir/types.hpp | 56 +++++++------- jlm/rvsdg/bitstring/type.hpp | 2 +- jlm/rvsdg/type.cpp | 6 +- jlm/rvsdg/type.hpp | 6 +- tests/jlm/llvm/ir/TestTypes.cpp | 6 +- tests/test-types.hpp | 4 +- 25 files changed, 195 insertions(+), 197 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index a912d8edf..6458dbed0 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -747,7 +747,7 @@ jlm::hls::ConnectRequestResponseMemPorts( // nodes in the new lambda // std::vector loadNodes; - std::vector> loadTypes; + std::vector> loadTypes; for (auto loadNode : originalLoadNodes) { JLM_ASSERT(smap.contains(*loadNode->output(0))); @@ -788,7 +788,7 @@ jlm::hls::ConnectRequestResponseMemPorts( auto replacement = ReplaceLoad(smap, 
originalLoadNodes[i], routed); auto address = route_request(lambdaRegion, replacement->output(replacement->noutputs() - 1)); loadAddresses.push_back(address); - std::shared_ptr type; + std::shared_ptr type; if (auto loadOperation = dynamic_cast(&replacement->operation())) { type = loadOperation->GetLoadedType(); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 641965121..45cc53947 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -195,7 +195,7 @@ convert_alloca(rvsdg::Region * region) std::cout << "alloca " << delta_name << ": " << po->value_type().debug_string() << "\n"; auto db = llvm::delta::node::Create( rr, - std::static_pointer_cast(po->ValueType()), + std::static_pointer_cast(po->ValueType()), delta_name, llvm::linkage::external_linkage, "", @@ -254,7 +254,7 @@ rename_delta(llvm::delta::node * odn) std::cout << "renaming delta node " << odn->name() << " to " << name << "\n"; auto db = llvm::delta::node::Create( odn->region(), - std::static_pointer_cast(odn->Type()), + std::static_pointer_cast(odn->Type()), name, llvm::linkage::external_linkage, "", diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 2be0b8c4d..95be4bd1b 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -192,7 +192,7 @@ loop_node::set_predicate(jlm::rvsdg::output * p) } std::shared_ptr -get_mem_req_type(std::shared_ptr elementType, bool write) +get_mem_req_type(std::shared_ptr elementType, bool write) { std::vector>> elements; elements.emplace_back("addr", llvm::PointerType::Create()); @@ -207,7 +207,7 @@ get_mem_req_type(std::shared_ptr elementType, bool write } std::shared_ptr -get_mem_res_type(std::shared_ptr dataType) +get_mem_res_type(std::shared_ptr dataType) { std::vector>> elements; elements.emplace_back("data", std::move(dataType)); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index edf71b3c7..6072e6844 100644 --- a/jlm/hls/ir/hls.hpp +++ 
b/jlm/hls/ir/hls.hpp @@ -796,7 +796,7 @@ class loop_node final : public jlm::rvsdg::structural_node copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; }; -class bundletype final : public jlm::rvsdg::valuetype +class bundletype final : public jlm::rvsdg::ValueType { public: ~bundletype() @@ -804,7 +804,7 @@ class bundletype final : public jlm::rvsdg::valuetype bundletype( const std::vector>> elements) - : jlm::rvsdg::valuetype(), + : jlm::rvsdg::ValueType(), elements_(std::move(elements)) {} @@ -867,10 +867,10 @@ class bundletype final : public jlm::rvsdg::valuetype }; std::shared_ptr -get_mem_req_type(std::shared_ptr elementType, bool write); +get_mem_req_type(std::shared_ptr elementType, bool write); std::shared_ptr -get_mem_res_type(std::shared_ptr dataType); +get_mem_res_type(std::shared_ptr dataType); class load_op final : public jlm::rvsdg::simple_op { @@ -878,7 +878,7 @@ class load_op final : public jlm::rvsdg::simple_op virtual ~load_op() {} - load_op(const std::shared_ptr & pointeeType, size_t numStates) + load_op(const std::shared_ptr & pointeeType, size_t numStates) : simple_op(CreateInTypes(pointeeType, numStates), CreateOutTypes(pointeeType, numStates)) {} @@ -892,7 +892,7 @@ class load_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateInTypes(std::shared_ptr pointeeType, size_t numStates) + CreateInTypes(std::shared_ptr pointeeType, size_t numStates) { std::vector> types( 1, @@ -906,7 +906,7 @@ class load_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateOutTypes(std::shared_ptr pointeeType, size_t numStates) + CreateOutTypes(std::shared_ptr pointeeType, size_t numStates) { std::vector> types(1, std::move(pointeeType)); std::vector> states( @@ -937,7 +937,7 @@ class load_op final : public jlm::rvsdg::simple_op { auto region = addr.region(); load_op op( - std::dynamic_pointer_cast(load_result.Type()), + std::dynamic_pointer_cast(load_result.Type()), states.size()); std::vector inputs; 
inputs.push_back(&addr); @@ -952,10 +952,10 @@ class load_op final : public jlm::rvsdg::simple_op return *util::AssertedCast(argument(0).get()); } - [[nodiscard]] std::shared_ptr + [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - return std::dynamic_pointer_cast(result(0)); + return std::dynamic_pointer_cast(result(0)); } }; @@ -1091,7 +1091,7 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op virtual ~decoupled_load_op() {} - decoupled_load_op(const std::shared_ptr & pointeeType) + decoupled_load_op(const std::shared_ptr & pointeeType) : simple_op(CreateInTypes(pointeeType), CreateOutTypes(pointeeType)) {} @@ -1104,7 +1104,7 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateInTypes(std::shared_ptr pointeeType) + CreateInTypes(std::shared_ptr pointeeType) { std::vector> types(1, llvm::PointerType::Create()); types.emplace_back(std::move(pointeeType)); // result @@ -1112,7 +1112,7 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateOutTypes(std::shared_ptr pointeeType) + CreateOutTypes(std::shared_ptr pointeeType) { std::vector> types(1, std::move(pointeeType)); types.emplace_back(llvm::PointerType::Create()); // addr @@ -1134,7 +1134,7 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op static std::vector create(jlm::rvsdg::output & addr, jlm::rvsdg::output & load_result) { - decoupled_load_op op(std::dynamic_pointer_cast(load_result.Type())); + decoupled_load_op op(std::dynamic_pointer_cast(load_result.Type())); std::vector inputs; inputs.push_back(&addr); inputs.push_back(&load_result); @@ -1147,10 +1147,10 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op return *util::AssertedCast(argument(0).get()); } - [[nodiscard]] std::shared_ptr + [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - return std::dynamic_pointer_cast(result(0)); + return std::dynamic_pointer_cast(result(0)); } }; @@ -1160,7 +1160,7 @@ class 
mem_resp_op final : public jlm::rvsdg::simple_op virtual ~mem_resp_op() {} - explicit mem_resp_op(const std::vector> & output_types) + explicit mem_resp_op(const std::vector> & output_types) : simple_op(CreateInTypes(output_types), CreateOutTypes(output_types)) {} @@ -1174,7 +1174,7 @@ class mem_resp_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateInTypes(const std::vector> & output_types) + CreateInTypes(const std::vector> & output_types) { size_t max_width = 64; // TODO: calculate size onece JlmSize is moved @@ -1189,7 +1189,7 @@ class mem_resp_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateOutTypes(const std::vector> & output_types) + CreateOutTypes(const std::vector> & output_types) { std::vector> types; types.reserve(output_types.size()); @@ -1215,7 +1215,7 @@ class mem_resp_op final : public jlm::rvsdg::simple_op static std::vector create( rvsdg::output & result, - const std::vector> & output_types) + const std::vector> & output_types) { auto region = result.region(); // TODO: verify port here @@ -1232,8 +1232,8 @@ class mem_req_op final : public jlm::rvsdg::simple_op virtual ~mem_req_op() = default; mem_req_op( - const std::vector> & load_types, - const std::vector> & store_types) + const std::vector> & load_types, + const std::vector> & store_types) : simple_op(CreateInTypes(load_types, store_types), CreateOutTypes(load_types, store_types)) { for (auto loadType : load_types) @@ -1262,8 +1262,8 @@ class mem_req_op final : public jlm::rvsdg::simple_op static std::vector> CreateInTypes( - const std::vector> & load_types, - const std::vector> & store_types) + const std::vector> & load_types, + const std::vector> & store_types) { std::vector> types; for (size_t i = 0; i < load_types.size(); i++) @@ -1280,8 +1280,8 @@ class mem_req_op final : public jlm::rvsdg::simple_op static std::vector> CreateOutTypes( - const std::vector> & load_types, - const std::vector> & store_types) + const std::vector> & load_types, + const 
std::vector> & store_types) { size_t max_width = 64; // TODO: fix once JlmSize is moved @@ -1315,18 +1315,18 @@ class mem_req_op final : public jlm::rvsdg::simple_op static std::vector create( const std::vector & load_operands, - const std::vector> & loadTypes, + const std::vector> & loadTypes, const std::vector & store_operands, rvsdg::Region * region) { // Stores have both addr and data operand // But we are only interested in the data operand type JLM_ASSERT(store_operands.size() % 2 == 0); - std::vector> storeTypes; + std::vector> storeTypes; for (size_t i = 1; i < store_operands.size(); i += 2) { storeTypes.push_back( - std::dynamic_pointer_cast(store_operands[i]->Type())); + std::dynamic_pointer_cast(store_operands[i]->Type())); } mem_req_op op(loadTypes, storeTypes); std::vector operands(load_operands); @@ -1363,7 +1363,7 @@ class store_op final : public jlm::rvsdg::simple_op virtual ~store_op() {} - store_op(const std::shared_ptr & pointeeType, size_t numStates) + store_op(const std::shared_ptr & pointeeType, size_t numStates) : simple_op(CreateInTypes(pointeeType, numStates), CreateOutTypes(pointeeType, numStates)) {} @@ -1377,7 +1377,7 @@ class store_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateInTypes(const std::shared_ptr & pointeeType, size_t numStates) + CreateInTypes(const std::shared_ptr & pointeeType, size_t numStates) { std::vector> types( { llvm::PointerType::Create(), pointeeType }); @@ -1389,7 +1389,7 @@ class store_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateOutTypes(const std::shared_ptr & pointeeType, size_t numStates) + CreateOutTypes(const std::shared_ptr & pointeeType, size_t numStates) { std::vector> types( numStates, @@ -1417,7 +1417,7 @@ class store_op final : public jlm::rvsdg::simple_op jlm::rvsdg::output & value, const std::vector & states) { - store_op op(std::dynamic_pointer_cast(value.Type()), states.size()); + store_op op(std::dynamic_pointer_cast(value.Type()), states.size()); 
std::vector inputs; inputs.push_back(&addr); inputs.push_back(&value); @@ -1431,10 +1431,10 @@ class store_op final : public jlm::rvsdg::simple_op return *util::AssertedCast(argument(0).get()); } - [[nodiscard]] const rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & GetStoredType() const noexcept { - return *util::AssertedCast(argument(1).get()); + return *util::AssertedCast(argument(1).get()); } }; @@ -1538,7 +1538,7 @@ class local_load_op final : public jlm::rvsdg::simple_op virtual ~local_load_op() {} - local_load_op(const std::shared_ptr & valuetype, size_t numStates) + local_load_op(const std::shared_ptr & valuetype, size_t numStates) : simple_op(CreateInTypes(valuetype, numStates), CreateOutTypes(valuetype, numStates)) {} @@ -1552,7 +1552,7 @@ class local_load_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateInTypes(const std::shared_ptr & valuetype, size_t numStates) + CreateInTypes(const std::shared_ptr & valuetype, size_t numStates) { std::vector> types(1, jlm::rvsdg::bittype::Create(64)); std::vector> states( @@ -1564,7 +1564,7 @@ class local_load_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateOutTypes(const std::shared_ptr & valuetype, size_t numStates) + CreateOutTypes(const std::shared_ptr & valuetype, size_t numStates) { std::vector> types(1, valuetype); std::vector> states( @@ -1594,7 +1594,7 @@ class local_load_op final : public jlm::rvsdg::simple_op jlm::rvsdg::output & load_result) { auto region = index.region(); - auto valuetype = std::dynamic_pointer_cast(load_result.Type()); + auto valuetype = std::dynamic_pointer_cast(load_result.Type()); local_load_op op(valuetype, states.size()); std::vector inputs; inputs.push_back(&index); @@ -1603,10 +1603,10 @@ class local_load_op final : public jlm::rvsdg::simple_op return jlm::rvsdg::simple_node::create_normalized(region, op, inputs); } - [[nodiscard]] std::shared_ptr + [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - return 
std::dynamic_pointer_cast(result(0)); + return std::dynamic_pointer_cast(result(0)); } }; @@ -1616,7 +1616,7 @@ class local_store_op final : public jlm::rvsdg::simple_op virtual ~local_store_op() {} - local_store_op(const std::shared_ptr & valuetype, size_t numStates) + local_store_op(const std::shared_ptr & valuetype, size_t numStates) : simple_op(CreateInTypes(valuetype, numStates), CreateOutTypes(valuetype, numStates)) {} @@ -1630,7 +1630,7 @@ class local_store_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateInTypes(const std::shared_ptr & valuetype, size_t numStates) + CreateInTypes(const std::shared_ptr & valuetype, size_t numStates) { std::vector> types( { jlm::rvsdg::bittype::Create(64), valuetype }); @@ -1642,7 +1642,7 @@ class local_store_op final : public jlm::rvsdg::simple_op } static std::vector> - CreateOutTypes(const std::shared_ptr & valuetype, size_t numStates) + CreateOutTypes(const std::shared_ptr & valuetype, size_t numStates) { std::vector> types( numStates, @@ -1671,7 +1671,7 @@ class local_store_op final : public jlm::rvsdg::simple_op const std::vector & states) { auto region = index.region(); - auto valuetype = std::dynamic_pointer_cast(value.Type()); + auto valuetype = std::dynamic_pointer_cast(value.Type()); local_store_op op(valuetype, states.size()); std::vector inputs; inputs.push_back(&index); @@ -1680,10 +1680,10 @@ class local_store_op final : public jlm::rvsdg::simple_op return jlm::rvsdg::simple_node::create_normalized(region, op, inputs); } - [[nodiscard]] const jlm::rvsdg::valuetype & + [[nodiscard]] const jlm::rvsdg::ValueType & GetStoredType() const noexcept { - return *util::AssertedCast(argument(1).get()); + return *util::AssertedCast(argument(1).get()); } }; diff --git a/jlm/llvm/backend/jlm2llvm/instruction.cpp b/jlm/llvm/backend/jlm2llvm/instruction.cpp index a834215e5..d99b57488 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.cpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.cpp @@ -279,7 +279,7 @@ 
convert_phi( static ::llvm::Value * CreateLoadInstruction( - const rvsdg::valuetype & loadedType, + const rvsdg::ValueType & loadedType, const variable * address, bool isVolatile, size_t alignment, @@ -840,7 +840,7 @@ convert_cast( context & ctx) { JLM_ASSERT(::llvm::Instruction::isCast(OPCODE)); - auto dsttype = std::dynamic_pointer_cast(op.result(0)); + auto dsttype = std::dynamic_pointer_cast(op.result(0)); auto operand = operands[0]; if (auto vt = dynamic_cast(&operand->type())) diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 8301b3f32..441142bcc 100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -42,7 +42,7 @@ has_return_value(const llvm::cfg & cfg) for (size_t n = 0; n < cfg.exit()->nresults(); n++) { auto result = cfg.exit()->result(n); - if (rvsdg::is(result->type())) + if (rvsdg::is(result->type())) return true; } @@ -65,7 +65,7 @@ create_return(const cfg_node * node, context & ctx) } auto result = cfg.exit()->result(0); - JLM_ASSERT(rvsdg::is(result->type())); + JLM_ASSERT(rvsdg::is(result->type())); builder.CreateRet(ctx.value(result)); } diff --git a/jlm/llvm/backend/jlm2llvm/type.cpp b/jlm/llvm/backend/jlm2llvm/type.cpp index fdb9250f0..c111568ee 100644 --- a/jlm/llvm/backend/jlm2llvm/type.cpp +++ b/jlm/llvm/backend/jlm2llvm/type.cpp @@ -47,11 +47,11 @@ convert(const FunctionType & functionType, context & ctx) } /* - The return type can either be (valuetype, statetype, statetype, ...) if the function has + The return type can either be (ValueType, statetype, statetype, ...) if the function has a return value, or (statetype, statetype, ...) if the function returns void. 
*/ auto resultType = ::llvm::Type::getVoidTy(lctx); - if (functionType.NumResults() > 0 && rvsdg::is(functionType.ResultType(0))) + if (functionType.NumResults() > 0 && rvsdg::is(functionType.ResultType(0))) resultType = convert_type(functionType.ResultType(0), ctx); return ::llvm::FunctionType::get(resultType, argumentTypes, isvararg); diff --git a/jlm/llvm/frontend/LlvmTypeConversion.cpp b/jlm/llvm/frontend/LlvmTypeConversion.cpp index 97b7a2b7b..db12cb7fc 100644 --- a/jlm/llvm/frontend/LlvmTypeConversion.cpp +++ b/jlm/llvm/frontend/LlvmTypeConversion.cpp @@ -31,7 +31,7 @@ ExtractFloatingPointSize(const ::llvm::Type * type) return i->second; } -static std::shared_ptr +static std::shared_ptr convert_integer_type(const ::llvm::Type * t, context & ctx) { JLM_ASSERT(t->getTypeID() == ::llvm::Type::IntegerTyID); @@ -40,14 +40,14 @@ convert_integer_type(const ::llvm::Type * t, context & ctx) return rvsdg::bittype::Create(type->getBitWidth()); } -static std::shared_ptr +static std::shared_ptr convert_pointer_type(const ::llvm::Type * t, context &) { JLM_ASSERT(t->getTypeID() == ::llvm::Type::PointerTyID); return PointerType::Create(); } -static std::shared_ptr +static std::shared_ptr convert_function_type(const ::llvm::Type * t, context & ctx) { JLM_ASSERT(t->getTypeID() == ::llvm::Type::FunctionTyID); @@ -72,7 +72,7 @@ convert_function_type(const ::llvm::Type * t, context & ctx) return FunctionType::Create(std::move(argumentTypes), std::move(resultTypes)); } -static std::shared_ptr +static std::shared_ptr convert_fp_type(const ::llvm::Type * t, context & ctx) { static const std::unordered_map<::llvm::Type::TypeID, fpsize> map( @@ -87,7 +87,7 @@ convert_fp_type(const ::llvm::Type * t, context & ctx) return fptype::Create(i->second); } -static std::shared_ptr +static std::shared_ptr convert_struct_type(const ::llvm::Type * t, context & ctx) { JLM_ASSERT(t->isStructTy()); @@ -100,7 +100,7 @@ convert_struct_type(const ::llvm::Type * t, context & ctx) : 
StructType::Create(isPacked, declaration); } -static std::shared_ptr +static std::shared_ptr convert_array_type(const ::llvm::Type * t, context & ctx) { JLM_ASSERT(t->isArrayTy()); @@ -108,7 +108,7 @@ convert_array_type(const ::llvm::Type * t, context & ctx) return arraytype::Create(std::move(etype), t->getArrayNumElements()); } -static std::shared_ptr +static std::shared_ptr convert_fixed_vector_type(const ::llvm::Type * t, context & ctx) { JLM_ASSERT(t->getTypeID() == ::llvm::Type::FixedVectorTyID); @@ -118,7 +118,7 @@ convert_fixed_vector_type(const ::llvm::Type * t, context & ctx) ::llvm::cast<::llvm::FixedVectorType>(t)->getNumElements()); } -static std::shared_ptr +static std::shared_ptr convert_scalable_vector_type(const ::llvm::Type * t, context & ctx) { JLM_ASSERT(t->getTypeID() == ::llvm::Type::ScalableVectorTyID); @@ -128,12 +128,12 @@ convert_scalable_vector_type(const ::llvm::Type * t, context & ctx) ::llvm::cast<::llvm::ScalableVectorType>(t)->getMinNumElements()); } -std::shared_ptr +std::shared_ptr ConvertType(const ::llvm::Type * t, context & ctx) { static std::unordered_map< ::llvm::Type::TypeID, - std::function(const ::llvm::Type *, context &)>> + std::function(const ::llvm::Type *, context &)>> map({ { ::llvm::Type::IntegerTyID, convert_integer_type }, { ::llvm::Type::PointerTyID, convert_pointer_type }, { ::llvm::Type::FunctionTyID, convert_function_type }, diff --git a/jlm/llvm/frontend/LlvmTypeConversion.hpp b/jlm/llvm/frontend/LlvmTypeConversion.hpp index 33b151797..c9dc00da6 100644 --- a/jlm/llvm/frontend/LlvmTypeConversion.hpp +++ b/jlm/llvm/frontend/LlvmTypeConversion.hpp @@ -27,7 +27,7 @@ class context; fpsize ExtractFloatingPointSize(const ::llvm::Type * type); -std::shared_ptr +std::shared_ptr ConvertType(const ::llvm::Type * type, context & ctx); static inline std::shared_ptr diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 857673879..86ac07ffc 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ 
b/jlm/llvm/ir/RvsdgModule.hpp @@ -23,7 +23,7 @@ class GraphImport final : public rvsdg::GraphImport private: GraphImport( rvsdg::graph & graph, - std::shared_ptr valueType, + std::shared_ptr valueType, std::string name, llvm::linkage linkage) : rvsdg::GraphImport(graph, PointerType::Create(), std::move(name)), @@ -38,7 +38,7 @@ class GraphImport final : public rvsdg::GraphImport return Linkage_; } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & ValueType() const noexcept { return ValueType_; @@ -50,7 +50,7 @@ class GraphImport final : public rvsdg::GraphImport static GraphImport & Create( rvsdg::graph & graph, - std::shared_ptr valueType, + std::shared_ptr valueType, std::string name, llvm::linkage linkage) { @@ -62,7 +62,7 @@ class GraphImport final : public rvsdg::GraphImport private: llvm::linkage Linkage_; - std::shared_ptr ValueType_; + std::shared_ptr ValueType_; }; /** diff --git a/jlm/llvm/ir/attribute.hpp b/jlm/llvm/ir/attribute.hpp index 1fdb6ca3f..69f3c17af 100644 --- a/jlm/llvm/ir/attribute.hpp +++ b/jlm/llvm/ir/attribute.hpp @@ -226,12 +226,12 @@ class type_attribute final : public enum_attribute public: ~type_attribute() noexcept override; - type_attribute(attribute::kind kind, std::shared_ptr type) + type_attribute(attribute::kind kind, std::shared_ptr type) : enum_attribute(kind), type_(std::move(type)) {} - [[nodiscard]] const jlm::rvsdg::valuetype & + [[nodiscard]] const jlm::rvsdg::ValueType & type() const noexcept { return *type_; @@ -241,7 +241,7 @@ class type_attribute final : public enum_attribute operator==(const attribute &) const override; private: - std::shared_ptr type_; + std::shared_ptr type_; }; } diff --git a/jlm/llvm/ir/ipgraph.hpp b/jlm/llvm/ir/ipgraph.hpp index 054eb5324..1f1e52b7e 100644 --- a/jlm/llvm/ir/ipgraph.hpp +++ b/jlm/llvm/ir/ipgraph.hpp @@ -377,7 +377,7 @@ class data_node final : public ipgraph_node inline data_node( llvm::ipgraph & clg, const std::string & name, - std::shared_ptr 
valueType, + std::shared_ptr valueType, const llvm::linkage & linkage, std::string section, bool constant) @@ -396,7 +396,7 @@ class data_node final : public ipgraph_node std::shared_ptr Type() const override; - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & GetValueType() const noexcept { return ValueType_; @@ -445,7 +445,7 @@ class data_node final : public ipgraph_node Create( llvm::ipgraph & clg, const std::string & name, - std::shared_ptr valueType, + std::shared_ptr valueType, const llvm::linkage & linkage, std::string section, bool constant) @@ -462,7 +462,7 @@ class data_node final : public ipgraph_node std::string name_; std::string Section_; llvm::linkage linkage_; - std::shared_ptr ValueType_; + std::shared_ptr ValueType_; std::unique_ptr init_; }; diff --git a/jlm/llvm/ir/operators/GetElementPtr.hpp b/jlm/llvm/ir/operators/GetElementPtr.hpp index bf371b709..c9eea2f3e 100644 --- a/jlm/llvm/ir/operators/GetElementPtr.hpp +++ b/jlm/llvm/ir/operators/GetElementPtr.hpp @@ -28,7 +28,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op public: GetElementPtrOperation( const std::vector> & offsetTypes, - std::shared_ptr pointeeType) + std::shared_ptr pointeeType) : simple_op(CreateOperandTypes(offsetTypes), { PointerType::Create() }), PointeeType_(std::move(pointeeType)) {} @@ -46,10 +46,10 @@ class GetElementPtrOperation final : public rvsdg::simple_op [[nodiscard]] std::unique_ptr copy() const override; - [[nodiscard]] const rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & GetPointeeType() const noexcept { - return *dynamic_cast(PointeeType_.get()); + return *dynamic_cast(PointeeType_.get()); } /** @@ -69,7 +69,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op Create( const variable * baseAddress, const std::vector & offsets, - std::shared_ptr pointeeType, + std::shared_ptr pointeeType, std::shared_ptr resultType) { CheckPointerType(baseAddress->type()); @@ -100,7 +100,7 @@ class 
GetElementPtrOperation final : public rvsdg::simple_op Create( rvsdg::output * baseAddress, const std::vector & offsets, - std::shared_ptr pointeeType, + std::shared_ptr pointeeType, std::shared_ptr resultType) { CheckPointerType(baseAddress->type()); @@ -152,7 +152,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op return types; } - std::shared_ptr PointeeType_; + std::shared_ptr PointeeType_; }; } diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index b49766929..e895f67d4 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -139,7 +139,7 @@ class LoadOperation : public rvsdg::simple_op JLM_ASSERT(is(addressType)); auto & loadedType = *resultTypes[0]; - JLM_ASSERT(is(loadedType)); + JLM_ASSERT(is(loadedType)); JLM_ASSERT(operandTypes.size() == resultTypes.size()); for (size_t n = 1; n < operandTypes.size(); n++) @@ -158,10 +158,10 @@ class LoadOperation : public rvsdg::simple_op return Alignment_; } - [[nodiscard]] std::shared_ptr + [[nodiscard]] std::shared_ptr GetLoadedType() const noexcept { - auto type = std::dynamic_pointer_cast(result(0)); + auto type = std::dynamic_pointer_cast(result(0)); JLM_ASSERT(type); return type; } @@ -190,7 +190,7 @@ class LoadVolatileOperation final : public LoadOperation ~LoadVolatileOperation() noexcept override; LoadVolatileOperation( - std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t numMemoryStates, size_t alignment) : LoadOperation( @@ -216,7 +216,7 @@ class LoadVolatileOperation final : public LoadOperation const variable * address, const variable * iOState, const variable * memoryState, - std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t alignment) { LoadVolatileOperation operation(std::move(loadedType), 1, alignment); @@ -237,7 +237,7 @@ class LoadVolatileOperation final : public LoadOperation } static std::vector> - CreateResultTypes(std::shared_ptr loadedType, size_t numMemoryStates) + 
CreateResultTypes(std::shared_ptr loadedType, size_t numMemoryStates) { std::vector> types( { std::move(loadedType), iostatetype::Create() }); @@ -330,7 +330,7 @@ class LoadNode : public rvsdg::simple_node GetLoadedValueOutput() const noexcept { auto valueOutput = output(0); - JLM_ASSERT(is(valueOutput->type())); + JLM_ASSERT(is(valueOutput->type())); return *valueOutput; } @@ -409,7 +409,7 @@ class LoadVolatileNode final : public LoadNode rvsdg::output & address, rvsdg::output & iOState, const std::vector & memoryStates, - std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t alignment) { std::vector operands({ &address, &iOState }); @@ -440,7 +440,7 @@ class LoadNonVolatileOperation final : public LoadOperation ~LoadNonVolatileOperation() noexcept override; LoadNonVolatileOperation( - std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t numMemoryStates, size_t alignment) : LoadOperation( @@ -472,7 +472,7 @@ class LoadNonVolatileOperation final : public LoadOperation Create( const variable * address, const variable * state, - std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t alignment) { LoadNonVolatileOperation operation(std::move(loadedType), 1, alignment); @@ -492,7 +492,7 @@ class LoadNonVolatileOperation final : public LoadOperation } static std::vector> - CreateResultTypes(std::shared_ptr loadedType, size_t numMemoryStates) + CreateResultTypes(std::shared_ptr loadedType, size_t numMemoryStates) { std::vector> types(1, std::move(loadedType)); std::vector> states( @@ -536,7 +536,7 @@ class LoadNonVolatileNode final : public LoadNode Create( rvsdg::output * address, const std::vector & memoryStates, - std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t alignment) { return rvsdg::outputs(&CreateNode(*address, memoryStates, std::move(loadedType), alignment)); @@ -546,7 +546,7 @@ class LoadNonVolatileNode final : public LoadNode CreateNode( rvsdg::output & address, const std::vector & memoryStates, - 
std::shared_ptr loadedType, + std::shared_ptr loadedType, size_t alignment) { std::vector operands({ &address }); diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 839771143..2fbfaa089 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -101,7 +101,7 @@ class StoreOperation : public rvsdg::simple_op JLM_ASSERT(is(addressType)); auto & storedType = *operandTypes[1]; - JLM_ASSERT(is(storedType)); + JLM_ASSERT(is(storedType)); JLM_ASSERT(operandTypes.size() == resultTypes.size() + 2); for (size_t n = 0; n < resultTypes.size(); n++) @@ -120,10 +120,10 @@ class StoreOperation : public rvsdg::simple_op return Alignment_; } - [[nodiscard]] const rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & GetStoredType() const noexcept { - return *util::AssertedCast(argument(1).get()); + return *util::AssertedCast(argument(1).get()); } [[nodiscard]] virtual size_t @@ -144,7 +144,7 @@ class StoreNonVolatileOperation final : public StoreOperation ~StoreNonVolatileOperation() noexcept override; StoreNonVolatileOperation( - std::shared_ptr storedType, + std::shared_ptr storedType, size_t numMemoryStates, size_t alignment) : StoreOperation( @@ -182,10 +182,10 @@ class StoreNonVolatileOperation final : public StoreOperation } private: - static const std::shared_ptr + static const std::shared_ptr CheckAndExtractStoredType(const std::shared_ptr & type) { - if (auto storedType = std::dynamic_pointer_cast(type)) + if (auto storedType = std::dynamic_pointer_cast(type)) { return storedType; } @@ -194,7 +194,7 @@ class StoreNonVolatileOperation final : public StoreOperation } static std::vector> - CreateOperandTypes(std::shared_ptr storedType, size_t numMemoryStates) + CreateOperandTypes(std::shared_ptr storedType, size_t numMemoryStates) { std::vector> types( { PointerType::Create(), std::move(storedType) }); @@ -287,7 +287,7 @@ class StoreNode : public rvsdg::simple_node GetStoredValueInput() const noexcept { auto 
valueInput = input(1); - JLM_ASSERT(is(valueInput->type())); + JLM_ASSERT(is(valueInput->type())); return *valueInput; } @@ -381,10 +381,10 @@ class StoreNonVolatileNode final : public StoreNode } private: - static std::shared_ptr + static std::shared_ptr CheckAndExtractStoredType(const std::shared_ptr & type) { - if (auto storedType = std::dynamic_pointer_cast(type)) + if (auto storedType = std::dynamic_pointer_cast(type)) { return storedType; } @@ -410,7 +410,7 @@ class StoreVolatileOperation final : public StoreOperation ~StoreVolatileOperation() noexcept override; StoreVolatileOperation( - std::shared_ptr storedType, + std::shared_ptr storedType, size_t numMemoryStates, size_t alignment) : StoreOperation( @@ -446,17 +446,17 @@ class StoreVolatileOperation final : public StoreOperation } private: - static std::shared_ptr + static std::shared_ptr CheckAndExtractStoredType(const std::shared_ptr & type) { - if (auto storedType = std::dynamic_pointer_cast(type)) + if (auto storedType = std::dynamic_pointer_cast(type)) return storedType; throw jlm::util::error("Expected value type"); } static std::vector> - CreateOperandTypes(std::shared_ptr storedType, size_t numMemoryStates) + CreateOperandTypes(std::shared_ptr storedType, size_t numMemoryStates) { std::vector> types( { PointerType::Create(), std::move(storedType), iostatetype::Create() }); @@ -559,10 +559,10 @@ class StoreVolatileNode final : public StoreNode } private: - static std::shared_ptr + static std::shared_ptr CheckAndExtractStoredType(const std::shared_ptr & type) { - if (auto storedType = std::dynamic_pointer_cast(type)) + if (auto storedType = std::dynamic_pointer_cast(type)) return storedType; throw jlm::util::error("Expected value type."); diff --git a/jlm/llvm/ir/operators/alloca.hpp b/jlm/llvm/ir/operators/alloca.hpp index 1f6b7a0a2..65a509475 100644 --- a/jlm/llvm/ir/operators/alloca.hpp +++ b/jlm/llvm/ir/operators/alloca.hpp @@ -24,7 +24,7 @@ class alloca_op final : public rvsdg::simple_op 
virtual ~alloca_op() noexcept; inline alloca_op( - std::shared_ptr allocatedType, + std::shared_ptr allocatedType, std::shared_ptr btype, size_t alignment) : simple_op({ btype }, { { PointerType::Create() }, { MemoryStateType::Create() } }), @@ -51,13 +51,13 @@ class alloca_op final : public rvsdg::simple_op return *std::static_pointer_cast(argument(0)); } - inline const rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & value_type() const noexcept { return *AllocatedType_; } - inline const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & ValueType() const noexcept { return AllocatedType_; @@ -71,7 +71,7 @@ class alloca_op final : public rvsdg::simple_op static std::unique_ptr create( - std::shared_ptr allocatedType, + std::shared_ptr allocatedType, const variable * size, size_t alignment) { @@ -85,7 +85,7 @@ class alloca_op final : public rvsdg::simple_op static std::vector create( - std::shared_ptr allocatedType, + std::shared_ptr allocatedType, rvsdg::output * size, size_t alignment) { @@ -99,7 +99,7 @@ class alloca_op final : public rvsdg::simple_op private: size_t alignment_; - std::shared_ptr AllocatedType_; + std::shared_ptr AllocatedType_; }; } diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index eb1f222c1..ff88f914a 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -26,7 +26,7 @@ class operation final : public rvsdg::structural_op ~operation() override; operation( - std::shared_ptr type, + std::shared_ptr type, const std::string & name, const llvm::linkage & linkage, std::string section, @@ -81,13 +81,13 @@ class operation final : public rvsdg::structural_op return constant_; } - [[nodiscard]] const rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & type() const noexcept { return *type_; } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return type_; @@ -98,7 +98,7 @@ class operation final : public 
rvsdg::structural_op std::string name_; std::string Section_; llvm::linkage linkage_; - std::shared_ptr type_; + std::shared_ptr type_; }; class cvargument; @@ -162,13 +162,13 @@ class node final : public rvsdg::structural_node return *static_cast(&structural_node::operation()); } - [[nodiscard]] const rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & type() const noexcept { return operation().type(); } - [[nodiscard]] const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return operation().Type(); @@ -282,7 +282,7 @@ class node final : public rvsdg::structural_node static node * Create( rvsdg::Region * parent, - std::shared_ptr type, + std::shared_ptr type, const std::string & name, const llvm::linkage & linkage, std::string section, diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index 5b87fdc89..55777a9a1 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -639,7 +639,7 @@ class ConstantDataArray final : public jlm::rvsdg::simple_op public: virtual ~ConstantDataArray(); - ConstantDataArray(const std::shared_ptr & type, size_t size) + ConstantDataArray(const std::shared_ptr & type, size_t size) : simple_op({ size, type }, { arraytype::Create(type, size) }) { if (size == 0) @@ -661,7 +661,7 @@ class ConstantDataArray final : public jlm::rvsdg::simple_op return std::static_pointer_cast(result(0))->nelements(); } - const jlm::rvsdg::valuetype & + const jlm::rvsdg::ValueType & type() const noexcept { return std::static_pointer_cast(result(0))->element_type(); @@ -673,7 +673,7 @@ class ConstantDataArray final : public jlm::rvsdg::simple_op if (elements.size() == 0) throw jlm::util::error("expected at least one element."); - auto vt = std::dynamic_pointer_cast(elements[0]->Type()); + auto vt = std::dynamic_pointer_cast(elements[0]->Type()); if (!vt) throw jlm::util::error("expected value type."); @@ -687,7 +687,7 @@ class ConstantDataArray final 
: public jlm::rvsdg::simple_op if (elements.empty()) throw jlm::util::error("Expected at least one element."); - auto valueType = std::dynamic_pointer_cast(elements[0]->Type()); + auto valueType = std::dynamic_pointer_cast(elements[0]->Type()); if (!valueType) { throw jlm::util::error("Expected value type."); @@ -1081,7 +1081,7 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op public: ~PoisonValueOperation() noexcept override; - explicit PoisonValueOperation(std::shared_ptr type) + explicit PoisonValueOperation(std::shared_ptr type) : jlm::rvsdg::simple_op({}, { std::move(type) }) {} @@ -1104,10 +1104,10 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op std::unique_ptr copy() const override; - const jlm::rvsdg::valuetype & + const jlm::rvsdg::ValueType & GetType() const noexcept { - return *util::AssertedCast(result(0).get()); + return *util::AssertedCast(result(0).get()); } static std::unique_ptr @@ -1129,10 +1129,10 @@ class PoisonValueOperation final : public jlm::rvsdg::simple_op } private: - static std::shared_ptr + static std::shared_ptr CheckAndConvertType(const std::shared_ptr & type) { - if (auto valueType = std::dynamic_pointer_cast(type)) + if (auto valueType = std::dynamic_pointer_cast(type)) return valueType; throw jlm::util::error("Expected value type."); @@ -1490,8 +1490,8 @@ class bitcast_op final : public jlm::rvsdg::unary_op virtual ~bitcast_op(); inline bitcast_op( - std::shared_ptr srctype, - std::shared_ptr dsttype) + std::shared_ptr srctype, + std::shared_ptr dsttype) : unary_op(std::move(srctype), std::move(dsttype)) {} @@ -1549,17 +1549,17 @@ class bitcast_op final : public jlm::rvsdg::unary_op private: static std::pair< - std::shared_ptr, - std::shared_ptr> + std::shared_ptr, + std::shared_ptr> check_types( const std::shared_ptr & otype, const std::shared_ptr & rtype) { - auto ot = std::dynamic_pointer_cast(otype); + auto ot = std::dynamic_pointer_cast(otype); if (!ot) throw jlm::util::error("expected value 
type."); - auto rt = std::dynamic_pointer_cast(rtype); + auto rt = std::dynamic_pointer_cast(rtype); if (!rt) throw jlm::util::error("expected value type."); @@ -1852,7 +1852,7 @@ class ConstantArray final : public jlm::rvsdg::simple_op public: virtual ~ConstantArray(); - ConstantArray(const std::shared_ptr & type, size_t size) + ConstantArray(const std::shared_ptr & type, size_t size) : jlm::rvsdg::simple_op({ size, type }, { arraytype::Create(type, size) }) { if (size == 0) @@ -1874,7 +1874,7 @@ class ConstantArray final : public jlm::rvsdg::simple_op return std::static_pointer_cast(result(0))->nelements(); } - const jlm::rvsdg::valuetype & + const jlm::rvsdg::ValueType & type() const noexcept { return std::static_pointer_cast(result(0))->element_type(); @@ -1886,7 +1886,7 @@ class ConstantArray final : public jlm::rvsdg::simple_op if (elements.size() == 0) throw jlm::util::error("expected at least one element.\n"); - auto vt = std::dynamic_pointer_cast(elements[0]->Type()); + auto vt = std::dynamic_pointer_cast(elements[0]->Type()); if (!vt) throw jlm::util::error("expected value Type.\n"); @@ -1900,7 +1900,7 @@ class ConstantArray final : public jlm::rvsdg::simple_op if (operands.empty()) throw util::error("Expected at least one element.\n"); - auto valueType = std::dynamic_pointer_cast(operands[0]->Type()); + auto valueType = std::dynamic_pointer_cast(operands[0]->Type()); if (!valueType) { throw util::error("Expected value type.\n"); @@ -2092,7 +2092,7 @@ class insertelement_op final : public jlm::rvsdg::simple_op inline insertelement_op( const std::shared_ptr & vectype, - const std::shared_ptr & vtype, + const std::shared_ptr & vtype, const std::shared_ptr & btype) : simple_op({ vectype, vtype, btype }, { vectype }) { @@ -2120,7 +2120,7 @@ class insertelement_op final : public jlm::rvsdg::simple_op if (!vct) throw jlm::util::error("expected vector type."); - auto vt = std::dynamic_pointer_cast(value->Type()); + auto vt = 
std::dynamic_pointer_cast(value->Type()); if (!vt) throw jlm::util::error("expected value type."); @@ -2349,7 +2349,7 @@ class constant_data_vector_op final : public jlm::rvsdg::simple_op return std::static_pointer_cast(result(0))->size(); } - const jlm::rvsdg::valuetype & + const jlm::rvsdg::ValueType & type() const noexcept { return std::static_pointer_cast(result(0))->type(); @@ -2361,7 +2361,7 @@ class constant_data_vector_op final : public jlm::rvsdg::simple_op if (elements.empty()) throw jlm::util::error("Expected at least one element."); - auto vt = std::dynamic_pointer_cast(elements[0]->Type()); + auto vt = std::dynamic_pointer_cast(elements[0]->Type()); if (!vt) throw jlm::util::error("Expected value type."); @@ -2410,10 +2410,10 @@ class ExtractValue final : public jlm::rvsdg::simple_op return indices_.end(); } - const jlm::rvsdg::valuetype & + const jlm::rvsdg::ValueType & type() const noexcept { - return *std::static_pointer_cast(argument(0)); + return *std::static_pointer_cast(argument(0)); } static inline std::unique_ptr diff --git a/jlm/llvm/ir/types.cpp b/jlm/llvm/ir/types.cpp index 9b1b1d305..018f52e07 100644 --- a/jlm/llvm/ir/types.cpp +++ b/jlm/llvm/ir/types.cpp @@ -20,7 +20,7 @@ FunctionType::~FunctionType() noexcept = default; FunctionType::FunctionType( std::vector> argumentTypes, std::vector> resultTypes) - : jlm::rvsdg::valuetype(), + : jlm::rvsdg::ValueType(), ResultTypes_(std::move(resultTypes)), ArgumentTypes_(std::move(argumentTypes)) {} @@ -28,7 +28,7 @@ FunctionType::FunctionType( FunctionType::FunctionType(const FunctionType & rhs) = default; FunctionType::FunctionType(FunctionType && other) noexcept - : jlm::rvsdg::valuetype(other), + : jlm::rvsdg::ValueType(other), ResultTypes_(std::move(other.ResultTypes_)), ArgumentTypes_(std::move(other.ArgumentTypes_)) {} diff --git a/jlm/llvm/ir/types.hpp b/jlm/llvm/ir/types.hpp index 24dcdf8df..029b41598 100644 --- a/jlm/llvm/ir/types.hpp +++ b/jlm/llvm/ir/types.hpp @@ -19,7 +19,7 @@ namespace 
jlm::llvm /** \brief Function type class * */ -class FunctionType final : public jlm::rvsdg::valuetype +class FunctionType final : public jlm::rvsdg::ValueType { public: ~FunctionType() noexcept override; @@ -93,7 +93,7 @@ class FunctionType final : public jlm::rvsdg::valuetype * * This operator is the Jlm equivalent of LLVM's PointerType class. */ -class PointerType final : public jlm::rvsdg::valuetype +class PointerType final : public jlm::rvsdg::ValueType { public: ~PointerType() noexcept override; @@ -115,13 +115,13 @@ class PointerType final : public jlm::rvsdg::valuetype /* array type */ -class arraytype final : public jlm::rvsdg::valuetype +class arraytype final : public rvsdg::ValueType { public: virtual ~arraytype(); - inline arraytype(std::shared_ptr type, size_t nelements) - : jlm::rvsdg::valuetype(), + inline arraytype(std::shared_ptr type, size_t nelements) + : jlm::rvsdg::ValueType(), nelements_(nelements), type_(std::move(type)) {} @@ -151,27 +151,27 @@ class arraytype final : public jlm::rvsdg::valuetype return nelements_; } - inline const jlm::rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & element_type() const noexcept { return *type_; } - inline const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & GetElementType() const noexcept { return type_; } static std::shared_ptr - Create(std::shared_ptr type, size_t nelements) + Create(std::shared_ptr type, size_t nelements) { return std::make_shared(std::move(type), nelements); } private: size_t nelements_; - std::shared_ptr type_; + std::shared_ptr type_; }; /* floating point type */ @@ -185,13 +185,13 @@ enum class fpsize fp128 }; -class fptype final : public jlm::rvsdg::valuetype +class fptype final : public rvsdg::ValueType { public: virtual ~fptype(); inline fptype(const fpsize & size) - : jlm::rvsdg::valuetype(), + : rvsdg::ValueType(), size_(size) {} @@ -257,7 +257,7 @@ create_varargtype() * * This class is the equivalent of LLVM's StructType class. 
*/ -class StructType final : public jlm::rvsdg::valuetype +class StructType final : public rvsdg::ValueType { public: class Declaration; @@ -265,13 +265,13 @@ class StructType final : public jlm::rvsdg::valuetype ~StructType() override; StructType(bool isPacked, const Declaration & declaration) - : jlm::rvsdg::valuetype(), + : rvsdg::ValueType(), IsPacked_(isPacked), Declaration_(declaration) {} StructType(std::string name, bool isPacked, const Declaration & declaration) - : jlm::rvsdg::valuetype(), + : rvsdg::ValueType(), IsPacked_(isPacked), Name_(std::move(name)), Declaration_(declaration) @@ -360,24 +360,24 @@ class StructType::Declaration final return Types_.size(); } - [[nodiscard]] const valuetype & + [[nodiscard]] const ValueType & GetElement(size_t index) const noexcept { JLM_ASSERT(index < NumElements()); - return *util::AssertedCast(Types_[index].get()); + return *util::AssertedCast(Types_[index].get()); } - [[nodiscard]] std::shared_ptr + [[nodiscard]] std::shared_ptr GetElementType(size_t index) const noexcept { JLM_ASSERT(index < NumElements()); - auto type = std::dynamic_pointer_cast(Types_[index]); + auto type = std::dynamic_pointer_cast(Types_[index]); JLM_ASSERT(type); return type; } void - Append(std::shared_ptr type) + Append(std::shared_ptr type) { Types_.push_back(std::move(type)); } @@ -400,10 +400,10 @@ class StructType::Declaration final /* vector type */ -class vectortype : public jlm::rvsdg::valuetype +class vectortype : public rvsdg::ValueType { public: - vectortype(std::shared_ptr type, size_t size) + vectortype(std::shared_ptr type, size_t size) : size_(size), type_(std::move(type)) {} @@ -427,13 +427,13 @@ class vectortype : public jlm::rvsdg::valuetype return size_; } - const jlm::rvsdg::valuetype & + [[nodiscard]] const rvsdg::ValueType & type() const noexcept { return *type_; } - const std::shared_ptr & + [[nodiscard]] const std::shared_ptr & Type() const noexcept { return type_; @@ -441,7 +441,7 @@ class vectortype : public 
jlm::rvsdg::valuetype private: size_t size_; - std::shared_ptr type_; + std::shared_ptr type_; }; class fixedvectortype final : public vectortype @@ -449,7 +449,7 @@ class fixedvectortype final : public vectortype public: ~fixedvectortype() override; - fixedvectortype(std::shared_ptr type, size_t size) + fixedvectortype(std::shared_ptr type, size_t size) : vectortype(std::move(type), size) {} @@ -463,7 +463,7 @@ class fixedvectortype final : public vectortype debug_string() const override; static std::shared_ptr - Create(std::shared_ptr type, size_t size) + Create(std::shared_ptr type, size_t size) { return std::make_shared(std::move(type), size); } @@ -474,7 +474,7 @@ class scalablevectortype final : public vectortype public: ~scalablevectortype() override; - scalablevectortype(std::shared_ptr type, size_t size) + scalablevectortype(std::shared_ptr type, size_t size) : vectortype(std::move(type), size) {} @@ -488,7 +488,7 @@ class scalablevectortype final : public vectortype debug_string() const override; static std::shared_ptr - Create(std::shared_ptr type, size_t size) + Create(std::shared_ptr type, size_t size) { return std::make_shared(std::move(type), size); } diff --git a/jlm/rvsdg/bitstring/type.hpp b/jlm/rvsdg/bitstring/type.hpp index 8091ff45b..3eeae18ad 100644 --- a/jlm/rvsdg/bitstring/type.hpp +++ b/jlm/rvsdg/bitstring/type.hpp @@ -15,7 +15,7 @@ namespace jlm::rvsdg /* bitstring type */ -class bittype final : public jlm::rvsdg::valuetype +class bittype final : public jlm::rvsdg::ValueType { public: virtual ~bittype() noexcept; diff --git a/jlm/rvsdg/type.cpp b/jlm/rvsdg/type.cpp index 760b920fa..4677ac340 100644 --- a/jlm/rvsdg/type.cpp +++ b/jlm/rvsdg/type.cpp @@ -9,11 +9,9 @@ namespace jlm::rvsdg { -Type::~Type() noexcept -{} +Type::~Type() noexcept = default; -valuetype::~valuetype() noexcept -{} +ValueType::~ValueType() noexcept = default; statetype::~statetype() noexcept {} diff --git a/jlm/rvsdg/type.hpp b/jlm/rvsdg/type.hpp index 
6cc3774ac..6beb4e290 100644 --- a/jlm/rvsdg/type.hpp +++ b/jlm/rvsdg/type.hpp @@ -44,13 +44,13 @@ class Type ComputeHash() const noexcept = 0; }; -class valuetype : public jlm::rvsdg::Type +class ValueType : public Type { public: - virtual ~valuetype() noexcept; + ~ValueType() noexcept override; protected: - inline constexpr valuetype() noexcept + constexpr ValueType() noexcept : jlm::rvsdg::Type() {} }; diff --git a/tests/jlm/llvm/ir/TestTypes.cpp b/tests/jlm/llvm/ir/TestTypes.cpp index cc38be22d..2fe73eacd 100644 --- a/tests/jlm/llvm/ir/TestTypes.cpp +++ b/tests/jlm/llvm/ir/TestTypes.cpp @@ -28,9 +28,9 @@ TestIsOrContains() assert(!IsOrContains(*ioStateType)); // Checking supertypes should work - assert(IsOrContains(*pointerType)); - assert(!IsOrContains(*memoryStateType)); - assert(!IsOrContains(*ioStateType)); + assert(IsOrContains(*pointerType)); + assert(!IsOrContains(*memoryStateType)); + assert(!IsOrContains(*ioStateType)); assert(!IsOrContains(*pointerType)); assert(IsOrContains(*memoryStateType)); assert(IsOrContains(*ioStateType)); diff --git a/tests/test-types.hpp b/tests/test-types.hpp index b45db75a6..ac2e7e0ad 100644 --- a/tests/test-types.hpp +++ b/tests/test-types.hpp @@ -11,13 +11,13 @@ namespace jlm::tests { -class valuetype final : public rvsdg::valuetype +class valuetype final : public rvsdg::ValueType { public: virtual ~valuetype(); inline constexpr valuetype() noexcept - : rvsdg::valuetype() + : rvsdg::ValueType() {} virtual std::string From 08d3362e4f6679f7fa370b81ee6e77c86d7912f2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 3 Oct 2024 07:06:28 +0200 Subject: [PATCH 098/170] Rename statetype class to StateType (#645) --- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- .../rhls2firrtl/verilator-harness-hls.cpp | 17 +++++++---------- jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp | 2 +- jlm/hls/ir/hls.hpp | 4 ++-- jlm/llvm/backend/dot/DotWriter.cpp | 2 +- jlm/llvm/backend/jlm2llvm/instruction.cpp | 2 +- 
jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 2 +- jlm/llvm/backend/jlm2llvm/type.cpp | 4 ++-- jlm/llvm/ir/operators/Load.cpp | 2 +- jlm/llvm/ir/operators/Load.hpp | 2 +- jlm/llvm/ir/operators/Store.hpp | 2 +- jlm/llvm/ir/types.hpp | 17 ++++++----------- jlm/llvm/opt/push.cpp | 2 +- jlm/rvsdg/control.cpp | 2 +- jlm/rvsdg/control.hpp | 2 +- jlm/rvsdg/statemux.hpp | 4 ++-- jlm/rvsdg/type.cpp | 3 +-- jlm/rvsdg/type.hpp | 8 ++++---- tests/jlm/llvm/ir/TestTypes.cpp | 14 +++++++------- tests/test-types.hpp | 4 ++-- 20 files changed, 44 insertions(+), 53 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index a4c25860a..ade9137d9 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -121,7 +121,7 @@ BaseHLS::JlmSize(const jlm::rvsdg::Type * type) { return ceil(log2(ct->nalternatives())); } - else if (dynamic_cast(type)) + else if (dynamic_cast(type)) { return 1; } diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index 510531909..765826dae 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -478,7 +478,7 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) size_t register_ix = 0; for (size_t i = 0; i < ln->type().NumArguments(); ++i) { - if (dynamic_cast(&ln->type().ArgumentType(i))) + if (dynamic_cast(&ln->type().ArgumentType(i))) { register_ix++; continue; @@ -552,8 +552,7 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) " hls_loads.erase(hls_loads.begin(), hls_loads.end());\n" " hls_stores.erase(hls_stores.begin(), hls_stores.end());\n" " mem_access_ctr = 0;\n"; - if (ln->type().NumResults() - && !dynamic_cast(&ln->type().ResultType(0))) + if (ln->type().NumResults() && !dynamic_cast(&ln->type().ResultType(0))) { cpp << " return top->o_data_0;\n"; } @@ -607,8 +606,7 @@ 
VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) " close(fd[0]);\n" " }\n"; - if (ln->type().NumResults() - && !dynamic_cast(&ln->type().ResultType(0))) + if (ln->type().NumResults() && !dynamic_cast(&ln->type().ResultType(0))) { cpp << " return 0;\n"; } @@ -618,8 +616,7 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) " "; call_function(cpp, ln, "run_ref"); cpp << "\n"; - if (ln->type().NumResults() - && !dynamic_cast(&ln->type().ResultType(0))) + if (ln->type().NumResults() && !dynamic_cast(&ln->type().ResultType(0))) { cpp << " return "; } @@ -643,7 +640,7 @@ VerilatorHarnessHLS::call_function( cpp << function_name << "("; for (size_t i = 0; i < ln->type().NumArguments(); ++i) { - if (dynamic_cast(&ln->type().ArgumentType(i))) + if (dynamic_cast(&ln->type().ArgumentType(i))) { continue; } @@ -674,7 +671,7 @@ VerilatorHarnessHLS::get_function_header( else { auto type = &ln->type().ResultType(0); - if (dynamic_cast(type)) + if (dynamic_cast(type)) { return_type = "void"; } @@ -690,7 +687,7 @@ VerilatorHarnessHLS::get_function_header( cpp << return_type << " " << function_name << "(\n"; for (size_t i = 0; i < ln->type().NumArguments(); ++i) { - if (dynamic_cast(&ln->type().ArgumentType(i))) + if (dynamic_cast(&ln->type().ArgumentType(i))) { continue; } diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index b1a83aac6..82440d20d 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -96,7 +96,7 @@ CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) for (size_t i = 0; i < gammaNode.noutputs(); ++i) { auto gammaOutput = gammaNode.output(i); - if (rvsdg::is(gammaOutput->type())) + if (rvsdg::is(gammaOutput->type())) { // don't allow state outputs since they imply operations with side effects return false; diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 6072e6844..4badf0a59 100644 --- a/jlm/hls/ir/hls.hpp +++ 
b/jlm/hls/ir/hls.hpp @@ -457,14 +457,14 @@ class buffer_op final : public jlm::rvsdg::simple_op private: }; -class triggertype final : public jlm::rvsdg::statetype +class triggertype final : public rvsdg::StateType { public: virtual ~triggertype() {} triggertype() - : jlm::rvsdg::statetype() + : rvsdg::StateType() {} std::string diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp index 30aaec6bc..f3fede3b2 100644 --- a/jlm/llvm/backend/dot/DotWriter.cpp +++ b/jlm/llvm/backend/dot/DotWriter.cpp @@ -36,7 +36,7 @@ GetOrCreateTypeGraphNode(const rvsdg::Type & type, util::Graph & typeGraph) node.SetLabel(type.debug_string()); // Some types get special handling, such as adding incoming edges from aggregate types - if (rvsdg::is(type) || rvsdg::is(type) + if (rvsdg::is(type) || rvsdg::is(type) || rvsdg::is(type) || rvsdg::is(type) || rvsdg::is(type)) { // No need to provide any information beyond the debug string diff --git a/jlm/llvm/backend/jlm2llvm/instruction.cpp b/jlm/llvm/backend/jlm2llvm/instruction.cpp index d99b57488..1dcd2b3b3 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.cpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.cpp @@ -661,7 +661,7 @@ convert_select( JLM_ASSERT(is(op)); auto & select = *static_cast(&op); - if (rvsdg::is(select.type())) + if (rvsdg::is(select.type())) return nullptr; auto c = ctx.value(operands[0]); diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 441142bcc..108e0eb81 100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -344,7 +344,7 @@ convert_attributes(const function_node & f, context & ctx) { auto argument = f.cfg()->entry()->argument(n); - if (rvsdg::is(argument->type())) + if (rvsdg::is(argument->type())) continue; argsets.push_back(convert_attributes(argument->attributes(), ctx)); diff --git a/jlm/llvm/backend/jlm2llvm/type.cpp b/jlm/llvm/backend/jlm2llvm/type.cpp index c111568ee..c995b2e8b 100644 
--- a/jlm/llvm/backend/jlm2llvm/type.cpp +++ b/jlm/llvm/backend/jlm2llvm/type.cpp @@ -47,8 +47,8 @@ convert(const FunctionType & functionType, context & ctx) } /* - The return type can either be (ValueType, statetype, statetype, ...) if the function has - a return value, or (statetype, statetype, ...) if the function returns void. + The return type can either be (ValueType, StateType, StateType, ...) if the function has + a return value, or (StateType, StateType, ...) if the function returns void. */ auto resultType = ::llvm::Type::getVoidTy(lctx); if (functionType.NumResults() > 0 && rvsdg::is(functionType.ResultType(0))) diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index f960636bc..8e45921ce 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -529,7 +529,7 @@ perform_load_load_state_reduction( rvsdg::output *(size_t, rvsdg::output *, std::vector> &)> reduce_state = [&](size_t index, rvsdg::output * operand, auto & mxstates) { - JLM_ASSERT(rvsdg::is(operand->type())); + JLM_ASSERT(rvsdg::is(operand->type())); if (!is(rvsdg::node_output::node(operand))) return operand; diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index e895f67d4..db4beb029 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -147,7 +147,7 @@ class LoadOperation : public rvsdg::simple_op auto & operandType = *operandTypes[n]; auto & resultType = *resultTypes[n]; JLM_ASSERT(operandType == resultType); - JLM_ASSERT(is(operandType)); + JLM_ASSERT(is(operandType)); } } diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 2fbfaa089..89903aaca 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -109,7 +109,7 @@ class StoreOperation : public rvsdg::simple_op auto & operandType = *operandTypes[n + 2]; auto & resultType = *resultTypes[n]; JLM_ASSERT(operandType == resultType); - JLM_ASSERT(is(operandType)); + 
JLM_ASSERT(is(operandType)); } } diff --git a/jlm/llvm/ir/types.hpp b/jlm/llvm/ir/types.hpp index 029b41598..4095cf78d 100644 --- a/jlm/llvm/ir/types.hpp +++ b/jlm/llvm/ir/types.hpp @@ -219,14 +219,12 @@ class fptype final : public rvsdg::ValueType /* vararg type */ -class varargtype final : public jlm::rvsdg::statetype +class varargtype final : public rvsdg::StateType { public: virtual ~varargtype(); - inline constexpr varargtype() - : jlm::rvsdg::statetype() - {} + constexpr varargtype() = default; virtual bool operator==(const jlm::rvsdg::Type & other) const noexcept override; @@ -498,13 +496,12 @@ class scalablevectortype final : public vectortype * * This type is used for state edges that sequentialize input/output operations. */ -class iostatetype final : public jlm::rvsdg::statetype +class iostatetype final : public rvsdg::StateType { public: ~iostatetype() override; - constexpr iostatetype() noexcept - {} + constexpr iostatetype() noexcept = default; virtual bool operator==(const jlm::rvsdg::Type & other) const noexcept override; @@ -524,14 +521,12 @@ class iostatetype final : public jlm::rvsdg::statetype * Represents the type of abstract memory locations and is used in state edges for sequentialiazing * memory operations, such as load and store operations. 
*/ -class MemoryStateType final : public jlm::rvsdg::statetype +class MemoryStateType final : public rvsdg::StateType { public: ~MemoryStateType() noexcept override; - constexpr MemoryStateType() noexcept - : jlm::rvsdg::statetype() - {} + constexpr MemoryStateType() noexcept = default; std::string debug_string() const override; diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 695f98615..f9b14f62b 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -87,7 +87,7 @@ has_side_effects(const jlm::rvsdg::node * node) { for (size_t n = 0; n < node->noutputs(); n++) { - if (dynamic_cast(&node->output(n)->type())) + if (dynamic_cast(&node->output(n)->type())) return true; } diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index 3272e79d0..1eedce463 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -22,7 +22,7 @@ ctltype::~ctltype() noexcept {} ctltype::ctltype(size_t nalternatives) - : jlm::rvsdg::statetype(), + : StateType(), nalternatives_(nalternatives) {} diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index 9c27174da..6a9bd4739 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -22,7 +22,7 @@ namespace jlm::rvsdg /* control type */ -class ctltype final : public jlm::rvsdg::statetype +class ctltype final : public StateType { public: virtual ~ctltype() noexcept; diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index a1ce62f89..bea46e80a 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -65,7 +65,7 @@ class mux_op final : public simple_op public: virtual ~mux_op() noexcept; - inline mux_op(std::shared_ptr type, size_t narguments, size_t nresults) + inline mux_op(std::shared_ptr type, size_t narguments, size_t nresults) : simple_op({ narguments, type }, { nresults, type }) {} @@ -100,7 +100,7 @@ create_state_mux( if (operands.empty()) throw jlm::util::error("Insufficient number of operands."); - auto st = std::dynamic_pointer_cast(type); + auto st = 
std::dynamic_pointer_cast(type); if (!st) throw jlm::util::error("Expected state type."); diff --git a/jlm/rvsdg/type.cpp b/jlm/rvsdg/type.cpp index 4677ac340..f5e27096e 100644 --- a/jlm/rvsdg/type.cpp +++ b/jlm/rvsdg/type.cpp @@ -13,7 +13,6 @@ Type::~Type() noexcept = default; ValueType::~ValueType() noexcept = default; -statetype::~statetype() noexcept -{} +StateType::~StateType() noexcept = default; } diff --git a/jlm/rvsdg/type.hpp b/jlm/rvsdg/type.hpp index 6beb4e290..3595df847 100644 --- a/jlm/rvsdg/type.hpp +++ b/jlm/rvsdg/type.hpp @@ -55,14 +55,14 @@ class ValueType : public Type {} }; -class statetype : public jlm::rvsdg::Type +class StateType : public Type { public: - virtual ~statetype() noexcept; + ~StateType() noexcept override; protected: - inline constexpr statetype() noexcept - : jlm::rvsdg::Type() + constexpr StateType() noexcept + : Type() {} }; diff --git a/tests/jlm/llvm/ir/TestTypes.cpp b/tests/jlm/llvm/ir/TestTypes.cpp index 2fe73eacd..709f25859 100644 --- a/tests/jlm/llvm/ir/TestTypes.cpp +++ b/tests/jlm/llvm/ir/TestTypes.cpp @@ -31,9 +31,9 @@ TestIsOrContains() assert(IsOrContains(*pointerType)); assert(!IsOrContains(*memoryStateType)); assert(!IsOrContains(*ioStateType)); - assert(!IsOrContains(*pointerType)); - assert(IsOrContains(*memoryStateType)); - assert(IsOrContains(*ioStateType)); + assert(!IsOrContains(*pointerType)); + assert(IsOrContains(*memoryStateType)); + assert(IsOrContains(*ioStateType)); // Function types are not aggregate types auto functionType = FunctionType::Create( @@ -42,7 +42,7 @@ TestIsOrContains() assert(!IsAggregateType(*functionType)); assert(IsOrContains(*functionType)); assert(!IsOrContains(*functionType)); - assert(!IsOrContains(*functionType)); + assert(!IsOrContains(*functionType)); // Struct types are aggregates that can contain other types auto declaration = StructType::Declaration::Create({ valueType, pointerType }); @@ -50,7 +50,7 @@ TestIsOrContains() assert(IsAggregateType(*structType)); 
assert(IsOrContains(*structType)); assert(IsOrContains(*structType)); - assert(!IsOrContains(*structType)); + assert(!IsOrContains(*structType)); // Create an array containing the atruct type auto arrayType = arraytype::Create(structType, 20); @@ -58,7 +58,7 @@ TestIsOrContains() assert(IsOrContains(*arrayType)); assert(IsOrContains(*arrayType)); assert(IsOrContains(*arrayType)); - assert(!IsOrContains(*arrayType)); + assert(!IsOrContains(*arrayType)); // Vector types are weird, as LLVM does not consider them to be aggregate types, // but they still contain other types @@ -67,7 +67,7 @@ TestIsOrContains() assert(IsOrContains(*vectorType)); assert(IsOrContains(*vectorType)); assert(IsOrContains(*vectorType)); - assert(!IsOrContains(*vectorType)); + assert(!IsOrContains(*vectorType)); return 0; } diff --git a/tests/test-types.hpp b/tests/test-types.hpp index ac2e7e0ad..1de6d96ef 100644 --- a/tests/test-types.hpp +++ b/tests/test-types.hpp @@ -33,13 +33,13 @@ class valuetype final : public rvsdg::ValueType Create(); }; -class statetype final : public rvsdg::statetype +class statetype final : public rvsdg::StateType { public: virtual ~statetype(); inline constexpr statetype() noexcept - : rvsdg::statetype() + : rvsdg::StateType() {} virtual std::string From b251b2b6b04b083baa58e389640aea5bb0e4ba1a Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 4 Oct 2024 16:30:49 +0200 Subject: [PATCH 099/170] Rename ctltype class to ControlType (#646) --- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 4 +- jlm/hls/ir/hls.cpp | 2 +- jlm/hls/ir/hls.hpp | 26 ++++++------ jlm/llvm/backend/jlm2llvm/type.cpp | 4 +- jlm/llvm/backend/jlm2llvm/type.hpp | 2 +- .../frontend/ControlFlowRestructuring.cpp | 23 +++++----- jlm/llvm/ir/operators/operators.hpp | 10 ++--- jlm/mlir/backend/JlmToMlirConverter.cpp | 2 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 2 +- jlm/rvsdg/control.cpp | 42 
+++++++++---------- jlm/rvsdg/control.hpp | 27 ++++++------ jlm/rvsdg/gamma.hpp | 4 +- jlm/rvsdg/theta.hpp | 4 +- .../rvsdg2rhls/DeadNodeEliminationTests.cpp | 9 ++-- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 2 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 6 +-- .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 5 ++- .../opt/InvariantValueRedirectionTests.cpp | 8 ++-- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 10 ++--- tests/jlm/llvm/opt/test-cne.cpp | 12 +++--- tests/jlm/llvm/opt/test-inlining.cpp | 7 +++- tests/jlm/llvm/opt/test-pull.cpp | 6 +-- tests/jlm/llvm/opt/test-push.cpp | 6 +-- .../mlir/backend/TestJlmToMlirConverter.cpp | 2 +- tests/jlm/rvsdg/test-gamma.cpp | 16 +++---- tests/jlm/rvsdg/test-theta.cpp | 10 ++--- 28 files changed, 131 insertions(+), 124 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index ade9137d9..aba7e75f7 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -117,7 +117,7 @@ BaseHLS::JlmSize(const jlm::rvsdg::Type * type) { return GetPointerSizeInBits(); } - else if (auto ct = dynamic_cast(type)) + else if (auto ct = dynamic_cast(type)) { return ceil(log2(ct->nalternatives())); } diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index 2f3c5dc20..a0558480f 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -171,7 +171,7 @@ DotHLS::edge(std::string src, std::string snk, const jlm::rvsdg::Type & type, bo { auto color = "black"; JLM_ASSERT(src != "" && snk != ""); - if (dynamic_cast(&type)) + if (dynamic_cast(&type)) { color = "green"; } diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index d1d15a41c..16148a55f 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -29,7 +29,7 @@ eliminate_gamma_ctl(rvsdg::GammaNode * 
gamma) for (size_t i = 0; i < gamma->noutputs(); ++i) { auto o = gamma->output(i); - if (dynamic_cast(&o->type())) + if (dynamic_cast(&o->type())) { bool eliminate = true; for (size_t j = 0; j < gamma->nsubregions(); ++j) @@ -73,7 +73,7 @@ fix_match_inversion(rvsdg::GammaNode * old_gamma) for (size_t i = 0; i < old_gamma->noutputs(); ++i) { auto o = old_gamma->output(i); - if (dynamic_cast(&o->type())) + if (dynamic_cast(&o->type())) { ctl_cnt++; swapped = true; diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 95be4bd1b..8ba4b8d27 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -171,7 +171,7 @@ loop_node::create(rvsdg::Region * parent, bool init) if (init) { auto predicate = jlm::rvsdg::control_false(ln->subregion()); - auto pred_arg = ln->add_backedge(jlm::rvsdg::ctltype::Create(2)); + auto pred_arg = ln->add_backedge(rvsdg::ControlType::Create(2)); pred_arg->result()->divert_to(predicate); // we need a buffer without pass-through behavior to avoid a combinatorial cycle of ready // signals diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 4badf0a59..a5930f36a 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -25,7 +25,7 @@ class branch_op final : public jlm::rvsdg::simple_op private: branch_op(size_t nalternatives, const std::shared_ptr & type, bool loop) : jlm::rvsdg::simple_op( - { jlm::rvsdg::ctltype::Create(nalternatives), type }, + { rvsdg::ControlType::Create(nalternatives), type }, { nalternatives, type }), loop(loop) {} @@ -58,9 +58,9 @@ class branch_op final : public jlm::rvsdg::simple_op static std::vector create(jlm::rvsdg::output & predicate, jlm::rvsdg::output & value, bool loop = false) { - auto ctl = dynamic_cast(&predicate.type()); + auto ctl = dynamic_cast(&predicate.type()); if (!ctl) - throw util::error("Predicate needs to be a ctltype."); + throw util::error("Predicate needs to be a control type."); auto region = predicate.region(); branch_op op(ctl->nalternatives(), value.Type(), loop); @@ -259,9 +259,9 
@@ class mux_op final : public jlm::rvsdg::simple_op { if (alternatives.empty()) throw util::error("Insufficient number of operands."); - auto ctl = dynamic_cast(&predicate.type()); + auto ctl = dynamic_cast(&predicate.type()); if (!ctl) - throw util::error("Predicate needs to be a ctltype."); + throw util::error("Predicate needs to be a control type."); if (alternatives.size() != ctl->nalternatives()) throw util::error("Alternatives and predicate do not match."); @@ -281,7 +281,7 @@ class mux_op final : public jlm::rvsdg::simple_op { auto vec = std::vector>(nalternatives + 1, std::move(type)); - vec[0] = jlm::rvsdg::ctltype::Create(nalternatives); + vec[0] = rvsdg::ControlType::Create(nalternatives); return vec; } }; @@ -330,7 +330,7 @@ class predicate_buffer_op final : public jlm::rvsdg::simple_op virtual ~predicate_buffer_op() {} - explicit predicate_buffer_op(const std::shared_ptr & type) + explicit predicate_buffer_op(const std::shared_ptr & type) : jlm::rvsdg::simple_op({ type }, { type }) {} @@ -357,9 +357,9 @@ class predicate_buffer_op final : public jlm::rvsdg::simple_op create(jlm::rvsdg::output & predicate) { auto region = predicate.region(); - auto ctl = std::dynamic_pointer_cast(predicate.Type()); + auto ctl = std::dynamic_pointer_cast(predicate.Type()); if (!ctl) - throw util::error("Predicate needs to be a ctltype."); + throw util::error("Predicate needs to be a control type."); predicate_buffer_op op(ctl); return jlm::rvsdg::simple_node::create_normalized(region, op, { &predicate }); } @@ -372,7 +372,7 @@ class loop_constant_buffer_op final : public jlm::rvsdg::simple_op {} loop_constant_buffer_op( - const std::shared_ptr & ctltype, + const std::shared_ptr & ctltype, const std::shared_ptr & type) : jlm::rvsdg::simple_op({ ctltype, type }, { type }) {} @@ -400,9 +400,9 @@ class loop_constant_buffer_op final : public jlm::rvsdg::simple_op create(jlm::rvsdg::output & predicate, jlm::rvsdg::output & value) { auto region = predicate.region(); - auto ctl 
= std::dynamic_pointer_cast(predicate.Type()); + auto ctl = std::dynamic_pointer_cast(predicate.Type()); if (!ctl) - throw util::error("Predicate needs to be a ctltype."); + throw util::error("Predicate needs to be a control type."); loop_constant_buffer_op op(ctl, value.Type()); return jlm::rvsdg::simple_node::create_normalized(region, op, { &predicate, &value }); } @@ -770,7 +770,7 @@ class loop_node final : public jlm::rvsdg::structural_node predicate() const noexcept { auto result = subregion()->result(0); - JLM_ASSERT(dynamic_cast(&result->type())); + JLM_ASSERT(dynamic_cast(&result->type())); return result; } diff --git a/jlm/llvm/backend/jlm2llvm/type.cpp b/jlm/llvm/backend/jlm2llvm/type.cpp index c995b2e8b..15a8d6f86 100644 --- a/jlm/llvm/backend/jlm2llvm/type.cpp +++ b/jlm/llvm/backend/jlm2llvm/type.cpp @@ -70,7 +70,7 @@ convert(const arraytype & type, context & ctx) } static ::llvm::Type * -convert(const rvsdg::ctltype & type, context & ctx) +convert(const rvsdg::ControlType & type, context & ctx) { if (type.nalternatives() == 2) return ::llvm::Type::getInt1Ty(ctx.llvm_module().getContext()); @@ -143,7 +143,7 @@ convert_type(const rvsdg::Type & type, context & ctx) { typeid(FunctionType), convert }, { typeid(PointerType), convert }, { typeid(arraytype), convert }, - { typeid(rvsdg::ctltype), convert }, + { typeid(rvsdg::ControlType), convert }, { typeid(fptype), convert }, { typeid(StructType), convert }, { typeid(fixedvectortype), convert }, diff --git a/jlm/llvm/backend/jlm2llvm/type.hpp b/jlm/llvm/backend/jlm2llvm/type.hpp index 1baf4354b..36c9a58eb 100644 --- a/jlm/llvm/backend/jlm2llvm/type.hpp +++ b/jlm/llvm/backend/jlm2llvm/type.hpp @@ -62,7 +62,7 @@ convert_type(const arraytype & type, context & ctx) } static inline ::llvm::IntegerType * -convert_type(const rvsdg::ctltype & type, context & ctx) +convert_type(const rvsdg::ControlType & type, context & ctx) { auto t = convert_type(*static_cast(&type), ctx); JLM_ASSERT(t->getTypeID() == 
::llvm::Type::IntegerTyID); diff --git a/jlm/llvm/frontend/ControlFlowRestructuring.cpp b/jlm/llvm/frontend/ControlFlowRestructuring.cpp index 74e1204a1..aaf196ab4 100644 --- a/jlm/llvm/frontend/ControlFlowRestructuring.cpp +++ b/jlm/llvm/frontend/ControlFlowRestructuring.cpp @@ -67,7 +67,7 @@ reinsert_tcloop(const tcloop & l) } static const tacvariable * -create_pvariable(basic_block & bb, std::shared_ptr type) +create_pvariable(basic_block & bb, std::shared_ptr type) { static size_t c = 0; auto name = util::strfmt("#p", c++, "#"); @@ -75,7 +75,7 @@ create_pvariable(basic_block & bb, std::shared_ptr type) } static const tacvariable * -create_qvariable(basic_block & bb, std::shared_ptr type) +create_qvariable(basic_block & bb, std::shared_ptr type) { static size_t c = 0; auto name = util::strfmt("#q", c++, "#"); @@ -83,7 +83,7 @@ create_qvariable(basic_block & bb, std::shared_ptr type) } static const tacvariable * -create_tvariable(basic_block & bb, std::shared_ptr type) +create_tvariable(basic_block & bb, std::shared_ptr type) { static size_t c = 0; auto name = util::strfmt("#q", c++, "#"); @@ -96,22 +96,23 @@ create_rvariable(basic_block & bb) static size_t c = 0; auto name = util::strfmt("#r", c++, "#"); - return bb.append_last(UndefValueOperation::Create(rvsdg::ctltype::Create(2), name))->result(0); + return bb.append_last(UndefValueOperation::Create(rvsdg::ControlType::Create(2), name)) + ->result(0); } static inline void append_branch(basic_block * bb, const variable * operand) { - JLM_ASSERT(dynamic_cast(&operand->type())); - auto nalternatives = static_cast(&operand->type())->nalternatives(); + JLM_ASSERT(dynamic_cast(&operand->type())); + auto nalternatives = static_cast(&operand->type())->nalternatives(); bb->append_last(branch_op::create(nalternatives, operand)); } static inline void append_constant(basic_block * bb, const tacvariable * result, size_t value) { - JLM_ASSERT(dynamic_cast(&result->type())); - auto nalternatives = 
static_cast(&result->type())->nalternatives(); + JLM_ASSERT(dynamic_cast(&result->type())); + auto nalternatives = static_cast(&result->type())->nalternatives(); rvsdg::ctlconstant_op op(rvsdg::ctlvalue_repr(value, nalternatives)); bb->append_last(tac::create(op, {})); @@ -261,14 +262,14 @@ restructure_loops(cfg_node * entry, cfg_node * exit, std::vector & loops if (sccstruct->nenodes() > 1) { auto bb = find_tvariable_bb(entry); - ev = create_tvariable(*bb, rvsdg::ctltype::Create(sccstruct->nenodes())); + ev = create_tvariable(*bb, rvsdg::ControlType::Create(sccstruct->nenodes())); } auto rv = create_rvariable(*new_ne); const tacvariable * xv = nullptr; if (sccstruct->nxnodes() > 1) - xv = create_qvariable(*new_ne, rvsdg::ctltype::Create(sccstruct->nxnodes())); + xv = create_qvariable(*new_ne, rvsdg::ControlType::Create(sccstruct->nxnodes())); append_branch(new_nr, rv); @@ -427,7 +428,7 @@ restructure_branches(cfg_node * entry, cfg_node * exit) } /* insert new continuation point */ - auto p = create_pvariable(hbb, rvsdg::ctltype::Create(c.points.size())); + auto p = create_pvariable(hbb, rvsdg::ControlType::Create(c.points.size())); auto cn = basic_block::create(cfg); append_branch(cn, p); std::unordered_map indices; diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index 55777a9a1..5b1fc042f 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -370,7 +370,7 @@ class ctl2bits_op final : public jlm::rvsdg::simple_op virtual ~ctl2bits_op() noexcept; inline ctl2bits_op( - std::shared_ptr srctype, + std::shared_ptr srctype, std::shared_ptr dsttype) : jlm::rvsdg::simple_op({ std::move(srctype) }, { std::move(dsttype) }) {} @@ -387,7 +387,7 @@ class ctl2bits_op final : public jlm::rvsdg::simple_op static std::unique_ptr create(const variable * operand, const std::shared_ptr & type) { - auto st = std::dynamic_pointer_cast(operand->Type()); + auto st = std::dynamic_pointer_cast(operand->Type()); 
if (!st) throw jlm::util::error("expected control type."); @@ -407,7 +407,7 @@ class branch_op final : public jlm::rvsdg::simple_op public: virtual ~branch_op() noexcept; - explicit inline branch_op(std::shared_ptr type) + explicit inline branch_op(std::shared_ptr type) : jlm::rvsdg::simple_op({ std::move(type) }, {}) {} @@ -423,13 +423,13 @@ class branch_op final : public jlm::rvsdg::simple_op inline size_t nalternatives() const noexcept { - return std::static_pointer_cast(argument(0))->nalternatives(); + return std::static_pointer_cast(argument(0))->nalternatives(); } static std::unique_ptr create(size_t nalternatives, const variable * operand) { - branch_op op(jlm::rvsdg::ctltype::Create(nalternatives)); + branch_op op(jlm::rvsdg::ControlType::Create(nalternatives)); return tac::create(op, { operand }); } }; diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 2e9f132f8..5b68714ca 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -479,7 +479,7 @@ JlmToMlirConverter::ConvertType(const rvsdg::Type & type) { return Builder_->getType<::mlir::rvsdg::MemStateEdgeType>(); } - else if (auto clt = dynamic_cast(&type)) + else if (auto clt = dynamic_cast(&type)) { return Builder_->getType<::mlir::rvsdg::RVSDG_CTRLType>(clt->nalternatives()); } diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index c49a35873..99aebcde7 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -452,7 +452,7 @@ MlirToJlmConverter::ConvertType(::mlir::Type & type) { if (auto ctrlType = ::mlir::dyn_cast<::mlir::rvsdg::RVSDG_CTRLType>(type)) { - return std::make_unique(ctrlType.getNumOptions()); + return std::make_unique(ctrlType.getNumOptions()); } else if (auto intType = ::mlir::dyn_cast<::mlir::IntegerType>(type)) { diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index 1eedce463..94fe1c720 
100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -14,48 +14,46 @@ namespace jlm::rvsdg /* control constant */ // explicit instantiation -template class domain_const_op; +template class domain_const_op; -/* control type */ +ControlType::~ControlType() noexcept = default; -ctltype::~ctltype() noexcept -{} - -ctltype::ctltype(size_t nalternatives) +ControlType::ControlType(size_t nalternatives) : StateType(), nalternatives_(nalternatives) {} std::string -ctltype::debug_string() const +ControlType::debug_string() const { return jlm::util::strfmt("ctl(", nalternatives_, ")"); } bool -ctltype::operator==(const jlm::rvsdg::Type & other) const noexcept +ControlType::operator==(const Type & other) const noexcept { - auto type = dynamic_cast(&other); + auto type = dynamic_cast(&other); return type && type->nalternatives_ == nalternatives_; } std::size_t -ctltype::ComputeHash() const noexcept +ControlType::ComputeHash() const noexcept { - auto typeHash = typeid(ctltype).hash_code(); + auto typeHash = typeid(ControlType).hash_code(); auto numAlternativesHash = std::hash()(nalternatives_); return util::CombineHashes(typeHash, numAlternativesHash); } -std::shared_ptr -ctltype::Create(std::size_t nalternatives) +std::shared_ptr +ControlType::Create(std::size_t nalternatives) { - static const ctltype static_instances[4] = { // ctltype(0) is not valid, but put it in here so - // the static array indexing works correctly - ctltype(0), - ctltype(1), - ctltype(2), - ctltype(3) + static const ControlType static_instances[4] = { + // ControlType(0) is not valid, but put it in here so + // the static array indexing works correctly + ControlType(0), + ControlType(1), + ControlType(2), + ControlType(3) }; if (nalternatives < 4) @@ -64,13 +62,13 @@ ctltype::Create(std::size_t nalternatives) { throw jlm::util::error("Alternatives of a control type must be non-zero."); } - return std::shared_ptr( + return std::shared_ptr( std::shared_ptr(), &static_instances[nalternatives]); 
} else { - return std::make_shared(nalternatives); + return std::make_shared(nalternatives); } } @@ -94,7 +92,7 @@ match_op::match_op( const std::unordered_map & mapping, uint64_t default_alternative, size_t nalternatives) - : jlm::rvsdg::unary_op(bittype::Create(nbits), ctltype::Create(nalternatives)), + : jlm::rvsdg::unary_op(bittype::Create(nbits), ControlType::Create(nalternatives)), default_alternative_(default_alternative), mapping_(mapping) {} diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index 6a9bd4739..7a243b249 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -20,14 +20,12 @@ namespace jlm::rvsdg { -/* control type */ - -class ctltype final : public StateType +class ControlType final : public StateType { public: - virtual ~ctltype() noexcept; + ~ControlType() noexcept override; - ctltype(size_t nalternatives); + explicit ControlType(size_t nalternatives); virtual std::string debug_string() const override; @@ -55,7 +53,7 @@ class ctltype final : public StateType * the specified number of alternatives. The returned instance * will usually be a static singleton for the type. 
*/ - static std::shared_ptr + static std::shared_ptr Create(std::size_t nalternatives); private: @@ -65,7 +63,7 @@ class ctltype final : public StateType static inline bool is_ctltype(const jlm::rvsdg::Type & type) noexcept { - return dynamic_cast(&type) != nullptr; + return dynamic_cast(&type) != nullptr; } /* control value representation */ @@ -108,10 +106,10 @@ class ctlvalue_repr struct ctltype_of_value { - std::shared_ptr + std::shared_ptr operator()(const ctlvalue_repr & repr) const { - return ctltype::Create(repr.nalternatives()); + return ControlType::Create(repr.nalternatives()); } }; @@ -124,7 +122,8 @@ struct ctlformat_value } }; -typedef domain_const_op ctlconstant_op; +typedef domain_const_op + ctlconstant_op; static inline bool is_ctlconstant_op(const jlm::rvsdg::operation & op) noexcept @@ -172,7 +171,7 @@ class match_op final : public jlm::rvsdg::unary_op inline uint64_t nalternatives() const noexcept { - return std::static_pointer_cast(result(0))->nalternatives(); + return std::static_pointer_cast(result(0))->nalternatives(); } inline uint64_t @@ -247,7 +246,11 @@ match( jlm::rvsdg::output * operand); // declare explicit instantiation -extern template class domain_const_op; +extern template class domain_const_op< + ControlType, + ctlvalue_repr, + ctlformat_value, + ctltype_of_value>; static inline const match_op & to_match_op(const jlm::rvsdg::operation & op) noexcept diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 2f79215f5..049380e9b 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -453,8 +453,8 @@ class GammaOutput final : public structural_output inline GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) : structural_node(GammaOperation(nalternatives), predicate->region(), nalternatives) { - node::add_input( - std::unique_ptr(new GammaInput(this, predicate, ctltype::Create(nalternatives)))); + node::add_input(std::unique_ptr( + new GammaInput(this, predicate, ControlType::Create(nalternatives)))); } 
/** diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 41e62a003..390025904 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -107,7 +107,7 @@ class ThetaNode final : public structural_node predicate() const noexcept { auto result = subregion()->result(0); - JLM_ASSERT(dynamic_cast(&result->type())); + JLM_ASSERT(dynamic_cast(&result->type())); return result; } @@ -407,7 +407,7 @@ class ThetaPredicateResult final : public RegionResult private: explicit ThetaPredicateResult(rvsdg::output & origin) - : RegionResult(origin.region(), &origin, nullptr, ctltype::Create(2)) + : RegionResult(origin.region(), &origin, nullptr, ControlType::Create(2)) { JLM_ASSERT(is(origin.region()->node())); } diff --git a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp index 53d48e77c..93808a2cd 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp @@ -16,8 +16,9 @@ TestDeadLoopNode() // Arrange auto valueType = jlm::tests::valuetype::Create(); - auto functionType = - jlm::llvm::FunctionType::Create({ jlm::rvsdg::ctltype::Create(2), valueType }, { valueType }); + auto functionType = jlm::llvm::FunctionType::Create( + { jlm::rvsdg::ControlType::Create(2), valueType }, + { valueType }); jlm::llvm::RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule.Rvsdg(); @@ -47,8 +48,8 @@ TestDeadLoopNodeOutput() // Arrange auto valueType = jlm::tests::valuetype::Create(); auto functionType = jlm::llvm::FunctionType::Create( - { jlm::rvsdg::ctltype::Create(2), valueType }, - { jlm::rvsdg::ctltype::Create(2) }); + { jlm::rvsdg::ControlType::Create(2), valueType }, + { jlm::rvsdg::ControlType::Create(2) }); jlm::llvm::RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule.Rvsdg(); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp 
b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index 22c9d6ce2..0d693d4d4 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -55,7 +55,7 @@ TestWithoutMatch() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ft = FunctionType::Create({ jlm::rvsdg::ctltype::Create(2), vt, vt }, { vt }); + auto ft = FunctionType::Create({ jlm::rvsdg::ControlType::Create(2), vt, vt }, { vt }); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 70d134525..0555b039e 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -25,7 +25,7 @@ TestGamma() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto p = &jlm::tests::GraphImport::Create(rvsdg, jlm::rvsdg::ctltype::Create(2), "p"); + auto p = &jlm::tests::GraphImport::Create(rvsdg, jlm::rvsdg::ControlType::Create(2), "p"); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); @@ -79,12 +79,12 @@ TestTheta() // Arrange auto valueType = jlm::tests::valuetype::Create(); auto functionType = FunctionType::Create( - { jlm::rvsdg::ctltype::Create(2), valueType, valueType, valueType }, + { jlm::rvsdg::ControlType::Create(2), valueType, valueType, valueType }, { valueType }); auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto p = &jlm::tests::GraphImport::Create(rvsdg, jlm::rvsdg::ctltype::Create(2), "p"); + auto p = &jlm::tests::GraphImport::Create(rvsdg, jlm::rvsdg::ControlType::Create(2), 
"p"); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp index 43ece0a7b..c457f4469 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -78,8 +78,9 @@ GammaWithoutMatch() // Arrange auto valueType = valuetype::Create(); - auto functionType = - FunctionType::Create({ jlm::rvsdg::ctltype::Create(2), valueType, valueType }, { valueType }); + auto functionType = FunctionType::Create( + { jlm::rvsdg::ControlType::Create(2), valueType, valueType }, + { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index c2c64fa0b..0710dbeae 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -36,7 +36,7 @@ TestGamma() // Arrange auto valueType = jlm::tests::valuetype::Create(); - auto controlType = jlm::rvsdg::ctltype::Create(2); + auto controlType = jlm::rvsdg::ControlType::Create(2); auto functionType = FunctionType::Create({ controlType, valueType, valueType }, { valueType, valueType }); @@ -88,7 +88,7 @@ TestTheta() auto ioStateType = iostatetype::Create(); auto valueType = jlm::tests::valuetype::Create(); - auto controlType = jlm::rvsdg::ctltype::Create(2); + auto controlType = jlm::rvsdg::ControlType::Create(2); auto functionType = FunctionType::Create( { controlType, valueType, ioStateType }, { controlType, valueType, ioStateType }); @@ -143,7 +143,7 @@ TestCall() auto ioStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); auto valueType = 
jlm::tests::valuetype::Create(); - auto controlType = jlm::rvsdg::ctltype::Create(2); + auto controlType = jlm::rvsdg::ControlType::Create(2); auto functionTypeTest1 = FunctionType::Create( { controlType, valueType, valueType, ioStateType, memoryStateType }, { valueType, valueType, ioStateType, memoryStateType }); @@ -230,7 +230,7 @@ TestCallWithMemoryStateNodes() auto ioStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); auto valueType = jlm::tests::valuetype::Create(); - auto controlType = jlm::rvsdg::ctltype::Create(2); + auto controlType = jlm::rvsdg::ControlType::Create(2); auto functionTypeTest1 = FunctionType::Create( { controlType, valueType, ioStateType, memoryStateType }, { valueType, ioStateType, memoryStateType }); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 3602280df..52d98bbb4 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -51,7 +51,7 @@ TestGamma() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -90,7 +90,7 @@ TestGamma2() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -120,7 +120,7 @@ TestTheta() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -163,7 +163,7 @@ TestNestedTheta() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = 
jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -207,7 +207,7 @@ TestEvolvingTheta() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index a3b09cc20..363023a01 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -69,7 +69,7 @@ test_gamma() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -135,7 +135,7 @@ test_theta() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -188,7 +188,7 @@ test_theta2() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -232,7 +232,7 @@ test_theta3() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -291,7 +291,7 @@ test_theta4() using namespace jlm::llvm; auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -344,7 +344,7 @@ test_theta5() using namespace jlm::llvm; 
auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index dfb95f23c..37b657b4d 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -50,9 +50,12 @@ test1() auto vt = jlm::tests::valuetype::Create(); auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); auto functionType = FunctionType::Create( - { jlm::rvsdg::ctltype::Create(2), vt, iostatetype::Create(), MemoryStateType::Create() }, + { jlm::rvsdg::ControlType::Create(2), + vt, + iostatetype::Create(), + MemoryStateType::Create() }, { vt, iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index ad2fd468d..3eb5b4326 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -22,7 +22,7 @@ test_pullin_top() { using namespace jlm::llvm; - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); jlm::tests::test_op uop({ vt }, { vt }); jlm::tests::test_op bop({ vt, vt }, { vt }); jlm::tests::test_op cop({ ct, vt }, { ct }); @@ -60,7 +60,7 @@ static inline void test_pullin_bottom() { auto vt = jlm::tests::valuetype::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); jlm::rvsdg::graph graph; auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -93,7 +93,7 @@ test_pull() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto p = &jlm::tests::GraphImport::Create(graph, 
jlm::rvsdg::ctltype::Create(2), ""); + auto p = &jlm::tests::GraphImport::Create(graph, jlm::rvsdg::ControlType::Create(2), ""); auto croot = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 191b42aa5..8a2f4aa18 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -25,7 +25,7 @@ test_gamma() { using namespace jlm::llvm; - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); @@ -59,7 +59,7 @@ test_theta() { using namespace jlm::llvm; - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); jlm::tests::test_op nop({}, { vt }); jlm::tests::test_op bop({ vt, vt }, { vt }); @@ -109,7 +109,7 @@ test_push_theta_bottom() auto mt = MemoryStateType::Create(); auto pt = PointerType::Create(); - auto ct = jlm::rvsdg::ctltype::Create(2); + auto ct = jlm::rvsdg::ControlType::Create(2); jlm::rvsdg::graph graph; auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 27413f03e..93ec90f8d 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -394,7 +394,7 @@ TestMatch() std::cout << "Function Setup" << std::endl; auto functionType = FunctionType::Create( { iostatetype::Create(), MemoryStateType::Create() }, - { jlm::rvsdg::ctltype::Create(2), iostatetype::Create(), MemoryStateType::Create() }); + { jlm::rvsdg::ControlType::Create(2), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 2f58ccf9c..1202bec05 100644 --- 
a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -21,7 +21,7 @@ test_gamma(void) auto v0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); auto v1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); auto v2 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); - auto v3 = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), ""); + auto v3 = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), ""); auto pred = match(2, { { 0, 0 }, { 1, 1 } }, 2, 3, cmp); @@ -90,7 +90,7 @@ test_invariant_reduction(void) jlm::rvsdg::graph graph; GammaOperation::normal_form(&graph)->set_invariant_reduction(true); - auto pred = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), ""); + auto pred = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), ""); auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); auto gamma = GammaNode::create(pred, 2); @@ -184,9 +184,9 @@ TestRemoveGammaOutputsWhere() // Arrange jlm::rvsdg::graph rvsdg; auto vt = jlm::tests::valuetype::Create(); - ctltype ct(2); + ControlType ct(2); - auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), ""); + auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), ""); auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v2 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); @@ -246,9 +246,9 @@ TestPruneOutputs() // Arrange jlm::rvsdg::graph rvsdg; auto vt = jlm::tests::valuetype::Create(); - ctltype ct(2); + ControlType ct(2); - auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), ""); + auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), ""); auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v2 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); @@ -295,9 +295,9 @@ 
TestIsInvariant() // Arrange jlm::rvsdg::graph rvsdg; auto vt = jlm::tests::valuetype::Create(); - ctltype ct(2); + ControlType ct(2); - auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), ""); + auto predicate = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), ""); auto v0 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index f5620301f..8e50f3fed 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -18,7 +18,7 @@ TestThetaCreation() jlm::rvsdg::graph graph; auto t = jlm::tests::valuetype::Create(); - auto imp1 = &jlm::tests::GraphImport::Create(graph, ctltype::Create(2), "imp1"); + auto imp1 = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), "imp1"); auto imp2 = &jlm::tests::GraphImport::Create(graph, t, "imp2"); auto imp3 = &jlm::tests::GraphImport::Create(graph, t, "imp3"); @@ -57,7 +57,7 @@ TestRemoveThetaOutputsWhere() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); @@ -107,7 +107,7 @@ TestPruneThetaOutputs() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); @@ -142,7 +142,7 @@ TestRemoveThetaInputsWhere() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = &jlm::tests::GraphImport::Create(rvsdg, 
ctltype::Create(2), "ctl"); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); @@ -198,7 +198,7 @@ TestPruneThetaInputs() graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ctltype::Create(2), "ctl"); + auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); From 5e91210f835441c5df804cd8fcf98323b4d99e04 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sat, 5 Oct 2024 10:48:17 +0200 Subject: [PATCH 100/170] Generalize the memory response and request nodes to handle multiple memory ports (#634) Provides support for memories with multiple read and/or write ports, e.g., BRAMs in AMD FPGAs. --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 279 ++++++++++-------- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 12 + 2 files changed, 169 insertions(+), 122 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 2e719b4ec..8f9f55703 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -664,16 +664,8 @@ RhlsToFirrtlConverter::MlirGenHlsMemResp(const jlm::rvsdg::simple_node * node) auto module = nodeToModule(node, false); auto body = module.getBodyBlock(); - mlir::BlockArgument memRes = GetInPort(module, 0); - auto memResValid = GetSubfield(body, memRes, "valid"); - auto memResReady = GetSubfield(body, memRes, "ready"); - auto memResBundle = GetSubfield(body, memRes, "data"); - auto memResId = GetSubfield(body, memResBundle, "id"); - auto memResData = GetSubfield(body, memResBundle, "data"); - auto zeroBitValue = GetConstant(body, 1, 
0); auto oneBitValue = GetConstant(body, 1, 1); - Connect(body, memResReady, zeroBitValue); for (size_t i = 0; i < node->noutputs(); ++i) { @@ -681,33 +673,68 @@ RhlsToFirrtlConverter::MlirGenHlsMemResp(const jlm::rvsdg::simple_node * node) auto outValid = GetSubfield(body, outBundle, "valid"); auto outData = GetSubfield(body, outBundle, "data"); Connect(body, outValid, zeroBitValue); - - int nbits = JlmSize(&node->output(i)->type()); - if (nbits == 64) + ConnectInvalid(body, outData); + } + for (size_t j = 0; j < node->ninputs(); ++j) + { + mlir::BlockArgument memRes = GetInPort(module, j); + auto memResValid = GetSubfield(body, memRes, "valid"); + auto memResReady = GetSubfield(body, memRes, "ready"); + auto memResBundle = GetSubfield(body, memRes, "data"); + auto memResId = GetSubfield(body, memResBundle, "id"); + auto memResData = GetSubfield(body, memResBundle, "data"); + auto elseBody = body; + for (size_t i = 0; i < node->noutputs(); ++i) { - Connect(body, outData, memResData); + auto outBundle = GetOutPort(module, i); + auto outValid = GetSubfield(elseBody, outBundle, "valid"); + auto outReady = GetSubfield(elseBody, outBundle, "ready"); + auto outData = GetSubfield(elseBody, outBundle, "data"); + auto condition = + AddAndOp(elseBody, memResValid, AddEqOp(elseBody, GetConstant(elseBody, 8, i), memResId)); + auto whenOp = AddWhenOp(elseBody, condition, true); + auto thenBody = whenOp.getThenBodyBuilder().getBlock(); + Connect(thenBody, outValid, oneBitValue); + Connect(thenBody, memResReady, outReady); + int nbits = JlmSize(&node->output(i)->type()); + if (nbits == 64) + { + Connect(thenBody, outData, memResData); + } + else + { + Connect(thenBody, outData, AddBitsOp(thenBody, memResData, nbits - 1, 0)); + } + elseBody = whenOp.getElseBodyBuilder().getBlock(); } - else + + // Connect to ready for other ids - for example stores + Connect(elseBody, memResReady, oneBitValue); + // Assert we don't get a response to the same ID on several in ports - if this shows 
up we need + // taken logic for outputs + for (size_t i = 0; i < j; ++i) { - Connect(body, outData, AddBitsOp(body, memResData, nbits - 1, 0)); + mlir::BlockArgument memRes2 = GetInPort(module, i); + auto memResValid2 = GetSubfield(body, memRes2, "valid"); + auto memResBundle2 = GetSubfield(body, memRes2, "data"); + auto memResId2 = GetSubfield(body, memResBundle2, "id"); + auto id_assert = Builder_->create( + Builder_->getUnknownLoc(), + GetClockSignal(module), + AddNotOp( + body, + AddAndOp( + body, + AddAndOp(body, memResValid, memResValid2), + AddEqOp(body, memResId, memResId2))), + AddNotOp(body, GetResetSignal(module)), + "overlapping reponse id", + mlir::ValueRange(), + "response_id_assert_" + std::to_string(j) + "_" + std::to_string(i)); + body->push_back(id_assert); } } - auto elseBody = body; - for (size_t i = 0; i < node->noutputs(); ++i) - { - auto outBundle = GetOutPort(module, i); - auto outValid = GetSubfield(elseBody, outBundle, "valid"); - auto outReady = GetSubfield(elseBody, outBundle, "ready"); - auto condition = AddEqOp(elseBody, GetConstant(elseBody, 8, i), memResId); - auto whenOp = AddWhenOp(elseBody, condition, true); - auto thenBody = whenOp.getThenBodyBuilder().getBlock(); - Connect(thenBody, outValid, memResValid); - Connect(thenBody, memResReady, outReady); - elseBody = whenOp.getElseBodyBuilder().getBlock(); - } - // connect to ready for other ids - for example stores - Connect(elseBody, memResReady, oneBitValue); return module; } @@ -719,25 +746,6 @@ RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) auto body = module.getBodyBlock(); auto op = dynamic_cast(&node->operation()); - auto reqType = dynamic_cast(&node->output(0)->type()); - // TODO: more robust check - auto hasWrite = reqType->elements_.size() == 5; - - mlir::BlockArgument memReq = GetOutPort(module, 0); - mlir::Value memReqData; - mlir::Value memReqWrite; - auto memReqReady = GetSubfield(body, memReq, "ready"); - auto memReqValid = 
GetSubfield(body, memReq, "valid"); - auto memReqBundle = GetSubfield(body, memReq, "data"); - auto memReqAddr = GetSubfield(body, memReqBundle, "addr"); - auto memReqSize = GetSubfield(body, memReqBundle, "size"); - auto memReqId = GetSubfield(body, memReqBundle, "id"); - if (hasWrite) - { - memReqData = GetSubfield(body, memReqBundle, "data"); - memReqWrite = GetSubfield(body, memReqBundle, "write"); - } - auto loadTypes = op->GetLoadTypes(); ::llvm::SmallVector loadAddrReadys; ::llvm::SmallVector loadAddrValids; @@ -752,12 +760,11 @@ RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) ::llvm::SmallVector storeDataValids; ::llvm::SmallVector storeDataDatas; ::llvm::SmallVector storeIds; - // the ports for loads come first and consist only of addresses. Stores have both addresses and - // data + // The ports for loads come first and consist only of addresses. + // Stores have both addresses and data size_t id = 0; for (size_t i = 0; i < op->get_nloads(); ++i) { - // loadAddrTypes.push_back(&node->input(i)->type()); auto bundle = GetInPort(module, i); loadAddrReadys.push_back(GetSubfield(body, bundle, "ready")); loadAddrValids.push_back(GetSubfield(body, bundle, "valid")); @@ -768,7 +775,6 @@ RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) for (size_t i = op->get_nloads(); i < node->ninputs(); ++i) { // Store - // storeAddrTypes.push_back(&node->input(i)->type()); auto addrBundle = GetInPort(module, i); storeAddrReadys.push_back(GetSubfield(body, addrBundle, "ready")); storeAddrValids.push_back(GetSubfield(body, addrBundle, "valid")); @@ -784,84 +790,113 @@ RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) auto zeroBitValue = GetConstant(body, 1, 0); auto oneBitValue = GetConstant(body, 1, 1); - // default req connection - Connect(body, memReqValid, zeroBitValue); - ConnectInvalid(body, memReqBundle); - mlir::Value previousGranted = GetConstant(body, 1, 0); - for (size_t i = 0; i < 
loadTypes->size(); ++i) - { - Connect(body, loadAddrReadys[i], zeroBitValue); - auto notOp = AddNotOp(body, previousGranted); - auto condition = AddAndOp(body, notOp, loadAddrValids[i]); - auto whenOp = AddWhenOp(body, condition, false); - auto thenBody = whenOp.getThenBodyBuilder().getBlock(); - Connect(thenBody, loadAddrReadys[i], memReqReady); - Connect(thenBody, memReqValid, loadAddrValids[i]); - Connect(thenBody, memReqAddr, loadAddrDatas[i]); - Connect(thenBody, memReqId, loadIds[i]); - // no data or write - int bitWidth; - auto loadType = loadTypes->at(i).get(); - if (auto bitType = dynamic_cast(loadType)) - { - bitWidth = bitType->nbits(); - } - else if (dynamic_cast(loadType)) - { - bitWidth = 64; - } - else - { - throw jlm::util::error("unknown width for mem request"); - } - int log2Bytes = log2(bitWidth / 8); - Connect(thenBody, memReqSize, GetConstant(thenBody, 3, log2Bytes)); + ::llvm::SmallVector loadGranted(loadTypes->size(), zeroBitValue); + ::llvm::SmallVector storeGranted(storeTypes->size(), zeroBitValue); + for (size_t j = 0; j < node->noutputs(); ++j) + { + auto reqType = util::AssertedCast(&node->output(j)->type()); + auto hasWrite = reqType->elements_.size() == 5; + mlir::BlockArgument memReq = GetOutPort(module, j); + mlir::Value memReqData; + mlir::Value memReqWrite; + auto memReqReady = GetSubfield(body, memReq, "ready"); + auto memReqValid = GetSubfield(body, memReq, "valid"); + auto memReqBundle = GetSubfield(body, memReq, "data"); + auto memReqAddr = GetSubfield(body, memReqBundle, "addr"); + auto memReqSize = GetSubfield(body, memReqBundle, "size"); + auto memReqId = GetSubfield(body, memReqBundle, "id"); if (hasWrite) { - Connect(thenBody, memReqWrite, zeroBitValue); + memReqData = GetSubfield(body, memReqBundle, "data"); + memReqWrite = GetSubfield(body, memReqBundle, "write"); } - // update for next iteration - previousGranted = AddOrOp(body, previousGranted, loadAddrValids[i]); - } - // stores - for (size_t i = 0; i < 
storeTypes->size(); ++i) - { - Connect(body, storeAddrReadys[i], zeroBitValue); - Connect(body, storeDataReadys[i], zeroBitValue); - auto notOp = AddNotOp(body, previousGranted); - auto condition = AddAndOp(body, notOp, storeAddrValids[i]); - condition = AddAndOp(body, condition, storeDataValids[i]); - auto whenOp = AddWhenOp(body, condition, false); - auto thenBody = whenOp.getThenBodyBuilder().getBlock(); - Connect(thenBody, storeAddrReadys[i], memReqReady); - Connect(thenBody, storeDataReadys[i], memReqReady); - Connect(thenBody, memReqValid, storeAddrValids[i]); - Connect(thenBody, memReqAddr, storeAddrDatas[i]); - // TODO: pad - Connect(thenBody, memReqData, storeDataDatas[i]); - Connect(thenBody, memReqId, storeIds[i]); - // no data or write - int bitWidth; - auto storeType = storeTypes->at(i).get(); - if (auto bitType = dynamic_cast(storeType)) + // Default request connection + Connect(body, memReqValid, zeroBitValue); + ConnectInvalid(body, memReqBundle); + mlir::Value previousGranted = zeroBitValue; + for (size_t i = 0; i < loadTypes->size(); ++i) { - bitWidth = bitType->nbits(); - } - else if (dynamic_cast(storeType)) - { - bitWidth = 64; + if (j == 0) + { + Connect(body, loadAddrReadys[i], zeroBitValue); + } + auto canGrant = AddNotOp(body, AddOrOp(body, previousGranted, loadGranted[i])); + auto grant = AddAndOp(body, canGrant, loadAddrValids[i]); + auto whenOp = AddWhenOp(body, grant, false); + auto thenBody = whenOp.getThenBodyBuilder().getBlock(); + Connect(thenBody, loadAddrReadys[i], memReqReady); + Connect(thenBody, memReqValid, loadAddrValids[i]); + Connect(thenBody, memReqAddr, loadAddrDatas[i]); + Connect(thenBody, memReqId, loadIds[i]); + // No data or write + int bitWidth; + auto loadType = loadTypes->at(i).get(); + if (auto bitType = dynamic_cast(loadType)) + { + bitWidth = bitType->nbits(); + } + else if (dynamic_cast(loadType)) + { + bitWidth = 64; + } + else + { + throw jlm::util::error("unknown width for mem request"); + } + int log2Bytes 
= log2(bitWidth / 8); + Connect(thenBody, memReqSize, GetConstant(thenBody, 3, log2Bytes)); + if (hasWrite) + { + Connect(thenBody, memReqWrite, zeroBitValue); + } + // Update for next iteration + previousGranted = AddOrOp(body, previousGranted, grant); + loadGranted[i] = AddOrOp(body, loadGranted[i], grant); } - else + // Stores + for (size_t i = 0; hasWrite && i < storeTypes->size(); ++i) { - throw jlm::util::error("unknown width for mem request"); + if (j == 0) + { + Connect(body, storeAddrReadys[i], zeroBitValue); + Connect(body, storeDataReadys[i], zeroBitValue); + } + auto notOp = AddNotOp(body, AddOrOp(body, previousGranted, storeGranted[i])); + auto grant = AddAndOp(body, notOp, storeAddrValids[i]); + grant = AddAndOp(body, grant, storeDataValids[i]); + auto whenOp = AddWhenOp(body, grant, false); + auto thenBody = whenOp.getThenBodyBuilder().getBlock(); + Connect(thenBody, storeAddrReadys[i], memReqReady); + Connect(thenBody, storeDataReadys[i], memReqReady); + Connect(thenBody, memReqValid, storeAddrValids[i]); + Connect(thenBody, memReqAddr, storeAddrDatas[i]); + // TODO: pad + Connect(thenBody, memReqData, storeDataDatas[i]); + Connect(thenBody, memReqId, storeIds[i]); + // No data or write + int bitWidth; + auto storeType = storeTypes->at(i).get(); + if (auto bitType = dynamic_cast(storeType)) + { + bitWidth = bitType->nbits(); + } + else if (dynamic_cast(storeType)) + { + bitWidth = 64; + } + else + { + throw jlm::util::error("unknown width for mem request"); + } + int log2Bytes = log2(bitWidth / 8); + Connect(thenBody, memReqSize, GetConstant(thenBody, 3, log2Bytes)); + Connect(thenBody, memReqWrite, oneBitValue); + // Update for next iteration + previousGranted = AddOrOp(body, previousGranted, grant); + storeGranted[i] = AddOrOp(body, storeGranted[i], grant); } - int log2Bytes = log2(bitWidth / 8); - Connect(thenBody, memReqSize, GetConstant(thenBody, 3, log2Bytes)); - Connect(thenBody, memReqWrite, oneBitValue); - // update for next iteration - 
previousGranted = AddOrOp(body, previousGranted, condition); } - // WriteModuleToFile(module, node); + return module; } diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 237739bff..fee8ea292 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -121,8 +121,20 @@ class RhlsToFirrtlConverter : public BaseHLS MlirGenStateGate(const jlm::rvsdg::simple_node * node); circt::firrtl::FModuleOp MlirGenMem(const jlm::rvsdg::simple_node * node); + /** + * Generate a FIRRTL module for an HLS memory response node that implements the functionality for + * retrieving memory responses. + * @param node The HLS memory response node. + * @return The generated FIRRTL module. + */ circt::firrtl::FModuleOp MlirGenHlsMemResp(const jlm::rvsdg::simple_node * node); + /** + * Generate a FIRRTL module for an HLS memory request node that implements the functionality for + * performing memory requests. + * @param node The HLS memory request node. + * @return The generated FIRRTL module. + */ circt::firrtl::FModuleOp MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node); circt::firrtl::FModuleOp From 198967ade3c9476457574d798e6ab18bc92601ed Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 6 Oct 2024 11:31:11 +0200 Subject: [PATCH 101/170] Remove static output_node::node() method (#648) The method was superseded by `output::GetNode()`.
--- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 2 +- jlm/hls/ir/hls.cpp | 2 +- jlm/hls/opt/cne.cpp | 4 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 12 +- jlm/llvm/ir/operators/Load.cpp | 34 +- jlm/llvm/ir/operators/Store.cpp | 18 +- jlm/llvm/ir/operators/call.cpp | 2 +- jlm/llvm/ir/operators/call.hpp | 2 +- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/ir/operators/operators.cpp | 2 +- jlm/llvm/ir/operators/sext.cpp | 12 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- jlm/llvm/opt/alias-analyses/PointsToGraph.cpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 8 +- jlm/llvm/opt/cne.cpp | 4 +- jlm/llvm/opt/inversion.cpp | 4 +- jlm/llvm/opt/pull.cpp | 10 +- jlm/llvm/opt/push.cpp | 4 +- jlm/llvm/opt/unroll.cpp | 16 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 56 +- jlm/rvsdg/bitstring/concat.cpp | 12 +- jlm/rvsdg/bitstring/slice.cpp | 2 +- jlm/rvsdg/gamma.cpp | 6 +- jlm/rvsdg/graph.cpp | 2 +- jlm/rvsdg/node.cpp | 12 +- jlm/rvsdg/reduction-helpers.hpp | 2 +- jlm/rvsdg/statemux.cpp | 6 +- jlm/rvsdg/theta.hpp | 2 +- jlm/rvsdg/traverser.cpp | 10 +- tests/TestRvsdgs.cpp | 133 +++-- .../llvm/ThreeAddressCodeConversionTests.cpp | 4 +- tests/jlm/llvm/ir/operators/LoadTests.cpp | 16 +- tests/jlm/llvm/ir/operators/StoreTests.cpp | 14 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 14 +- tests/jlm/llvm/ir/operators/test-sext.cpp | 4 +- tests/jlm/llvm/opt/TestLoadMuxReduction.cpp | 8 +- tests/jlm/llvm/opt/TestLoadStoreReduction.cpp | 4 +- .../alias-analyses/TestMemoryStateEncoder.cpp | 533 +++++++++--------- tests/jlm/llvm/opt/test-cne.cpp | 8 +- tests/jlm/llvm/opt/test-inlining.cpp | 2 +- tests/jlm/llvm/opt/test-inversion.cpp | 8 +- tests/jlm/llvm/opt/test-pull.cpp | 2 +- tests/jlm/llvm/opt/test-push.cpp | 6 +- tests/jlm/llvm/opt/test-unroll.cpp | 4 +- tests/jlm/rvsdg/bitstring/bitstring.cpp | 134 ++--- tests/jlm/rvsdg/test-binary.cpp | 12 +- tests/jlm/rvsdg/test-gamma.cpp | 6 +- tests/jlm/rvsdg/test-statemux.cpp | 4 +- 48 files changed, 583 insertions(+), 585 
deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 45cc53947..f2fd3d2f1 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -276,7 +276,7 @@ rename_delta(llvm::delta::node * odn) odn->output()->divert_users(data); jlm::rvsdg::remove(odn); - return static_cast(jlm::rvsdg::node_output::node(data)); + return static_cast(jlm::rvsdg::output::GetNode(*data)); } llvm::lambda::node * diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 8ba4b8d27..f4116c8a2 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -185,7 +185,7 @@ loop_node::create(rvsdg::Region * parent, bool init) void loop_node::set_predicate(jlm::rvsdg::output * p) { - auto node = jlm::rvsdg::node_output::node(predicate()->origin()); + auto node = jlm::rvsdg::output::GetNode(*predicate()->origin()); predicate()->origin()->divert_users(p); if (node && !node->has_users()) remove(node); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 96ca5bd18..c63ca01cc 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -198,8 +198,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return congruent(output1, output2, vs, ctx); } - auto n1 = jlm::rvsdg::node_output::node(o1); - auto n2 = jlm::rvsdg::node_output::node(o2); + auto n1 = jlm::rvsdg::output::GetNode(*o1); + auto n2 = jlm::rvsdg::output::GetNode(*o2); if (is(n1) && is(n2) && n1 == n2) { auto so1 = static_cast(o1); diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index f3ff729f9..edff1dfc3 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -202,7 +202,7 @@ convert_empty_gamma_node(const rvsdg::GammaNode * gamma, context & ctx) continue; } - auto matchnode = rvsdg::node_output::node(predicate); + auto matchnode = rvsdg::output::GetNode(*predicate); if (is(matchnode)) { auto matchop 
= static_cast(&matchnode->operation()); @@ -282,7 +282,7 @@ convert_gamma_node(const rvsdg::node & node, context & ctx) auto output = gamma->output(n); bool invariant = true; - auto matchnode = rvsdg::node_output::node(predicate); + auto matchnode = rvsdg::output::GetNode(*predicate); bool select = (gamma->nsubregions() == 2) && is(matchnode); std::vector> arguments; for (size_t r = 0; r < gamma->nsubregions(); r++) @@ -292,7 +292,7 @@ convert_gamma_node(const rvsdg::node & node, context & ctx) auto v = ctx.variable(origin); arguments.push_back(std::make_pair(v, phi_nodes[r])); invariant &= (v == ctx.variable(gamma->subregion(0)->result(n)->origin())); - auto tmp = rvsdg::node_output::node(origin); + auto tmp = rvsdg::output::GetNode(*origin); select &= (tmp == nullptr && origin->region()->node() == &node); } @@ -306,7 +306,7 @@ convert_gamma_node(const rvsdg::node & node, context & ctx) if (select) { /* use select instead of phi */ - auto matchnode = rvsdg::node_output::node(predicate); + auto matchnode = rvsdg::output::GetNode(*predicate); auto matchop = static_cast(&matchnode->operation()); auto d = matchop->default_alternative(); auto c = ctx.variable(matchnode->input(0)->origin()); @@ -442,7 +442,7 @@ convert_phi_node(const rvsdg::node & node, context & ctx) for (size_t n = 0; n < subregion->nresults(); n++) { JLM_ASSERT(subregion->argument(n)->input() == nullptr); - auto node = rvsdg::node_output::node(subregion->result(n)->origin()); + auto node = rvsdg::output::GetNode(*subregion->result(n)->origin()); if (auto lambda = dynamic_cast(node)) { @@ -469,7 +469,7 @@ convert_phi_node(const rvsdg::node & node, context & ctx) { JLM_ASSERT(subregion->argument(n)->input() == nullptr); auto result = subregion->result(n); - auto node = rvsdg::node_output::node(result->origin()); + auto node = rvsdg::output::GetNode(*result->origin()); if (auto lambda = dynamic_cast(node)) { diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 
8e45921ce..471e1a3de 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -185,7 +185,7 @@ is_load_mux_reducible(const std::vector & operands) if (operands.size() != 2) return false; - auto memStateMergeNode = rvsdg::node_output::node(operands[1]); + auto memStateMergeNode = rvsdg::output::GetNode(*operands[1]); if (!is(memStateMergeNode)) return false; @@ -209,13 +209,13 @@ is_load_alloca_reducible(const std::vector & operands) { auto address = operands[0]; - auto allocanode = rvsdg::node_output::node(address); + auto allocanode = rvsdg::output::GetNode(*address); if (!is(allocanode)) return false; for (size_t n = 1; n < operands.size(); n++) { - auto node = rvsdg::node_output::node(operands[n]); + auto node = rvsdg::output::GetNode(*operands[n]); if (is(node) && node != allocanode) return true; } @@ -226,10 +226,10 @@ is_load_alloca_reducible(const std::vector & operands) static bool is_reducible_state(const rvsdg::output * state, const rvsdg::node * loadalloca) { - if (is(rvsdg::node_output::node(state))) + if (is(rvsdg::output::GetNode(*state))) { - auto storenode = rvsdg::node_output::node(state); - auto addressnode = rvsdg::node_output::node(storenode->input(0)->origin()); + auto storenode = rvsdg::output::GetNode(*state); + auto addressnode = rvsdg::output::GetNode(*storenode->input(0)->origin()); if (is(addressnode) && addressnode != loadalloca) return true; } @@ -257,7 +257,7 @@ is_load_store_state_reducible( if (operands.size() == 2) return false; - auto allocanode = rvsdg::node_output::node(address); + auto allocanode = rvsdg::output::GetNode(*address); if (!is(allocanode)) return false; @@ -303,7 +303,7 @@ is_load_store_reducible( // Check that the first state edge originates from a store auto firstState = operands[1]; - auto storeNode = dynamic_cast(rvsdg::node_output::node(firstState)); + auto storeNode = dynamic_cast(rvsdg::output::GetNode(*firstState)); if (!storeNode) { return false; @@ -317,7 +317,7 @@ 
is_load_store_reducible( for (size_t n = 1; n < operands.size(); n++) { auto state = operands[n]; - auto node = rvsdg::node_output::node(state); + auto node = rvsdg::output::GetNode(*state); if (node != storeNode) { return false; @@ -353,7 +353,7 @@ perform_load_store_reduction( const LoadNonVolatileOperation & op, const std::vector & operands) { - auto storenode = rvsdg::node_output::node(operands[1]); + auto storenode = rvsdg::output::GetNode(*operands[1]); std::vector results(1, storenode->input(1)->origin()); results.insert(results.end(), std::next(operands.begin()), operands.end()); @@ -366,7 +366,7 @@ perform_load_mux_reduction( const LoadNonVolatileOperation & op, const std::vector & operands) { - auto memStateMergeNode = rvsdg::node_output::node(operands[1]); + auto memStateMergeNode = rvsdg::output::GetNode(*operands[1]); auto ld = LoadNonVolatileNode::Create( operands[0], @@ -385,13 +385,13 @@ perform_load_alloca_reduction( const LoadNonVolatileOperation & op, const std::vector & operands) { - auto allocanode = rvsdg::node_output::node(operands[0]); + auto allocanode = rvsdg::output::GetNode(*operands[0]); std::vector loadstates; std::vector otherstates; for (size_t n = 1; n < operands.size(); n++) { - auto node = rvsdg::node_output::node(operands[n]); + auto node = rvsdg::output::GetNode(*operands[n]); if (!is(node) || node == allocanode) loadstates.push_back(operands[n]); else @@ -413,7 +413,7 @@ perform_load_store_state_reduction( const std::vector & operands) { auto address = operands[0]; - auto allocanode = rvsdg::node_output::node(address); + auto allocanode = rvsdg::output::GetNode(*address); std::vector new_loadstates; std::vector results(operands.size(), nullptr); @@ -493,7 +493,7 @@ is_load_load_state_reducible(const std::vector & operands) for (size_t n = 1; n < operands.size(); n++) { - if (is(rvsdg::node_output::node(operands[n]))) + if (is(rvsdg::output::GetNode(*operands[n]))) return true; } @@ -509,7 +509,7 @@ 
perform_load_load_state_reduction( auto load_state_input = [](rvsdg::output * result) { - auto ld = rvsdg::node_output::node(result); + auto ld = rvsdg::output::GetNode(*result); JLM_ASSERT(is(ld)); /* @@ -531,7 +531,7 @@ perform_load_load_state_reduction( { JLM_ASSERT(rvsdg::is(operand->type())); - if (!is(rvsdg::node_output::node(operand))) + if (!is(rvsdg::output::GetNode(*operand))) return operand; mxstates[index].push_back(operand); diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index eb3d73ef5..6d7acdd4e 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -167,13 +167,13 @@ is_store_mux_reducible(const std::vector & operands) { JLM_ASSERT(operands.size() > 2); - auto memStateMergeNode = jlm::rvsdg::node_output::node(operands[2]); + auto memStateMergeNode = jlm::rvsdg::output::GetNode(*operands[2]); if (!is(memStateMergeNode)) return false; for (size_t n = 2; n < operands.size(); n++) { - auto node = jlm::rvsdg::node_output::node(operands[n]); + auto node = jlm::rvsdg::output::GetNode(*operands[n]); if (node != memStateMergeNode) return false; } @@ -188,7 +188,7 @@ is_store_store_reducible( { JLM_ASSERT(operands.size() > 2); - auto storenode = jlm::rvsdg::node_output::node(operands[2]); + auto storenode = jlm::rvsdg::output::GetNode(*operands[2]); if (!is(storenode)) return false; @@ -201,7 +201,7 @@ is_store_store_reducible( for (size_t n = 2; n < operands.size(); n++) { - if (jlm::rvsdg::node_output::node(operands[n]) != storenode || operands[n]->nusers() != 1) + if (jlm::rvsdg::output::GetNode(*operands[n]) != storenode || operands[n]->nusers() != 1) return false; } @@ -216,7 +216,7 @@ is_store_alloca_reducible(const std::vector & operands) if (operands.size() == 3) return false; - auto alloca = jlm::rvsdg::node_output::node(operands[0]); + auto alloca = jlm::rvsdg::output::GetNode(*operands[0]); if (!alloca || !is(alloca->operation())) return false; @@ -246,7 +246,7 @@ 
perform_store_mux_reduction( const StoreNonVolatileOperation & op, const std::vector & operands) { - auto memStateMergeNode = jlm::rvsdg::node_output::node(operands[2]); + auto memStateMergeNode = jlm::rvsdg::output::GetNode(*operands[2]); auto memStateMergeOperands = jlm::rvsdg::operands(memStateMergeNode); auto states = StoreNonVolatileNode::Create( @@ -263,7 +263,7 @@ perform_store_store_reduction( const std::vector & operands) { JLM_ASSERT(is_store_store_reducible(op, operands)); - auto storenode = jlm::rvsdg::node_output::node(operands[2]); + auto storenode = jlm::rvsdg::output::GetNode(*operands[2]); auto storeops = jlm::rvsdg::operands(storenode); std::vector states(std::next(std::next(storeops.begin())), storeops.end()); @@ -277,7 +277,7 @@ perform_store_alloca_reduction( { auto value = operands[1]; auto address = operands[0]; - auto alloca_state = jlm::rvsdg::node_output::node(address)->output(1); + auto alloca_state = jlm::rvsdg::output::GetNode(*address)->output(1); std::unordered_set states( std::next(std::next(operands.begin())), operands.end()); @@ -358,7 +358,7 @@ store_normal_form::normalize_node(jlm::rvsdg::node * node) const if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) { auto outputs = perform_multiple_origin_reduction(*op, operands); - auto new_node = jlm::rvsdg::node_output::node(outputs[0]); + auto new_node = jlm::rvsdg::output::GetNode(*outputs[0]); std::unordered_map origin2output; for (size_t n = 0; n < outputs.size(); n++) diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 636be3534..d0d995dae 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -166,7 +166,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) if (is(origin)) return origin; - if (is(rvsdg::node_output::node(origin))) + if (is(rvsdg::output::GetNode(*origin))) return origin; if (is(origin)) diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index 
cce8410e6..32b4ffc90 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -388,7 +388,7 @@ class CallNode final : public jlm::rvsdg::simple_node [[nodiscard]] static rvsdg::simple_node * GetMemoryStateEntryMerge(const CallNode & callNode) noexcept { - auto node = rvsdg::node_output::node(callNode.GetMemoryStateInput()->origin()); + auto node = rvsdg::output::GetNode(*callNode.GetMemoryStateInput()->origin()); return is(node) ? dynamic_cast(node) : nullptr; } diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index b0443ad06..00f98479e 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -171,7 +171,7 @@ node::GetMemoryStateExitMerge(const lambda::node & lambdaNode) noexcept { auto & result = lambdaNode.GetMemoryStateRegionResult(); - auto node = rvsdg::node_output::node(result.origin()); + auto node = rvsdg::output::GetNode(*result.origin()); return is(node) ? dynamic_cast(node) : nullptr; } diff --git a/jlm/llvm/ir/operators/operators.cpp b/jlm/llvm/ir/operators/operators.cpp index 428f162cd..5c341b216 100644 --- a/jlm/llvm/ir/operators/operators.cpp +++ b/jlm/llvm/ir/operators/operators.cpp @@ -439,7 +439,7 @@ zext_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * opera { auto c = static_cast(&producer(operand)->operation()); return create_bitconstant( - rvsdg::node_output::node(operand)->region(), + rvsdg::output::GetNode(*operand)->region(), c->value().zext(ndstbits() - nsrcbits())); } diff --git a/jlm/llvm/ir/operators/sext.cpp b/jlm/llvm/ir/operators/sext.cpp index d2aa342cb..9c31d9c4f 100644 --- a/jlm/llvm/ir/operators/sext.cpp +++ b/jlm/llvm/ir/operators/sext.cpp @@ -17,19 +17,19 @@ static const rvsdg::unop_reduction_path_t sext_reduction_bitbinary = 129; static bool is_bitunary_reducible(const rvsdg::output * operand) { - return rvsdg::is(rvsdg::node_output::node(operand)); + return rvsdg::is(rvsdg::output::GetNode(*operand)); } static bool 
is_bitbinary_reducible(const rvsdg::output * operand) { - return rvsdg::is(rvsdg::node_output::node(operand)); + return rvsdg::is(rvsdg::output::GetNode(*operand)); } static bool is_inverse_reducible(const sext_op & op, const rvsdg::output * operand) { - auto node = rvsdg::node_output::node(operand); + auto node = rvsdg::output::GetNode(*operand); if (!node) return false; @@ -41,7 +41,7 @@ static rvsdg::output * perform_bitunary_reduction(const sext_op & op, rvsdg::output * operand) { JLM_ASSERT(is_bitunary_reducible(operand)); - auto unary = rvsdg::node_output::node(operand); + auto unary = rvsdg::output::GetNode(*operand); auto region = operand->region(); auto uop = static_cast(&unary->operation()); @@ -53,7 +53,7 @@ static rvsdg::output * perform_bitbinary_reduction(const sext_op & op, rvsdg::output * operand) { JLM_ASSERT(is_bitbinary_reducible(operand)); - auto binary = rvsdg::node_output::node(operand); + auto binary = rvsdg::output::GetNode(*operand); auto region = operand->region(); auto bop = static_cast(&binary->operation()); @@ -71,7 +71,7 @@ static rvsdg::output * perform_inverse_reduction(const sext_op & op, rvsdg::output * operand) { JLM_ASSERT(is_inverse_reducible(op, operand)); - return rvsdg::node_output::node(operand)->input(0)->origin(); + return rvsdg::output::GetNode(*operand)->input(0)->origin(); } sext_op::~sext_op() diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 33749d293..93ca2e768 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -216,7 +216,7 @@ InvariantValueRedirection::RedirectCallOutputs(CallNode & callNode) for (size_t i = 0; i < lambdaExitMerge->ninputs(); i++) { auto lambdaExitMergeInput = lambdaExitMerge->input(i); - auto node = rvsdg::node_output::node(lambdaExitMergeInput->origin()); + auto node = rvsdg::output::GetNode(*lambdaExitMergeInput->origin()); if (node == lambdaEntrySplit) { auto callExitSplitOutput = 
callExitSplit->output(lambdaExitMergeInput->index()); diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp index 15a5588ba..6c9388149 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp @@ -481,7 +481,7 @@ PointsToGraph::RegisterNode::~RegisterNode() noexcept = default; std::string PointsToGraph::RegisterNode::ToString(const rvsdg::output & output) { - auto node = jlm::rvsdg::node_output::node(&output); + auto node = jlm::rvsdg::output::GetNode(*&output); if (node != nullptr) return util::strfmt(node->operation().debug_string(), ":o", output.index()); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index bd7619125..51dd6589d 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -199,7 +199,7 @@ class RegisterLocation final : public Location [[nodiscard]] std::string DebugString() const noexcept override { - auto node = jlm::rvsdg::node_output::node(Output_); + auto node = jlm::rvsdg::output::GetNode(*Output_); auto index = Output_->index(); if (jlm::rvsdg::is(node)) @@ -241,13 +241,13 @@ class RegisterLocation final : public Location if (is(Output_)) { - auto dbgstr = jlm::rvsdg::node_output::node(Output_)->operation().debug_string(); + auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); } if (is(Output_)) { - auto dbgstr = jlm::rvsdg::node_output::node(Output_)->operation().debug_string(); + auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->operation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); } @@ -269,7 +269,7 @@ class RegisterLocation final : public Location } return jlm::util::strfmt( - jlm::rvsdg::node_output::node(Output_)->operation().debug_string(), + jlm::rvsdg::output::GetNode(*Output_)->operation().debug_string(), ":", index); } 
diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index ecbbd9562..8edcea23c 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -195,8 +195,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return congruent(output1, output2, vs, ctx); } - auto n1 = jlm::rvsdg::node_output::node(o1); - auto n2 = jlm::rvsdg::node_output::node(o2); + auto n1 = jlm::rvsdg::output::GetNode(*o1); + auto n2 = jlm::rvsdg::output::GetNode(*o2); if (is(n1) && is(n2) && n1 == n2) { auto so1 = static_cast(o1); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index deb49c718..36f0a19b6 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -51,7 +51,7 @@ class ivtstat final : public util::Statistics static rvsdg::GammaNode * is_applicable(const rvsdg::ThetaNode * theta) { - auto matchnode = jlm::rvsdg::node_output::node(theta->predicate()->origin()); + auto matchnode = jlm::rvsdg::output::GetNode(*theta->predicate()->origin()); if (!jlm::rvsdg::is(matchnode)) return nullptr; @@ -79,7 +79,7 @@ pullin(rvsdg::GammaNode * gamma, rvsdg::ThetaNode * theta) pullin_bottom(gamma); for (const auto & lv : *theta) { - if (jlm::rvsdg::node_output::node(lv->result()->origin()) != gamma) + if (jlm::rvsdg::output::GetNode(*lv->result()->origin()) != gamma) { auto ev = gamma->add_entryvar(lv->result()->origin()); JLM_ASSERT(ev->narguments() == 2); diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 515c068ff..a7166450f 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -131,8 +131,8 @@ pullin_top(rvsdg::GammaNode * gamma) auto ev = gamma->begin_entryvar(); while (ev != gamma->end_entryvar()) { - auto node = jlm::rvsdg::node_output::node(ev->origin()); - auto tmp = jlm::rvsdg::node_output::node(gamma->predicate()->origin()); + auto node = jlm::rvsdg::output::GetNode(*ev->origin()); + auto tmp = jlm::rvsdg::output::GetNode(*gamma->predicate()->origin()); if (node && tmp != node && 
single_successor(node)) { pullin_node(gamma, node); @@ -178,7 +178,7 @@ pullin_bottom(rvsdg::GammaNode * gamma) for (size_t i = 0; i < node->ninputs(); i++) { auto input = node->input(i); - if (jlm::rvsdg::node_output::node(input->origin()) == gamma) + if (jlm::rvsdg::output::GetNode(*input->origin()) == gamma) { auto output = static_cast(input->origin()); operands.push_back(gamma->subregion(r)->result(output->index())->origin()); @@ -251,13 +251,13 @@ pull(rvsdg::GammaNode * gamma) if (gamma->nsubregions() == 2 && empty(gamma)) return; - auto prednode = jlm::rvsdg::node_output::node(gamma->predicate()->origin()); + auto prednode = jlm::rvsdg::output::GetNode(*gamma->predicate()->origin()); /* FIXME: This is inefficient. We can do better. */ auto ev = gamma->begin_entryvar(); while (ev != gamma->end_entryvar()) { - auto node = jlm::rvsdg::node_output::node(ev->origin()); + auto node = jlm::rvsdg::output::GetNode(*ev->origin()); if (!node || prednode == node || !single_successor(node)) { ev++; diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index f9b14f62b..1faf44aab 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -355,7 +355,7 @@ pushout_store(jlm::rvsdg::node * storenode) std::unordered_set users; for (const auto & user : *states[n]) { - if (jlm::rvsdg::input::GetNode(*user) != jlm::rvsdg::node_output::node(nstates[0])) + if (jlm::rvsdg::input::GetNode(*user) != jlm::rvsdg::output::GetNode(*nstates[0])) users.insert(user); } @@ -371,7 +371,7 @@ push_bottom(rvsdg::ThetaNode * theta) { for (const auto & lv : *theta) { - auto storenode = jlm::rvsdg::node_output::node(lv->result()->origin()); + auto storenode = jlm::rvsdg::output::GetNode(*lv->result()->origin()); if (jlm::rvsdg::is(storenode) && is_movable_store(storenode)) { pushout_store(storenode); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 47a3e4bcb..2af878550 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -62,7 +62,7 @@ 
is_theta_invariant(const jlm::rvsdg::output * output) { JLM_ASSERT(is(output->region()->node())); - if (jlm::rvsdg::is(jlm::rvsdg::node_output::node(output))) + if (jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*output))) return true; auto argument = dynamic_cast(output); @@ -79,7 +79,7 @@ push_from_theta(jlm::rvsdg::output * output) if (argument) return argument; - auto tmp = jlm::rvsdg::node_output::node(output); + auto tmp = jlm::rvsdg::output::GetNode(*output); JLM_ASSERT(jlm::rvsdg::is(tmp)); JLM_ASSERT(is(tmp->region()->node())); auto theta = static_cast(tmp->region()->node()); @@ -104,7 +104,7 @@ is_idv(jlm::rvsdg::input * input) return false; auto tinput = static_cast(a->input()); - return jlm::rvsdg::node_output::node(tinput->result()->origin()) == node; + return jlm::rvsdg::output::GetNode(*tinput->result()->origin()) == node; } std::unique_ptr @@ -135,11 +135,11 @@ unrollinfo::create(rvsdg::ThetaNode * theta) { using namespace jlm::rvsdg; - auto matchnode = jlm::rvsdg::node_output::node(theta->predicate()->origin()); + auto matchnode = jlm::rvsdg::output::GetNode(*theta->predicate()->origin()); if (!is(matchnode)) return nullptr; - auto cmpnode = jlm::rvsdg::node_output::node(matchnode->input(0)->origin()); + auto cmpnode = jlm::rvsdg::output::GetNode(*matchnode->input(0)->origin()); if (!is(cmpnode)) return nullptr; @@ -149,7 +149,7 @@ unrollinfo::create(rvsdg::ThetaNode * theta) if (!end) return nullptr; - auto armnode = jlm::rvsdg::node_output::node((end == o0 ? o1 : o0)); + auto armnode = jlm::rvsdg::output::GetNode(*(end == o0 ? o1 : o0)); if (!is(armnode) && !is(armnode)) return nullptr; if (armnode->ninputs() != 2) @@ -242,7 +242,7 @@ unroll_theta(const unrollinfo & ui, rvsdg::SubstitutionMap & smap, size_t factor to a multiple of the step value. 
*/ auto cmpnode = ui.cmpnode(); - auto cmp = jlm::rvsdg::node_output::node(smap.lookup(cmpnode->output(0))); + auto cmp = jlm::rvsdg::output::GetNode(*smap.lookup(cmpnode->output(0))); auto input = cmp->input(0)->origin() == smap.lookup(ui.end()) ? cmp->input(0) : cmp->input(1); JLM_ASSERT(input->origin() == smap.lookup(ui.end())); @@ -356,7 +356,7 @@ create_unrolled_theta_predicate( using namespace jlm::rvsdg; auto region = smap.lookup(ui.cmpnode()->output(0))->region(); - auto cmpnode = jlm::rvsdg::node_output::node(smap.lookup(ui.cmpnode()->output(0))); + auto cmpnode = jlm::rvsdg::output::GetNode(*smap.lookup(ui.cmpnode()->output(0))); auto step = smap.lookup(ui.step()); auto end = smap.lookup(ui.end()); auto nbits = ui.nbits(); diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index 99aebcde7..7280b3bc3 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -109,43 +109,43 @@ MlirToJlmConverter::ConvertCmpIOp( { if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::eq) { - return rvsdg::node_output::node(rvsdg::biteq_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::biteq_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ne) { - return rvsdg::node_output::node(rvsdg::bitne_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitne_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::sge) { - return rvsdg::node_output::node(rvsdg::bitsge_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitsge_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::sgt) { - return rvsdg::node_output::node(rvsdg::bitsgt_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitsgt_op::create(nbits, 
inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::sle) { - return rvsdg::node_output::node(rvsdg::bitsle_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitsle_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::slt) { - return rvsdg::node_output::node(rvsdg::bitslt_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitslt_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::uge) { - return rvsdg::node_output::node(rvsdg::bituge_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bituge_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ugt) { - return rvsdg::node_output::node(rvsdg::bitugt_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitugt_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ule) { - return rvsdg::node_output::node(rvsdg::bitule_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitule_op::create(nbits, inputs[0], inputs[1])); } else if (CompOp.getPredicate() == ::mlir::arith::CmpIPredicate::ult) { - return rvsdg::node_output::node(rvsdg::bitult_op::create(nbits, inputs[0], inputs[1])); + return rvsdg::output::GetNode(*rvsdg::bitult_op::create(nbits, inputs[0], inputs[1])); } else { @@ -162,91 +162,91 @@ MlirToJlmConverter::ConvertBitBinaryNode( return nullptr; if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::AddIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitadd_op::create( + return rvsdg::output::GetNode(*rvsdg::bitadd_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = 
::mlir::dyn_cast<::mlir::arith::AndIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitand_op::create( + return rvsdg::output::GetNode(*rvsdg::bitand_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::ShRUIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitashr_op::create( + return rvsdg::output::GetNode(*rvsdg::bitashr_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::MulIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitmul_op::create( + return rvsdg::output::GetNode(*rvsdg::bitmul_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::OrIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitor_op::create( + return rvsdg::output::GetNode(*rvsdg::bitor_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::DivSIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitsdiv_op::create( + return rvsdg::output::GetNode(*rvsdg::bitsdiv_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::ShLIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitshl_op::create( + return rvsdg::output::GetNode(*rvsdg::bitshl_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::ShRUIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitshr_op::create( + return rvsdg::output::GetNode(*rvsdg::bitshr_op::create( 
static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::RemSIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitsmod_op::create( + return rvsdg::output::GetNode(*rvsdg::bitsmod_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::SubIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitsub_op::create( + return rvsdg::output::GetNode(*rvsdg::bitsub_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::DivUIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitudiv_op::create( + return rvsdg::output::GetNode(*rvsdg::bitudiv_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::RemUIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitumod_op::create( + return rvsdg::output::GetNode(*rvsdg::bitumod_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); } else if (auto castedOp = ::mlir::dyn_cast<::mlir::arith::XOrIOp>(&mlirOperation)) { - return rvsdg::node_output::node(rvsdg::bitxor_op::create( + return rvsdg::output::GetNode(*rvsdg::bitxor_op::create( static_cast(castedOp.getType().cast<::mlir::IntegerType>().getWidth()), inputs[0], inputs[1])); @@ -275,7 +275,7 @@ MlirToJlmConverter::ConvertOperation( if (!st) JLM_ASSERT("frontend : expected bitstring type for ExtUIOp operation."); ::mlir::Type type = castedOp.getType(); - return rvsdg::node_output::node(&llvm::zext_op::Create(*(inputs[0]), ConvertType(type))); + return rvsdg::output::GetNode(*&llvm::zext_op::Create(*(inputs[0]), ConvertType(type))); } else if 
(::mlir::isa<::mlir::rvsdg::OmegaNode>(&mlirOperation)) @@ -294,8 +294,8 @@ MlirToJlmConverter::ConvertOperation( JLM_ASSERT(type.getTypeID() == ::mlir::IntegerType::getTypeID()); auto integerType = ::mlir::cast<::mlir::IntegerType>(type); - return rvsdg::node_output::node( - rvsdg::create_bitconstant(&rvsdgRegion, integerType.getWidth(), constant.value())); + return rvsdg::output::GetNode( + *rvsdg::create_bitconstant(&rvsdgRegion, integerType.getWidth(), constant.value())); } // Binary Integer Comparision operations @@ -312,7 +312,7 @@ MlirToJlmConverter::ConvertOperation( else if (auto MlirCtrlConst = ::mlir::dyn_cast<::mlir::rvsdg::ConstantCtrl>(&mlirOperation)) { JLM_ASSERT(::mlir::isa<::mlir::rvsdg::RVSDG_CTRLType>(MlirCtrlConst.getType())); - return rvsdg::node_output::node(rvsdg::control_constant( + return rvsdg::output::GetNode(*rvsdg::control_constant( &rvsdgRegion, ::mlir::cast<::mlir::rvsdg::RVSDG_CTRLType>(MlirCtrlConst.getType()).getNumOptions(), MlirCtrlConst.getValue())); @@ -369,7 +369,7 @@ MlirToJlmConverter::ConvertOperation( mapping[matchRuleAttr.getValues().front()] = matchRuleAttr.getIndex(); } - return rvsdg::node_output::node(rvsdg::match_op::Create( + return rvsdg::output::GetNode(*rvsdg::match_op::Create( *(inputs[0]), // predicate mapping, // mapping defaultAlternative, // defaultAlternative diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 12fabf24b..b2b4303a1 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -35,8 +35,8 @@ namespace jlm::rvsdg::output * concat_reduce_arg_pair(jlm::rvsdg::output * arg1, jlm::rvsdg::output * arg2) { - auto node1 = node_output::node(arg1); - auto node2 = node_output::node(arg2); + auto node1 = output::GetNode(*arg1); + auto node2 = output::GetNode(*arg2); if (!node1 || !node2) return nullptr; @@ -112,7 +112,7 @@ class concat_normal_form final : public simple_normal_form { // FIXME: switch to comparing operator, not just typeid, after // 
converting "concat" to not be a binary operator anymore - return is(node_output::node(arg)); + return is(output::GetNode(*arg)); }); } else @@ -160,7 +160,7 @@ class concat_normal_form final : public simple_normal_form { // FIXME: switch to comparing operator, not just typeid, after // converting "concat" to not be a binary operator anymore - return is(node_output::node(arg)); + return is(output::GetNode(*arg)); }); } else @@ -282,8 +282,8 @@ bitconcat_op::can_reduce_operand_pair( const jlm::rvsdg::output * arg1, const jlm::rvsdg::output * arg2) const noexcept { - auto node1 = node_output::node(arg1); - auto node2 = node_output::node(arg2); + auto node1 = output::GetNode(*arg1); + auto node2 = output::GetNode(*arg2); if (!node1 || !node2) return binop_reduction_none; diff --git a/jlm/rvsdg/bitstring/slice.cpp b/jlm/rvsdg/bitstring/slice.cpp index 823dc243c..32e7f9529 100644 --- a/jlm/rvsdg/bitstring/slice.cpp +++ b/jlm/rvsdg/bitstring/slice.cpp @@ -31,7 +31,7 @@ bitslice_op::debug_string() const unop_reduction_path_t bitslice_op::can_reduce_operand(const jlm::rvsdg::output * arg) const noexcept { - auto node = node_output::node(arg); + auto node = output::GetNode(*arg); auto & arg_type = *dynamic_cast(&arg->type()); if ((low() == 0) && (high() == arg_type.nbits())) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index b33e0b526..7356b2bde 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -16,7 +16,7 @@ namespace jlm::rvsdg static bool is_predicate_reducible(const GammaNode * gamma) { - auto constant = node_output::node(gamma->predicate()->origin()); + auto constant = output::GetNode(*gamma->predicate()->origin()); return constant && is_ctlconstant_op(constant->operation()); } @@ -73,7 +73,7 @@ static std::unordered_set is_control_constant_reducible(GammaNode * gamma) { /* check gamma predicate */ - auto match = node_output::node(gamma->predicate()->origin()); + auto match = output::GetNode(*gamma->predicate()->origin()); if (!is(match)) return 
{}; @@ -96,7 +96,7 @@ is_control_constant_reducible(GammaNode * gamma) size_t n; for (n = 0; n < it->nresults(); n++) { - auto node = node_output::node(it->result(n)->origin()); + auto node = output::GetNode(*it->result(n)->origin()); if (!is(node)) break; diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 9391d029f..d8ea29621 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -103,7 +103,7 @@ graph::ExtractTailNodes(const graph & rvsdg) auto output = rootRegion.result(n)->origin(); if (IsOnlyExported(*output)) { - nodes.push_back(rvsdg::node_output::node(output)); + nodes.push_back(rvsdg::output::GetNode(*output)); } } diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 2093efee8..e49c70b3d 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -109,7 +109,7 @@ output::remove_user(jlm::rvsdg::input * user) users_.erase(user); - if (auto node = node_output::node(this)) + if (auto node = output::GetNode(*this)) { if (!node->has_users()) region()->bottom_nodes.push_back(node); @@ -121,7 +121,7 @@ output::add_user(jlm::rvsdg::input * user) { JLM_ASSERT(users_.find(user) == users_.end()); - if (auto node = node_output::node(this)) + if (auto node = output::GetNode(*this)) { if (!node->has_users()) region()->bottom_nodes.erase(node); @@ -196,7 +196,7 @@ node::~node() node_input * node::add_input(std::unique_ptr input) { - auto producer = node_output::node(input->origin()); + auto producer = output::GetNode(*input->origin()); if (ninputs() == 0) { @@ -218,7 +218,7 @@ void node::RemoveInput(size_t index) { JLM_ASSERT(index < ninputs()); - auto producer = node_output::node(input(index)->origin()); + auto producer = output::GetNode(*input(index)->origin()); /* remove input */ for (size_t n = index; n < ninputs() - 1; n++) @@ -271,7 +271,7 @@ node::recompute_depth() noexcept size_t new_depth = 0; for (size_t n = 0; n < ninputs(); n++) { - auto producer = node_output::node(input(n)->origin()); + auto producer = 
output::GetNode(*input(n)->origin()); new_depth = std::max(new_depth, producer ? producer->depth() + 1 : 0); } if (new_depth == depth()) @@ -309,7 +309,7 @@ node::copy(rvsdg::Region * region, const std::vector & ope jlm::rvsdg::node * producer(const jlm::rvsdg::output * output) noexcept { - if (auto node = node_output::node(output)) + if (auto node = output::GetNode(*output)) return node; JLM_ASSERT(dynamic_cast(output)); diff --git a/jlm/rvsdg/reduction-helpers.hpp b/jlm/rvsdg/reduction-helpers.hpp index 56246d727..5cdd31be0 100644 --- a/jlm/rvsdg/reduction-helpers.hpp +++ b/jlm/rvsdg/reduction-helpers.hpp @@ -161,7 +161,7 @@ associative_flatten(std::vector args, const FlattenTester { auto arg = args[n]; JLM_ASSERT(is(arg)); - auto sub_args = jlm::rvsdg::operands(node_output::node(arg)); + auto sub_args = jlm::rvsdg::operands(output::GetNode(*arg)); args[n] = sub_args[0]; args.insert(args.begin() + n + 1, sub_args.begin() + 1, sub_args.end()); } diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 7d7a2e140..4eb261224 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -44,7 +44,7 @@ is_mux_mux_reducible(const std::vector & ops) for (const auto & operand : operands) { - auto node = node_output::node(operand); + auto node = output::GetNode(*operand); if (!node || !is_mux_op(node->operation())) continue; @@ -90,7 +90,7 @@ perform_mux_mux_reduction( std::vector new_operands; for (const auto & operand : old_operands) { - if (jlm::rvsdg::node_output::node(operand) == muxnode && !reduced) + if (jlm::rvsdg::output::GetNode(*operand) == muxnode && !reduced) { reduced = true; auto tmp = operands(muxnode); @@ -98,7 +98,7 @@ perform_mux_mux_reduction( continue; } - if (jlm::rvsdg::node_output::node(operand) != muxnode) + if (jlm::rvsdg::output::GetNode(*operand) != muxnode) new_operands.push_back(operand); } diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 390025904..cea84185e 100644 --- a/jlm/rvsdg/theta.hpp +++ 
b/jlm/rvsdg/theta.hpp @@ -114,7 +114,7 @@ class ThetaNode final : public structural_node inline void set_predicate(jlm::rvsdg::output * p) { - auto node = node_output::node(predicate()->origin()); + auto node = output::GetNode(*predicate()->origin()); predicate()->divert_to(p); if (node && !node->has_users()) diff --git a/jlm/rvsdg/traverser.cpp b/jlm/rvsdg/traverser.cpp index 7438bcc2a..cb8507561 100644 --- a/jlm/rvsdg/traverser.cpp +++ b/jlm/rvsdg/traverser.cpp @@ -52,7 +52,7 @@ topdown_traverser::predecessors_visited(const jlm::rvsdg::node * node) noexcept { for (size_t n = 0; n < node->ninputs(); n++) { - auto predecessor = node_output::node(node->input(n)->origin()); + auto predecessor = output::GetNode(*node->input(n)->origin()); if (!predecessor) continue; @@ -133,7 +133,7 @@ bottomup_traverser::bottomup_traverser(rvsdg::Region * region, bool revisit) for (size_t n = 0; n < region->nresults(); n++) { - auto node = node_output::node(region->result(n)->origin()); + auto node = output::GetNode(*region->result(n)->origin()); if (node && !node->has_successors()) tracker_.set_nodestate(node, traversal_nodestate::frontier); } @@ -156,7 +156,7 @@ bottomup_traverser::next() tracker_.set_nodestate(node, traversal_nodestate::behind); for (size_t n = 0; n < node->ninputs(); n++) { - auto producer = node_output::node(node->input(n)->origin()); + auto producer = output::GetNode(*node->input(n)->origin()); if (producer && tracker_.get_nodestate(producer) == traversal_nodestate::ahead) tracker_.set_nodestate(producer, traversal_nodestate::frontier); } @@ -180,7 +180,7 @@ bottomup_traverser::node_destroy(jlm::rvsdg::node * node) for (size_t n = 0; n < node->ninputs(); n++) { - auto producer = node_output::node(node->input(n)->origin()); + auto producer = output::GetNode(*node->input(n)->origin()); if (producer && tracker_.get_nodestate(producer) == traversal_nodestate::ahead) tracker_.set_nodestate(producer, traversal_nodestate::frontier); } @@ -192,7 +192,7 @@ 
bottomup_traverser::input_change(input * in, output * old_origin, output * new_o if (in->region() != region() || !is(*in) || !is(old_origin)) return; - auto node = node_output::node(old_origin); + auto node = output::GetNode(*old_origin); traversal_nodestate state = tracker_.get_nodestate(node); /* ignore nodes that have been traversed already, or that are already diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 67b968f1e..db533ac47 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -51,12 +51,12 @@ StoreTest1::SetupRvsdg() this->lambda = fct; - this->size = jlm::rvsdg::node_output::node(csize); + this->size = jlm::rvsdg::output::GetNode(*csize); - this->alloca_a = jlm::rvsdg::node_output::node(a[0]); - this->alloca_b = jlm::rvsdg::node_output::node(b[0]); - this->alloca_c = jlm::rvsdg::node_output::node(c[0]); - this->alloca_d = jlm::rvsdg::node_output::node(d[0]); + this->alloca_a = jlm::rvsdg::output::GetNode(*a[0]); + this->alloca_b = jlm::rvsdg::output::GetNode(*b[0]); + this->alloca_c = jlm::rvsdg::output::GetNode(*c[0]); + this->alloca_d = jlm::rvsdg::output::GetNode(*d[0]); return module; } @@ -108,13 +108,13 @@ StoreTest2::SetupRvsdg() this->lambda = fct; - this->size = jlm::rvsdg::node_output::node(csize); + this->size = jlm::rvsdg::output::GetNode(*csize); - this->alloca_a = jlm::rvsdg::node_output::node(a[0]); - this->alloca_b = jlm::rvsdg::node_output::node(b[0]); - this->alloca_x = jlm::rvsdg::node_output::node(x[0]); - this->alloca_y = jlm::rvsdg::node_output::node(y[0]); - this->alloca_p = jlm::rvsdg::node_output::node(p[0]); + this->alloca_a = jlm::rvsdg::output::GetNode(*a[0]); + this->alloca_b = jlm::rvsdg::output::GetNode(*b[0]); + this->alloca_x = jlm::rvsdg::output::GetNode(*x[0]); + this->alloca_y = jlm::rvsdg::output::GetNode(*y[0]); + this->alloca_p = jlm::rvsdg::output::GetNode(*p[0]); return module; } @@ -150,8 +150,8 @@ LoadTest1::SetupRvsdg() this->lambda = fct; - this->load_p = 
jlm::rvsdg::node_output::node(ld1[0]); - this->load_x = jlm::rvsdg::node_output::node(ld2[0]); + this->load_p = jlm::rvsdg::output::GetNode(*ld1[0]); + this->load_x = jlm::rvsdg::output::GetNode(*ld2[0]); return module; } @@ -207,16 +207,16 @@ LoadTest2::SetupRvsdg() this->lambda = fct; - this->size = jlm::rvsdg::node_output::node(csize); + this->size = jlm::rvsdg::output::GetNode(*csize); - this->alloca_a = jlm::rvsdg::node_output::node(a[0]); - this->alloca_b = jlm::rvsdg::node_output::node(b[0]); - this->alloca_x = jlm::rvsdg::node_output::node(x[0]); - this->alloca_y = jlm::rvsdg::node_output::node(y[0]); - this->alloca_p = jlm::rvsdg::node_output::node(p[0]); + this->alloca_a = jlm::rvsdg::output::GetNode(*a[0]); + this->alloca_b = jlm::rvsdg::output::GetNode(*b[0]); + this->alloca_x = jlm::rvsdg::output::GetNode(*x[0]); + this->alloca_y = jlm::rvsdg::output::GetNode(*y[0]); + this->alloca_p = jlm::rvsdg::output::GetNode(*p[0]); - this->load_x = jlm::rvsdg::node_output::node(ld1[0]); - this->load_a = jlm::rvsdg::node_output::node(ld2[0]); + this->load_x = jlm::rvsdg::output::GetNode(*ld1[0]); + this->load_a = jlm::rvsdg::output::GetNode(*ld2[0]); return module; } @@ -253,7 +253,7 @@ LoadFromUndefTest::SetupRvsdg() /* * Extract nodes */ - UndefValueNode_ = jlm::rvsdg::node_output::node(undefValue); + UndefValueNode_ = jlm::rvsdg::output::GetNode(*undefValue); return rvsdgModule; } @@ -307,8 +307,8 @@ GetElementPtrTest::SetupRvsdg() */ this->lambda = fct; - this->getElementPtrX = jlm::rvsdg::node_output::node(gepx); - this->getElementPtrY = jlm::rvsdg::node_output::node(gepy); + this->getElementPtrX = jlm::rvsdg::output::GetNode(*gepx); + this->getElementPtrY = jlm::rvsdg::output::GetNode(*gepy); return module; } @@ -339,7 +339,7 @@ BitCastTest::SetupRvsdg() * Assign nodes */ this->lambda = fct; - this->bitCast = jlm::rvsdg::node_output::node(cast); + this->bitCast = jlm::rvsdg::output::GetNode(*cast); return module; } @@ -374,7 +374,7 @@ 
Bits2PtrTest::SetupRvsdg() lambda->finalize({ cast, iOStateArgument, memoryStateArgument }); - return std::make_tuple(lambda, jlm::rvsdg::node_output::node(cast)); + return std::make_tuple(lambda, jlm::rvsdg::output::GetNode(*cast)); }; auto setupTestFunction = [&](lambda::output * b2p) @@ -453,7 +453,7 @@ ConstantPointerNullTest::SetupRvsdg() * Assign nodes */ this->lambda = fct; - this->constantPointerNullNode = jlm::rvsdg::node_output::node(constantPointerNullResult); + this->constantPointerNullNode = jlm::rvsdg::output::GetNode(*constantPointerNullResult); return module; } @@ -586,9 +586,9 @@ CallTest1::SetupRvsdg() lambda->finalize({ sum, callG.GetIoStateOutput(), callG.GetMemoryStateOutput() }); GraphExport::Create(*lambda->output(), "h"); - auto allocaX = jlm::rvsdg::node_output::node(x[0]); - auto allocaY = jlm::rvsdg::node_output::node(y[0]); - auto allocaZ = jlm::rvsdg::node_output::node(z[0]); + auto allocaX = jlm::rvsdg::output::GetNode(*x[0]); + auto allocaY = jlm::rvsdg::output::GetNode(*y[0]); + auto allocaZ = jlm::rvsdg::output::GetNode(*z[0]); return std::make_tuple(lambda, allocaX, allocaY, allocaZ, &callF, &callG); }; @@ -650,7 +650,7 @@ CallTest2::SetupRvsdg() lambda->finalize({ cast, iOStateArgument, mx }); - auto mallocNode = jlm::rvsdg::node_output::node(alloc[0]); + auto mallocNode = jlm::rvsdg::output::GetNode(*alloc[0]); return std::make_tuple(lambda, mallocNode); }; @@ -674,7 +674,7 @@ CallTest2::SetupRvsdg() lambda->finalize({ freeResults[1], freeResults[0] }); - auto freeNode = jlm::rvsdg::node_output::node(freeResults[0]); + auto freeNode = jlm::rvsdg::output::GetNode(*freeResults[0]); return std::make_tuple(lambda, freeNode); }; @@ -1029,10 +1029,9 @@ IndirectCallTest2::SetupRvsdg() lambdaOutput, &callX, &callY, + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pxAlloca[0])), jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pxAlloca[0])), - jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pyAlloca[0]))); + 
jlm::rvsdg::output::GetNode(*pyAlloca[0]))); }; auto SetupTest2Function = [&](lambda::output & functionX) @@ -1065,7 +1064,7 @@ IndirectCallTest2::SetupRvsdg() lambdaOutput, &callX, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pzAlloca[0]))); + jlm::rvsdg::output::GetNode(*pzAlloca[0]))); }; auto deltaG1 = SetupG1(); @@ -1455,7 +1454,7 @@ GammaTest2::SetupRvsdg() return std::make_tuple( lambda->output(), gammaOutputA->node(), - rvsdg::node_output::node(allocaZResults[0])); + rvsdg::output::GetNode(*allocaZResults[0])); }; auto SetupLambdaGH = [&](lambda::output & lambdaF, @@ -1508,8 +1507,8 @@ GammaTest2::SetupRvsdg() return std::make_tuple( lambda->output(), &call, - rvsdg::node_output::node(allocaXResults[0]), - rvsdg::node_output::node(allocaYResults[1])); + rvsdg::output::GetNode(*allocaXResults[0]), + rvsdg::output::GetNode(*allocaYResults[1])); }; auto [lambdaF, gammaNode, allocaZ] = SetupLambdaF(); @@ -1591,7 +1590,7 @@ ThetaTest::SetupRvsdg() */ this->lambda = fct; this->theta = thetanode; - this->gep = jlm::rvsdg::node_output::node(gepnode); + this->gep = jlm::rvsdg::output::GetNode(*gepnode); return module; } @@ -1667,7 +1666,7 @@ DeltaTest1::SetupRvsdg() auto lambdaOutput = lambda->finalize(callG.Results()); GraphExport::Create(*lambda->output(), "h"); - return std::make_tuple(lambdaOutput, &callG, jlm::rvsdg::node_output::node(five)); + return std::make_tuple(lambdaOutput, &callG, jlm::rvsdg::output::GetNode(*five)); }; auto f = SetupGlobalF(); @@ -2129,7 +2128,7 @@ PhiTest1::SetupRvsdg() auto lambdaOutput = lambda->finalize(call.Results()); GraphExport::Create(*lambdaOutput, "test"); - return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::node_output::node(allocaResults[0])); + return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::output::GetNode(*allocaResults[0])); }; auto [phiNode, fibfct, gammaNode, callFib1, callFib2] = SetupFib(); @@ -2256,7 +2255,7 @@ PhiTest2::SetupRvsdg() &callB, &callD, jlm::util::AssertedCast( - 
jlm::rvsdg::node_output::node(paAlloca[0]))); + jlm::rvsdg::output::GetNode(*paAlloca[0]))); }; auto SetupB = [&](jlm::rvsdg::Region & region, @@ -2301,7 +2300,7 @@ PhiTest2::SetupRvsdg() &callI, &callC, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pbAlloca[0]))); + jlm::rvsdg::output::GetNode(*pbAlloca[0]))); }; auto SetupC = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) @@ -2340,7 +2339,7 @@ PhiTest2::SetupRvsdg() lambdaOutput, &callA, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pcAlloca[0]))); + jlm::rvsdg::output::GetNode(*pcAlloca[0]))); }; auto SetupD = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) @@ -2370,7 +2369,7 @@ PhiTest2::SetupRvsdg() lambdaOutput, &callA, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pdAlloca[0]))); + jlm::rvsdg::output::GetNode(*pdAlloca[0]))); }; auto SetupPhi = [&](lambda::output & lambdaEight, lambda::output & lambdaI) @@ -2452,7 +2451,7 @@ PhiTest2::SetupRvsdg() lambdaOutput, &callA, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(pTestAlloca[0]))); + jlm::rvsdg::output::GetNode(*pTestAlloca[0]))); }; auto lambdaEight = SetupEight(); @@ -2482,13 +2481,13 @@ PhiTest2::SetupRvsdg() this->LambdaEight_ = lambdaEight->node(); this->LambdaI_ = lambdaI->node(); this->LambdaA_ = jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(lambdaA->result()->origin())); + jlm::rvsdg::output::GetNode(*lambdaA->result()->origin())); this->LambdaB_ = jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(lambdaB->result()->origin())); + jlm::rvsdg::output::GetNode(*lambdaB->result()->origin())); this->LambdaC_ = jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(lambdaC->result()->origin())); + jlm::rvsdg::output::GetNode(*lambdaC->result()->origin())); this->LambdaD_ = jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(lambdaD->result()->origin())); + jlm::rvsdg::output::GetNode(*lambdaD->result()->origin())); this->LambdaTest_ = lambdaTest->node(); 
this->CallAFromTest_ = callAFromTest; @@ -2695,7 +2694,7 @@ EscapedMemoryTest1::SetupRvsdg() return std::make_tuple( lambdaOutput, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(loadResults1[0]))); + jlm::rvsdg::output::GetNode(*loadResults1[0]))); }; auto deltaA = SetupDeltaA(); @@ -2787,7 +2786,7 @@ EscapedMemoryTest2::SetupRvsdg() GraphExport::Create(*lambdaOutput, "ReturnAddress"); - return std::make_tuple(lambdaOutput, jlm::rvsdg::node_output::node(mallocResults[0])); + return std::make_tuple(lambdaOutput, jlm::rvsdg::output::GetNode(*mallocResults[0])); }; auto SetupCallExternalFunction1 = [&](jlm::rvsdg::RegionArgument * externalFunction1Argument) @@ -2823,7 +2822,7 @@ EscapedMemoryTest2::SetupRvsdg() GraphExport::Create(*lambdaOutput, "CallExternalFunction1"); - return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::node_output::node(mallocResults[0])); + return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::output::GetNode(*mallocResults[0])); }; auto SetupCallExternalFunction2 = [&](jlm::rvsdg::RegionArgument * externalFunction2Argument) @@ -2864,7 +2863,7 @@ EscapedMemoryTest2::SetupRvsdg() lambdaOutput, &call, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(loadResults[0]))); + jlm::rvsdg::output::GetNode(*loadResults[0]))); }; auto externalFunction1 = SetupExternalFunction1Declaration(); @@ -2977,7 +2976,7 @@ EscapedMemoryTest3::SetupRvsdg() lambdaOutput, &call, jlm::util::AssertedCast( - jlm::rvsdg::node_output::node(loadResults[0]))); + jlm::rvsdg::output::GetNode(*loadResults[0]))); }; auto importExternalFunction = SetupExternalFunctionDeclaration(); @@ -3124,7 +3123,7 @@ MemcpyTest::SetupRvsdg() GraphExport::Create(*lambdaOutput, "g"); - return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::node_output::node(memcpyResults[0])); + return std::make_tuple(lambdaOutput, &call, jlm::rvsdg::output::GetNode(*memcpyResults[0])); }; auto localArray = SetupLocalArray(); @@ -3194,7 +3193,7 @@ MemcpyTest2::SetupRvsdg() auto lambdaOutput 
= lambda->finalize({ iOStateArgument, memcpyResults[0] }); - return std::make_tuple(lambdaOutput, jlm::rvsdg::node_output::node(memcpyResults[0])); + return std::make_tuple(lambdaOutput, jlm::rvsdg::output::GetNode(*memcpyResults[0])); }; auto SetupFunctionF = [&](lambda::output & functionF) @@ -3298,8 +3297,8 @@ MemcpyTest3::SetupRvsdg() GraphExport::Create(*lambdaOutput, "f"); - Alloca_ = rvsdg::node_output::node(allocaResults[0]); - Memcpy_ = rvsdg::node_output::node(memcpyResults[0]); + Alloca_ = rvsdg::output::GetNode(*allocaResults[0]); + Memcpy_ = rvsdg::output::GetNode(*memcpyResults[0]); return rvsdgModule; } @@ -3374,7 +3373,7 @@ LinkedListTest::SetupRvsdg() auto lambdaOutput = lambda->finalize({ load4[0], iOStateArgument, load4[1] }); GraphExport::Create(*lambdaOutput, "next"); - return std::make_tuple(jlm::rvsdg::node_output::node(alloca[0]), lambdaOutput); + return std::make_tuple(jlm::rvsdg::output::GetNode(*alloca[0]), lambdaOutput); }; auto deltaMyList = SetupDeltaMyList(); @@ -3433,7 +3432,7 @@ AllMemoryNodesTest::SetupRvsdg() // Create alloca node auto allocaSize = jlm::rvsdg::create_bitconstant(Lambda_->subregion(), 32, 1); auto allocaOutputs = alloca_op::create(pointerType, allocaSize, 8); - Alloca_ = jlm::rvsdg::node_output::node(allocaOutputs[0]); + Alloca_ = jlm::rvsdg::output::GetNode(*allocaOutputs[0]); auto afterAllocaMemoryState = MemoryStateMergeOperation::Create( std::vector{ entryMemoryState, allocaOutputs[1] }); @@ -3441,7 +3440,7 @@ AllMemoryNodesTest::SetupRvsdg() // Create malloc node auto mallocSize = jlm::rvsdg::create_bitconstant(Lambda_->subregion(), 32, 4); auto mallocOutputs = malloc_op::create(mallocSize); - Malloc_ = jlm::rvsdg::node_output::node(mallocOutputs[0]); + Malloc_ = jlm::rvsdg::output::GetNode(*mallocOutputs[0]); auto afterMallocMemoryState = MemoryStateMergeOperation::Create( std::vector{ afterAllocaMemoryState, mallocOutputs[1] }); @@ -3511,7 +3510,7 @@ NAllocaNodesTest::SetupRvsdg() for (size_t i = 0; i < 
NumAllocaNodes_; i++) { auto allocaOutputs = alloca_op::create(jlm::rvsdg::bittype::Create(32), allocaSize, 4); - auto allocaNode = jlm::rvsdg::node_output::node(allocaOutputs[0]); + auto allocaNode = jlm::rvsdg::output::GetNode(*allocaOutputs[0]); AllocaNodes_.push_back(allocaNode); @@ -3566,7 +3565,7 @@ EscapingLocalFunctionTest::SetupRvsdg() const auto allocaSize = rvsdg::create_bitconstant(LocalFunc_->subregion(), 32, 1); const auto allocaOutputs = alloca_op::create(uint32Type, allocaSize, 4); - LocalFuncParamAllocaNode_ = rvsdg::node_output::node(allocaOutputs[0]); + LocalFuncParamAllocaNode_ = rvsdg::output::GetNode(*allocaOutputs[0]); // Merge function's input Memory State and alloca node's memory state rvsdg::output * mergedMemoryState = MemoryStateMergeOperation::Create( @@ -3784,7 +3783,7 @@ VariadicFunctionTest1::SetupRvsdg() auto allocaResults = alloca_op::create(jlm::rvsdg::bittype::Create(32), one, 4); auto merge = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); - AllocaNode_ = rvsdg::node_output::node(allocaResults[0]); + AllocaNode_ = rvsdg::output::GetNode(*allocaResults[0]); auto storeResults = StoreNonVolatileNode::Create(allocaResults[0], five, { merge }, 4); @@ -3872,7 +3871,7 @@ VariadicFunctionTest2::SetupRvsdg() auto allocaResults = alloca_op::create(arrayType, one, 16); auto memoryState = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); - AllocaNode_ = rvsdg::node_output::node(allocaResults[0]); + AllocaNode_ = rvsdg::output::GetNode(*allocaResults[0]); auto & callLLvmLifetimeStart = CallNode::CreateNode( llvmLifetimeStartArgument, diff --git a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp index 5d5255afa..f0bd91788 100644 --- a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp @@ -97,7 +97,7 @@ 
LoadVolatileConversion() // Assert auto lambdaOutput = rvsdgModule->Rvsdg().root()->result(0)->origin(); - auto lambda = dynamic_cast(jlm::rvsdg::node_output::node(lambdaOutput)); + auto lambda = dynamic_cast(jlm::rvsdg::output::GetNode(*lambdaOutput)); auto loadVolatileNode = lambda->subregion()->nodes.first(); assert(dynamic_cast(loadVolatileNode)); @@ -126,7 +126,7 @@ StoreVolatileConversion() // Assert auto lambdaOutput = rvsdgModule->Rvsdg().root()->result(0)->origin(); - auto lambda = dynamic_cast(jlm::rvsdg::node_output::node(lambdaOutput)); + auto lambda = dynamic_cast(jlm::rvsdg::output::GetNode(*lambdaOutput)); auto storeVolatileNode = lambda->subregion()->nodes.first(); assert(dynamic_cast(storeVolatileNode)); diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index d484d216e..f59f8a33f 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -66,7 +66,7 @@ TestCopy() auto loadResults = LoadNonVolatileNode::Create(address1, { memoryState1 }, valueType, 4); // Act - auto node = jlm::rvsdg::node_output::node(loadResults[0]); + auto node = jlm::rvsdg::output::GetNode(*loadResults[0]); auto loadNode = jlm::util::AssertedCast(node); auto copiedNode = loadNode->copy(graph.root(), { address2, memoryState2 }); @@ -111,7 +111,7 @@ TestLoadAllocaReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex.origin()); + auto node = jlm::rvsdg::output::GetNode(*ex.origin()); assert(is(node)); assert(node->ninputs() == 3); assert(node->input(1)->origin() == alloca1[1]); @@ -150,7 +150,7 @@ TestMultipleOriginReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex.origin()); + auto node = jlm::rvsdg::output::GetNode(*ex.origin()); assert(is(node)); assert(node->ninputs() == 2); } @@ -192,11 +192,11 @@ TestLoadStoreStateReduction() // jlm::rvsdg::view(graph.root(), 
stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex1.origin()); + auto node = jlm::rvsdg::output::GetNode(*ex1.origin()); assert(is(node)); assert(node->ninputs() == 2); - node = jlm::rvsdg::node_output::node(ex2.origin()); + node = jlm::rvsdg::output::GetNode(*ex2.origin()); assert(is(node)); assert(node->ninputs() == 2); } @@ -286,15 +286,15 @@ TestLoadLoadReduction() // Assert assert(graph.root()->nnodes() == 6); - auto ld = jlm::rvsdg::node_output::node(x1.origin()); + auto ld = jlm::rvsdg::output::GetNode(*x1.origin()); assert(is(ld)); - auto mx1 = jlm::rvsdg::node_output::node(x2.origin()); + auto mx1 = jlm::rvsdg::output::GetNode(*x2.origin()); assert(is(mx1) && mx1->ninputs() == 2); assert(mx1->input(0)->origin() == ld1[1] || mx1->input(0)->origin() == ld->output(2)); assert(mx1->input(1)->origin() == ld1[1] || mx1->input(1)->origin() == ld->output(2)); - auto mx2 = jlm::rvsdg::node_output::node(x3.origin()); + auto mx2 = jlm::rvsdg::output::GetNode(*x3.origin()); assert(is(mx2) && mx2->ninputs() == 2); assert(mx2->input(0)->origin() == ld2[1] || mx2->input(0)->origin() == ld->output(3)); assert(mx2->input(1)->origin() == ld2[1] || mx2->input(1)->origin() == ld->output(3)); diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index 7af40fa2d..f683d6c5f 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -194,7 +194,7 @@ TestCopy() auto storeResults = StoreNonVolatileNode::Create(address1, value1, { memoryState1 }, 4); // Act - auto node = jlm::rvsdg::node_output::node(storeResults[0]); + auto node = jlm::rvsdg::output::GetNode(*storeResults[0]); auto storeNode = jlm::util::AssertedCast(node); auto copiedNode = storeNode->copy(graph.root(), { address2, value2, memoryState2 }); @@ -242,12 +242,12 @@ TestStoreMuxReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto muxnode = jlm::rvsdg::node_output::node(ex.origin()); + auto 
muxnode = jlm::rvsdg::output::GetNode(*ex.origin()); assert(is(muxnode)); assert(muxnode->ninputs() == 3); - auto n0 = jlm::rvsdg::node_output::node(muxnode->input(0)->origin()); - auto n1 = jlm::rvsdg::node_output::node(muxnode->input(1)->origin()); - auto n2 = jlm::rvsdg::node_output::node(muxnode->input(2)->origin()); + auto n0 = jlm::rvsdg::output::GetNode(*muxnode->input(0)->origin()); + auto n1 = jlm::rvsdg::output::GetNode(*muxnode->input(1)->origin()); + auto n2 = jlm::rvsdg::output::GetNode(*muxnode->input(2)->origin()); assert(jlm::rvsdg::is(n0->operation())); assert(jlm::rvsdg::is(n1->operation())); assert(jlm::rvsdg::is(n2->operation())); @@ -288,7 +288,7 @@ TestMultipleOriginReduction() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::node_output::node(ex.origin()); + auto node = jlm::rvsdg::output::GetNode(*ex.origin()); assert(jlm::rvsdg::is(node->operation()) && node->ninputs() == 3); } @@ -374,7 +374,7 @@ TestStoreStoreReduction() // Assert assert(graph.root()->nnodes() == 1); - assert(jlm::rvsdg::node_output::node(ex.origin())->input(1)->origin() == v2); + assert(jlm::rvsdg::output::GetNode(*ex.origin())->input(1)->origin() == v2); } static int diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 67a6573c3..7ba64dad9 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -40,7 +40,7 @@ TestCopy() auto callResults = CallNode::Create(function1, functionType, { value1, iOState1, memoryState1 }); // Act - auto node = jlm::rvsdg::node_output::node(callResults[0]); + auto node = jlm::rvsdg::output::GetNode(*callResults[0]); auto callNode = jlm::util::AssertedCast(node); auto copiedNode = callNode->copy(rvsdg.root(), { function2, value2, iOState2, memoryState2 }); @@ -71,7 +71,7 @@ TestCallNodeAccessors() // Act auto results = CallNode::Create(f, functionType, { v, i, m }); - auto & callNode = 
*jlm::util::AssertedCast(jlm::rvsdg::node_output::node(results[0])); + auto & callNode = *jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*results[0])); // Assert assert(callNode.NumArguments() == 3); @@ -137,7 +137,7 @@ TestCallTypeClassifierIndirectCall() GraphExport::Create(*lambda->output(), "f"); return std::make_tuple( - jlm::util::AssertedCast(jlm::rvsdg::node_output::node(callResults[0])), + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*callResults[0])), load[0]); }; @@ -229,7 +229,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() return std::make_tuple( lambda, - jlm::util::AssertedCast(jlm::rvsdg::node_output::node(callResults[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*callResults[0]))); }; auto g = SetupFunctionG(); @@ -317,7 +317,7 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() thetaOutputValue, thetaOutputIoState, thetaOutputMemoryState, - jlm::util::AssertedCast(jlm::rvsdg::node_output::node(callResults[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*callResults[0]))); }; auto vt = jlm::tests::valuetype::Create(); @@ -468,8 +468,8 @@ TestCallTypeClassifierRecursiveDirectCall() return std::make_tuple( lambdaOutput, - jlm::util::AssertedCast(jlm::rvsdg::node_output::node(callfibm1Results[0])), - jlm::util::AssertedCast(jlm::rvsdg::node_output::node(callfibm2Results[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*callfibm1Results[0])), + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*callfibm2Results[0]))); }; auto [fibfct, callFib1, callFib2] = SetupFib(); diff --git a/tests/jlm/llvm/ir/operators/test-sext.cpp b/tests/jlm/llvm/ir/operators/test-sext.cpp index 195047452..e4a16bf87 100644 --- a/tests/jlm/llvm/ir/operators/test-sext.cpp +++ b/tests/jlm/llvm/ir/operators/test-sext.cpp @@ -37,7 +37,7 @@ test_bitunary_reduction() // jlm::rvsdg::view(graph, stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); + 
assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex.origin()))); } static inline void @@ -65,7 +65,7 @@ test_bitbinary_reduction() // jlm::rvsdg::view(graph, stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex.origin()))); } static inline void diff --git a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp index 50dd5cd64..3a33b98b9 100644 --- a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp @@ -50,19 +50,19 @@ TestSuccess() // jlm::rvsdg::view(graph.root(), stdout); // Assert - auto load = jlm::rvsdg::node_output::node(ex1.origin()); + auto load = jlm::rvsdg::output::GetNode(*ex1.origin()); assert(is(load)); assert(load->ninputs() == 4); assert(load->input(1)->origin() == s1); assert(load->input(2)->origin() == s2); assert(load->input(3)->origin() == s3); - auto merge = jlm::rvsdg::node_output::node(ex2.origin()); + auto merge = jlm::rvsdg::output::GetNode(*ex2.origin()); assert(is(merge)); assert(merge->ninputs() == 3); for (size_t n = 0; n < merge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(merge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*merge->input(n)->origin()); assert(node == load); } } @@ -144,7 +144,7 @@ TestLoadWithoutStates() jlm::rvsdg::view(graph.root(), stdout); // Assert - auto load = jlm::rvsdg::node_output::node(ex.origin()); + auto load = jlm::rvsdg::output::GetNode(*ex.origin()); assert(is(load)); assert(load->ninputs() == 1); } diff --git a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp index 6c26558c4..85454a147 100644 --- a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp @@ -52,10 +52,10 @@ TestLoadStoreReductionWithDifferentValueOperandType() jlm::rvsdg::view(graph.root(), stdout); // Assert - auto load = 
jlm::rvsdg::node_output::node(exportedValue.origin()); + auto load = jlm::rvsdg::output::GetNode(*exportedValue.origin()); assert(is(load)); assert(load->ninputs() == 2); - auto store = jlm::rvsdg::node_output::node(load->input(1)->origin()); + auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); assert(is(store)); return 0; diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 0a3559114..9324b6ca0 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -79,7 +79,7 @@ ValidateStoreTest1SteensgaardAgnostic(const jlm::tests::StoreTest1 & test) assert(test.lambda->subregion()->nnodes() == 10); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 6, 1)); assert(test.alloca_d->output(1)->nusers() == 1); @@ -112,7 +112,7 @@ ValidateStoreTest1SteensgaardRegionAware(const jlm::tests::StoreTest1 & test) assert(test.lambda->subregion()->nnodes() == 9); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 4, 1)); assert(test.alloca_d->output(1)->nusers() == 1); @@ -145,12 +145,12 @@ ValidateStoreTest1SteensgaardAgnosticTopDown(const jlm::tests::StoreTest1 & test assert(test.lambda->subregion()->nnodes() == 2); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto lambdaEntrySplit = 
jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); - assert(lambdaEntrySplit == jlm::rvsdg::node_output::node(lambdaExitMerge->input(1)->origin())); + assert(lambdaEntrySplit == jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(1)->origin())); } static void @@ -160,7 +160,7 @@ ValidateStoreTest2SteensgaardAgnostic(const jlm::tests::StoreTest2 & test) assert(test.lambda->subregion()->nnodes() == 12); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 7, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -179,8 +179,8 @@ ValidateStoreTest2SteensgaardAgnostic(const jlm::tests::StoreTest2 & test) auto storeB = jlm::rvsdg::input::GetNode(**test.alloca_b->output(0)->begin()); assert(is(*storeB, 4, 2)); assert(storeB->input(0)->origin() == test.alloca_y->output(0)); - assert(jlm::rvsdg::node_output::node(storeB->input(2)->origin()) == storeA); - assert(jlm::rvsdg::node_output::node(storeB->input(3)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(2)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(3)->origin()) == storeA); auto storeX = jlm::rvsdg::input::GetNode(**test.alloca_p->output(1)->begin()); assert(is(*storeX, 3, 1)); @@ -200,7 +200,7 @@ ValidateStoreTest2SteensgaardRegionAware(const jlm::tests::StoreTest2 & test) assert(test.lambda->subregion()->nnodes() == 11); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 5, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -219,8 +219,8 @@ ValidateStoreTest2SteensgaardRegionAware(const jlm::tests::StoreTest2 & test) auto storeB = 
jlm::rvsdg::input::GetNode(**test.alloca_b->output(0)->begin()); assert(is(*storeB, 4, 2)); assert(storeB->input(0)->origin() == test.alloca_y->output(0)); - assert(jlm::rvsdg::node_output::node(storeB->input(2)->origin()) == storeA); - assert(jlm::rvsdg::node_output::node(storeB->input(3)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(2)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(3)->origin()) == storeA); auto storeX = jlm::rvsdg::input::GetNode(**test.alloca_p->output(1)->begin()); assert(is(*storeX, 3, 1)); @@ -240,12 +240,12 @@ ValidateStoreTest2SteensgaardAgnosticTopDown(const jlm::tests::StoreTest2 & test assert(test.lambda->subregion()->nnodes() == 2); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); - assert(lambdaEntrySplit == jlm::rvsdg::node_output::node(lambdaExitMerge->input(1)->origin())); + assert(lambdaEntrySplit == jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(1)->origin())); } static void @@ -255,21 +255,21 @@ ValidateLoadTest1SteensgaardAgnostic(const jlm::tests::LoadTest1 & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda->fctargument(1)->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto loadA = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); - auto loadX = 
jlm::rvsdg::node_output::node(loadA->input(0)->origin()); + auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto loadX = jlm::rvsdg::output::GetNode(*loadA->input(0)->origin()); assert(is(*loadA, 3, 3)); - assert(jlm::rvsdg::node_output::node(loadA->input(1)->origin()) == loadX); + assert(jlm::rvsdg::output::GetNode(*loadA->input(1)->origin()) == loadX); assert(is(*loadX, 3, 3)); assert(loadX->input(0)->origin() == test.lambda->fctargument(0)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); } static void @@ -279,21 +279,21 @@ ValidateLoadTest1SteensgaardRegionAware(const jlm::tests::LoadTest1 & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda->fctargument(1)->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto loadA = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); - auto loadX = jlm::rvsdg::node_output::node(loadA->input(0)->origin()); + auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto loadX = jlm::rvsdg::output::GetNode(*loadA->input(0)->origin()); assert(is(*loadA, 3, 3)); - assert(jlm::rvsdg::node_output::node(loadA->input(1)->origin()) == loadX); + assert(jlm::rvsdg::output::GetNode(*loadA->input(1)->origin()) == loadX); assert(is(*loadX, 3, 3)); assert(loadX->input(0)->origin() == test.lambda->fctargument(0)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); } static void @@ -303,21 +303,21 @@ 
ValidateLoadTest1SteensgaardAgnosticTopDown(const jlm::tests::LoadTest1 & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda->fctargument(1)->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto loadA = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); - auto loadX = jlm::rvsdg::node_output::node(loadA->input(0)->origin()); + auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto loadX = jlm::rvsdg::output::GetNode(*loadA->input(0)->origin()); assert(is(*loadA, 3, 3)); - assert(jlm::rvsdg::node_output::node(loadA->input(1)->origin()) == loadX); + assert(jlm::rvsdg::output::GetNode(*loadA->input(1)->origin()) == loadX); assert(is(*loadX, 3, 3)); assert(loadX->input(0)->origin() == test.lambda->fctargument(0)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); } static void @@ -327,7 +327,7 @@ ValidateLoadTest2SteensgaardAgnostic(const jlm::tests::LoadTest2 & test) assert(test.lambda->subregion()->nnodes() == 14); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 7, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -346,8 +346,8 @@ ValidateLoadTest2SteensgaardAgnostic(const jlm::tests::LoadTest2 & test) auto storeB = jlm::rvsdg::input::GetNode(**test.alloca_b->output(0)->begin()); assert(is(*storeB, 4, 2)); assert(storeB->input(0)->origin() == test.alloca_y->output(0)); - assert(jlm::rvsdg::node_output::node(storeB->input(2)->origin()) == 
storeA); - assert(jlm::rvsdg::node_output::node(storeB->input(3)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(2)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(3)->origin()) == storeA); auto storeX = jlm::rvsdg::input::GetNode(**test.alloca_p->output(1)->begin()); assert(is(*storeX, 3, 1)); @@ -360,14 +360,14 @@ ValidateLoadTest2SteensgaardAgnostic(const jlm::tests::LoadTest2 & test) auto loadXY = jlm::rvsdg::input::GetNode(**loadP->output(0)->begin()); assert(is(*loadXY, 3, 3)); - assert(jlm::rvsdg::node_output::node(loadXY->input(1)->origin()) == storeB); - assert(jlm::rvsdg::node_output::node(loadXY->input(2)->origin()) == storeB); + assert(jlm::rvsdg::output::GetNode(*loadXY->input(1)->origin()) == storeB); + assert(jlm::rvsdg::output::GetNode(*loadXY->input(2)->origin()) == storeB); auto storeY = jlm::rvsdg::input::GetNode(**loadXY->output(0)->begin()); assert(is(*storeY, 4, 2)); assert(storeY->input(0)->origin() == test.alloca_y->output(0)); - assert(jlm::rvsdg::node_output::node(storeY->input(2)->origin()) == loadXY); - assert(jlm::rvsdg::node_output::node(storeY->input(3)->origin()) == loadXY); + assert(jlm::rvsdg::output::GetNode(*storeY->input(2)->origin()) == loadXY); + assert(jlm::rvsdg::output::GetNode(*storeY->input(3)->origin()) == loadXY); } static void @@ -377,7 +377,7 @@ ValidateLoadTest2SteensgaardRegionAware(const jlm::tests::LoadTest2 & test) assert(test.lambda->subregion()->nnodes() == 13); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 5, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -393,8 +393,8 @@ ValidateLoadTest2SteensgaardRegionAware(const jlm::tests::LoadTest2 & test) auto storeB = jlm::rvsdg::input::GetNode(**test.alloca_b->output(0)->begin()); assert(is(*storeB, 4, 2)); assert(storeB->input(0)->origin() 
== test.alloca_y->output(0)); - assert(jlm::rvsdg::node_output::node(storeB->input(2)->origin()) == storeA); - assert(jlm::rvsdg::node_output::node(storeB->input(3)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(2)->origin()) == storeA); + assert(jlm::rvsdg::output::GetNode(*storeB->input(3)->origin()) == storeA); auto storeX = jlm::rvsdg::input::GetNode(**test.alloca_p->output(1)->begin()); assert(is(*storeX, 3, 1)); @@ -407,14 +407,14 @@ ValidateLoadTest2SteensgaardRegionAware(const jlm::tests::LoadTest2 & test) auto loadXY = jlm::rvsdg::input::GetNode(**loadP->output(0)->begin()); assert(is(*loadXY, 3, 3)); - assert(jlm::rvsdg::node_output::node(loadXY->input(1)->origin()) == storeB); - assert(jlm::rvsdg::node_output::node(loadXY->input(2)->origin()) == storeB); + assert(jlm::rvsdg::output::GetNode(*loadXY->input(1)->origin()) == storeB); + assert(jlm::rvsdg::output::GetNode(*loadXY->input(2)->origin()) == storeB); auto storeY = jlm::rvsdg::input::GetNode(**loadXY->output(0)->begin()); assert(is(*storeY, 4, 2)); assert(storeY->input(0)->origin() == test.alloca_y->output(0)); - assert(jlm::rvsdg::node_output::node(storeY->input(2)->origin()) == loadXY); - assert(jlm::rvsdg::node_output::node(storeY->input(3)->origin()) == loadXY); + assert(jlm::rvsdg::output::GetNode(*storeY->input(2)->origin()) == loadXY); + assert(jlm::rvsdg::output::GetNode(*storeY->input(3)->origin()) == loadXY); } static void @@ -424,12 +424,12 @@ ValidateLoadTest2SteensgaardAgnosticTopDown(const jlm::tests::LoadTest2 & test) assert(test.lambda->subregion()->nnodes() == 2); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto lambdaEntrySplit = 
jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); - assert(lambdaEntrySplit == jlm::rvsdg::node_output::node(lambdaExitMerge->input(1)->origin())); + assert(lambdaEntrySplit == jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(1)->origin())); } static void @@ -439,10 +439,10 @@ ValidateLoadFromUndefSteensgaardAgnostic(const jlm::tests::LoadFromUndefTest & t assert(test.Lambda().subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.Lambda().fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto load = jlm::rvsdg::node_output::node(test.Lambda().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(0)->origin()); assert(is(*load, 1, 1)); auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.Lambda().fctargument(0)->begin()); @@ -456,10 +456,10 @@ ValidateLoadFromUndefSteensgaardRegionAware(const jlm::tests::LoadFromUndefTest assert(test.Lambda().subregion()->nnodes() == 3); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.Lambda().fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(1)->origin()); assert(is(*lambdaExitMerge, 0, 1)); - auto load = jlm::rvsdg::node_output::node(test.Lambda().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(0)->origin()); assert(is(*load, 1, 1)); } @@ -470,10 +470,10 @@ ValidateLoadFromUndefSteensgaardAgnosticTopDown(const jlm::tests::LoadFromUndefT assert(test.Lambda().subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.Lambda().fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto load = jlm::rvsdg::node_output::node(test.Lambda().fctresult(0)->origin()); + 
auto load = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(0)->origin()); assert(is(*load, 1, 1)); auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.Lambda().fctargument(0)->begin()); @@ -488,7 +488,7 @@ ValidateCallTest1SteensgaardAgnostic(const jlm::tests::CallTest1 & test) /* validate f */ { auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_f->fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f->fctresult(2)->origin()); auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(0)->begin()); auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(1)->begin()); @@ -496,16 +496,16 @@ ValidateCallTest1SteensgaardAgnostic(const jlm::tests::CallTest1 & test) assert(is(*lambdaEntrySplit, 1, 7)); assert(is(*loadX, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); assert(is(*loadY, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadY->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadY->input(1)->origin()) == lambdaEntrySplit); } /* validate g */ { auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_g->fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_g->fctresult(2)->origin()); auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(1)->begin()); @@ -513,21 +513,21 @@ ValidateCallTest1SteensgaardAgnostic(const jlm::tests::CallTest1 & test) assert(is(*lambdaEntrySplit, 1, 7)); assert(is(*loadX, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == 
lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); assert(is(*loadY, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadY->input(1)->origin()) == loadX); + assert(jlm::rvsdg::output::GetNode(*loadY->input(1)->origin()) == loadX); } /* validate h */ { - auto callEntryMerge = jlm::rvsdg::node_output::node(test.CallF().input(4)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.CallF().input(4)->origin()); auto callExitSplit = jlm::rvsdg::input::GetNode(**test.CallF().output(2)->begin()); assert(is(*callEntryMerge, 7, 1)); assert(is(*callExitSplit, 1, 7)); - callEntryMerge = jlm::rvsdg::node_output::node(test.CallG().input(4)->origin()); + callEntryMerge = jlm::rvsdg::output::GetNode(*test.CallG().input(4)->origin()); callExitSplit = jlm::rvsdg::input::GetNode(**test.CallG().output(2)->begin()); assert(is(*callEntryMerge, 7, 1)); @@ -543,7 +543,7 @@ ValidateCallTest1SteensgaardRegionAware(const jlm::tests::CallTest1 & test) /* validate f */ { auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_f->fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f->fctresult(2)->origin()); auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(0)->begin()); auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(1)->begin()); @@ -551,16 +551,16 @@ ValidateCallTest1SteensgaardRegionAware(const jlm::tests::CallTest1 & test) assert(is(*lambdaEntrySplit, 1, 2)); assert(is(*loadX, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); assert(is(*loadY, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadY->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadY->input(1)->origin()) == 
lambdaEntrySplit); } /* validate g */ { auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_g->fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_g->fctresult(2)->origin()); auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(1)->begin()); @@ -568,21 +568,21 @@ ValidateCallTest1SteensgaardRegionAware(const jlm::tests::CallTest1 & test) assert(is(*lambdaEntrySplit, 1, 1)); assert(is(*loadX, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); assert(is(*loadY, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadY->input(1)->origin()) == loadX); + assert(jlm::rvsdg::output::GetNode(*loadY->input(1)->origin()) == loadX); } /* validate h */ { - auto callEntryMerge = jlm::rvsdg::node_output::node(test.CallF().input(4)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.CallF().input(4)->origin()); auto callExitSplit = jlm::rvsdg::input::GetNode(**test.CallF().output(2)->begin()); assert(is(*callEntryMerge, 2, 1)); assert(is(*callExitSplit, 1, 2)); - callEntryMerge = jlm::rvsdg::node_output::node(test.CallG().input(4)->origin()); + callEntryMerge = jlm::rvsdg::output::GetNode(*test.CallG().input(4)->origin()); callExitSplit = jlm::rvsdg::input::GetNode(**test.CallG().output(2)->begin()); assert(is(*callEntryMerge, 1, 1)); @@ -598,7 +598,7 @@ ValidateCallTest1SteensgaardAgnosticTopDown(const jlm::tests::CallTest1 & test) // validate function f { auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_f->fctresult(2)->origin()); + auto lambdaExitMerge = 
jlm::rvsdg::output::GetNode(*test.lambda_f->fctresult(2)->origin()); auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(0)->begin()); auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(1)->begin()); @@ -606,16 +606,16 @@ ValidateCallTest1SteensgaardAgnosticTopDown(const jlm::tests::CallTest1 & test) assert(is(*lambdaEntrySplit, 1, 7)); assert(is(*loadX, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); assert(is(*loadY, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadY->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadY->input(1)->origin()) == lambdaEntrySplit); } // validate function g { auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_g->fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_g->fctresult(2)->origin()); auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(1)->begin()); @@ -623,21 +623,21 @@ ValidateCallTest1SteensgaardAgnosticTopDown(const jlm::tests::CallTest1 & test) assert(is(*lambdaEntrySplit, 1, 7)); assert(is(*loadX, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadX->input(1)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); assert(is(*loadY, 2, 2)); - assert(jlm::rvsdg::node_output::node(loadY->input(1)->origin()) == loadX); + assert(jlm::rvsdg::output::GetNode(*loadY->input(1)->origin()) == loadX); } // validate function h { - auto callEntryMerge = jlm::rvsdg::node_output::node(test.CallF().input(4)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.CallF().input(4)->origin()); auto callExitSplit = 
jlm::rvsdg::input::GetNode(**test.CallF().output(2)->begin()); assert(is(*callEntryMerge, 7, 1)); assert(is(*callExitSplit, 1, 7)); - callEntryMerge = jlm::rvsdg::node_output::node(test.CallG().input(4)->origin()); + callEntryMerge = jlm::rvsdg::output::GetNode(*test.CallG().input(4)->origin()); callExitSplit = jlm::rvsdg::input::GetNode(**test.CallG().output(2)->begin()); assert(is(*callEntryMerge, 7, 1)); @@ -657,7 +657,7 @@ ValidateCallTest2SteensgaardAgnostic(const jlm::tests::CallTest2 & test) auto stateMerge = jlm::rvsdg::input::GetNode(**test.malloc->output(1)->begin()); assert(is(*stateMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(stateMerge->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*stateMerge->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); auto lambdaExitMerge = jlm::rvsdg::input::GetNode(**stateMerge->output(0)->begin()); @@ -691,7 +691,7 @@ ValidateCallTest2SteensgaardRegionAware(const jlm::tests::CallTest2 & test) auto stateMerge = jlm::rvsdg::input::GetNode(**test.malloc->output(1)->begin()); assert(is(*stateMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(stateMerge->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*stateMerge->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 1)); auto lambdaExitMerge = jlm::rvsdg::input::GetNode(**stateMerge->output(0)->begin()); @@ -725,7 +725,7 @@ ValidateCallTest2SteensgaardAgnosticTopDown(const jlm::tests::CallTest2 & test) auto stateMerge = jlm::rvsdg::input::GetNode(**test.malloc->output(1)->begin()); assert(is(*stateMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(stateMerge->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*stateMerge->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); auto lambdaExitMerge = jlm::rvsdg::input::GetNode(**stateMerge->output(0)->begin()); @@ -753,19 +753,19 @@ 
ValidateIndirectCallTest1SteensgaardAgnostic(const jlm::tests::IndirectCallTest1 assert(test.GetLambdaIndcall().subregion()->nnodes() == 5); auto lambda_exit_mux = - jlm::rvsdg::node_output::node(test.GetLambdaIndcall().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().fctresult(2)->origin()); assert(is(*lambda_exit_mux, 5, 1)); - auto call_exit_mux = jlm::rvsdg::node_output::node(lambda_exit_mux->input(0)->origin()); + auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); assert(is(*call_exit_mux, 1, 5)); - auto call = jlm::rvsdg::node_output::node(call_exit_mux->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*call_exit_mux->input(0)->origin()); assert(is(*call, 3, 3)); - auto call_entry_mux = jlm::rvsdg::node_output::node(call->input(2)->origin()); + auto call_entry_mux = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); assert(is(*call_entry_mux, 5, 1)); - auto lambda_entry_mux = jlm::rvsdg::node_output::node(call_entry_mux->input(2)->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*call_entry_mux->input(2)->origin()); assert(is(*lambda_entry_mux, 1, 5)); } @@ -774,28 +774,28 @@ ValidateIndirectCallTest1SteensgaardAgnostic(const jlm::tests::IndirectCallTest1 assert(test.GetLambdaTest().subregion()->nnodes() == 9); auto lambda_exit_mux = - jlm::rvsdg::node_output::node(test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); assert(is(*lambda_exit_mux, 5, 1)); - auto call_exit_mux = jlm::rvsdg::node_output::node(lambda_exit_mux->input(0)->origin()); + auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); assert(is(*call_exit_mux, 1, 5)); - auto call = jlm::rvsdg::node_output::node(call_exit_mux->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*call_exit_mux->input(0)->origin()); assert(is(*call, 4, 3)); - auto call_entry_mux = 
jlm::rvsdg::node_output::node(call->input(3)->origin()); + auto call_entry_mux = jlm::rvsdg::output::GetNode(*call->input(3)->origin()); assert(is(*call_entry_mux, 5, 1)); - call_exit_mux = jlm::rvsdg::node_output::node(call_entry_mux->input(0)->origin()); + call_exit_mux = jlm::rvsdg::output::GetNode(*call_entry_mux->input(0)->origin()); assert(is(*call_exit_mux, 1, 5)); - call = jlm::rvsdg::node_output::node(call_exit_mux->input(0)->origin()); + call = jlm::rvsdg::output::GetNode(*call_exit_mux->input(0)->origin()); assert(is(*call, 4, 3)); - call_entry_mux = jlm::rvsdg::node_output::node(call->input(3)->origin()); + call_entry_mux = jlm::rvsdg::output::GetNode(*call->input(3)->origin()); assert(is(*call_entry_mux, 5, 1)); - auto lambda_entry_mux = jlm::rvsdg::node_output::node(call_entry_mux->input(2)->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*call_entry_mux->input(2)->origin()); assert(is(*lambda_entry_mux, 1, 5)); } } @@ -810,19 +810,19 @@ ValidateIndirectCallTest1SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaIndcall().subregion()->nnodes() == 5); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaIndcall().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 1, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 1)); - auto call = jlm::rvsdg::node_output::node(callExitSplit->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*callExitSplit->input(0)->origin()); assert(is(*call, 3, 3)); - auto callEntryMerge = jlm::rvsdg::node_output::node(call->input(2)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); assert(is(*callEntryMerge, 1, 1)); - auto lambdaEntrySplit = 
jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 1)); } @@ -831,28 +831,28 @@ ValidateIndirectCallTest1SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaTest().subregion()->nnodes() == 9); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 1, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 1)); - auto call = jlm::rvsdg::node_output::node(callExitSplit->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*callExitSplit->input(0)->origin()); assert(is(*call, 4, 3)); - auto callEntryMerge = jlm::rvsdg::node_output::node(call->input(3)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(3)->origin()); assert(is(*callEntryMerge, 1, 1)); - callExitSplit = jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + callExitSplit = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 1)); - call = jlm::rvsdg::node_output::node(callExitSplit->input(0)->origin()); + call = jlm::rvsdg::output::GetNode(*callExitSplit->input(0)->origin()); assert(is(*call, 4, 3)); - callEntryMerge = jlm::rvsdg::node_output::node(call->input(3)->origin()); + callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(3)->origin()); assert(is(*callEntryMerge, 1, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 1)); } } @@ -867,19 +867,19 @@ 
ValidateIndirectCallTest1SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaIndcall().subregion()->nnodes() == 5); auto lambda_exit_mux = - jlm::rvsdg::node_output::node(test.GetLambdaIndcall().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().fctresult(2)->origin()); assert(is(*lambda_exit_mux, 5, 1)); - auto call_exit_mux = jlm::rvsdg::node_output::node(lambda_exit_mux->input(0)->origin()); + auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); assert(is(*call_exit_mux, 1, 5)); - auto call = jlm::rvsdg::node_output::node(call_exit_mux->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*call_exit_mux->input(0)->origin()); assert(is(*call, 3, 3)); - auto call_entry_mux = jlm::rvsdg::node_output::node(call->input(2)->origin()); + auto call_entry_mux = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); assert(is(*call_entry_mux, 5, 1)); - auto lambda_entry_mux = jlm::rvsdg::node_output::node(call_entry_mux->input(2)->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*call_entry_mux->input(2)->origin()); assert(is(*lambda_entry_mux, 1, 5)); } @@ -888,28 +888,28 @@ ValidateIndirectCallTest1SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaTest().subregion()->nnodes() == 9); auto lambda_exit_mux = - jlm::rvsdg::node_output::node(test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); assert(is(*lambda_exit_mux, 5, 1)); - auto call_exit_mux = jlm::rvsdg::node_output::node(lambda_exit_mux->input(0)->origin()); + auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); assert(is(*call_exit_mux, 1, 5)); - auto call = jlm::rvsdg::node_output::node(call_exit_mux->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*call_exit_mux->input(0)->origin()); assert(is(*call, 4, 3)); - auto call_entry_mux = 
jlm::rvsdg::node_output::node(call->input(3)->origin()); + auto call_entry_mux = jlm::rvsdg::output::GetNode(*call->input(3)->origin()); assert(is(*call_entry_mux, 5, 1)); - call_exit_mux = jlm::rvsdg::node_output::node(call_entry_mux->input(0)->origin()); + call_exit_mux = jlm::rvsdg::output::GetNode(*call_entry_mux->input(0)->origin()); assert(is(*call_exit_mux, 1, 5)); - call = jlm::rvsdg::node_output::node(call_exit_mux->input(0)->origin()); + call = jlm::rvsdg::output::GetNode(*call_exit_mux->input(0)->origin()); assert(is(*call, 4, 3)); - call_entry_mux = jlm::rvsdg::node_output::node(call->input(3)->origin()); + call_entry_mux = jlm::rvsdg::output::GetNode(*call->input(3)->origin()); assert(is(*call_entry_mux, 5, 1)); - auto lambda_entry_mux = jlm::rvsdg::node_output::node(call_entry_mux->input(2)->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*call_entry_mux->input(2)->origin()); assert(is(*lambda_entry_mux, 1, 5)); } } @@ -924,7 +924,7 @@ ValidateIndirectCallTest2SteensgaardAgnostic(const jlm::tests::IndirectCallTest2 assert(test.GetLambdaThree().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaThree().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaThree().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = @@ -937,7 +937,7 @@ ValidateIndirectCallTest2SteensgaardAgnostic(const jlm::tests::IndirectCallTest2 assert(test.GetLambdaFour().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaFour().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaFour().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = @@ -949,16 +949,16 @@ ValidateIndirectCallTest2SteensgaardAgnostic(const jlm::tests::IndirectCallTest2 { assert(test.GetLambdaI().subregion()->nnodes() == 5); - auto lambdaExitMerge = 
jlm::rvsdg::node_output::node(test.GetLambdaI().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaI().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 13)); - auto callEntryMerge = jlm::rvsdg::node_output::node(test.GetIndirectCall().input(2)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetIndirectCall().input(2)->origin()); assert(is(*callEntryMerge, 13, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 13)); } } @@ -973,7 +973,7 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaThree().subregion()->nnodes() == 2); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaThree().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaThree().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 0, 1)); } @@ -982,7 +982,7 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaFour().subregion()->nnodes() == 2); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaFour().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaFour().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 0, 1)); } @@ -990,16 +990,16 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe { assert(test.GetLambdaI().subregion()->nnodes() == 5); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.GetLambdaI().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaI().fctresult(2)->origin()); 
assert(is(*lambdaExitMerge, 6, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 6)); - auto callEntryMerge = jlm::rvsdg::node_output::node(test.GetIndirectCall().input(2)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetIndirectCall().input(2)->origin()); assert(is(*callEntryMerge, 6, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 6)); } @@ -1007,21 +1007,20 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe { assert(test.GetLambdaX().subregion()->nnodes() == 7); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.GetLambdaX().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaX().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 6, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 6)); - auto callEntryMerge = - jlm::rvsdg::node_output::node(test.GetCallIWithThree().input(3)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithThree().input(3)->origin()); assert(is(*callEntryMerge, 6, 1)); const jlm::rvsdg::node * storeNode = nullptr; const jlm::rvsdg::node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(callEntryMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); if (is(node)) { storeNode = node; @@ -1044,20 +1043,20 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const 
jlm::tests::IndirectCallTe { assert(test.GetLambdaY().subregion()->nnodes() == 7); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.GetLambdaY().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaY().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 6, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 6)); - auto callEntryMerge = jlm::rvsdg::node_output::node(test.GetCallIWithFour().input(3)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithFour().input(3)->origin()); assert(is(*callEntryMerge, 6, 1)); const jlm::rvsdg::node * storeNode = nullptr; const jlm::rvsdg::node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(callEntryMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); if (is(node)) { storeNode = node; @@ -1081,7 +1080,7 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaTest().subregion()->nnodes() == 16); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 6, 1)); auto loadG1 = jlm::rvsdg::input::GetNode(**test.GetLambdaTest().cvargument(2)->begin()); @@ -1100,7 +1099,7 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaTest2().subregion()->nnodes() == 7); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaTest2().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest2().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 6, 1)); auto lambdaEntrySplit = @@ -1119,7 +1118,7 @@ 
ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaThree().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaThree().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaThree().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = @@ -1132,7 +1131,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaFour().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaFour().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaFour().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = @@ -1144,16 +1143,16 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa { assert(test.GetLambdaI().subregion()->nnodes() == 5); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.GetLambdaI().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaI().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 13)); - auto callEntryMerge = jlm::rvsdg::node_output::node(test.GetIndirectCall().input(2)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetIndirectCall().input(2)->origin()); assert(is(*callEntryMerge, 13, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 13)); } @@ -1161,21 +1160,20 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa { assert(test.GetLambdaX().subregion()->nnodes() 
== 7); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.GetLambdaX().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaX().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 13, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 13)); - auto callEntryMerge = - jlm::rvsdg::node_output::node(test.GetCallIWithThree().input(3)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithThree().input(3)->origin()); assert(is(*callEntryMerge, 13, 1)); const jlm::rvsdg::node * storeNode = nullptr; const jlm::rvsdg::node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(callEntryMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); if (is(node)) { storeNode = node; @@ -1198,13 +1196,13 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa { assert(test.GetLambdaY().subregion()->nnodes() == 8); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.GetLambdaY().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaY().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 12, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 13)); - auto callEntryMerge = jlm::rvsdg::node_output::node(test.GetCallIWithFour().input(3)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithFour().input(3)->origin()); assert(is(*callEntryMerge, 13, 1)); jlm::rvsdg::node * undefNode = nullptr; @@ -1212,7 +1210,7 @@ 
ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa const jlm::rvsdg::node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(callEntryMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); if (is(node)) { assert(storeNode == nullptr); @@ -1242,13 +1240,13 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaTest().subregion()->nnodes() == 17); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 10, 1)); auto loadG1 = jlm::rvsdg::input::GetNode(**test.GetLambdaTest().cvargument(2)->begin()); assert(is(*loadG1, 2, 2)); - auto callXEntryMerge = jlm::rvsdg::node_output::node(test.GetTestCallX().input(3)->origin()); + auto callXEntryMerge = jlm::rvsdg::output::GetNode(*test.GetTestCallX().input(3)->origin()); assert(is(*callXEntryMerge, 13, 1)); auto callXExitSplit = jlm::rvsdg::input::GetNode(**test.GetTestCallX().output(2)->begin()); @@ -1280,10 +1278,10 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaTest2().subregion()->nnodes() == 8); auto lambdaExitMerge = - jlm::rvsdg::node_output::node(test.GetLambdaTest2().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest2().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 10, 1)); - auto callXEntryMerge = jlm::rvsdg::node_output::node(test.GetTest2CallX().input(3)->origin()); + auto callXEntryMerge = jlm::rvsdg::output::GetNode(*test.GetTest2CallX().input(3)->origin()); assert(is(*callXEntryMerge, 13, 1)); auto callXExitSplit = jlm::rvsdg::input::GetNode(**test.GetTest2CallX().output(2)->begin()); @@ -1316,16 +1314,16 @@ ValidateGammaTestSteensgaardAgnostic(const 
jlm::tests::GammaTest & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto loadTmp2 = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto loadTmp2 = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*loadTmp2, 3, 3)); - auto loadTmp1 = jlm::rvsdg::node_output::node(loadTmp2->input(1)->origin()); + auto loadTmp1 = jlm::rvsdg::output::GetNode(*loadTmp2->input(1)->origin()); assert(is(*loadTmp1, 3, 3)); - auto gamma = jlm::rvsdg::node_output::node(loadTmp1->input(1)->origin()); + auto gamma = jlm::rvsdg::output::GetNode(*loadTmp1->input(1)->origin()); assert(gamma == test.gamma); } @@ -1334,16 +1332,16 @@ ValidateGammaTestSteensgaardRegionAware(const jlm::tests::GammaTest & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto loadTmp2 = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto loadTmp2 = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*loadTmp2, 3, 3)); - auto loadTmp1 = jlm::rvsdg::node_output::node(loadTmp2->input(1)->origin()); + auto loadTmp1 = jlm::rvsdg::output::GetNode(*loadTmp2->input(1)->origin()); assert(is(*loadTmp1, 3, 3)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(loadTmp1->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadTmp1->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); } @@ -1352,16 +1350,16 @@ ValidateGammaTestSteensgaardAgnosticTopDown(const jlm::tests::GammaTest & test) { using namespace jlm::llvm; - auto lambdaExitMerge = 
jlm::rvsdg::node_output::node(test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto loadTmp2 = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto loadTmp2 = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*loadTmp2, 3, 3)); - auto loadTmp1 = jlm::rvsdg::node_output::node(loadTmp2->input(1)->origin()); + auto loadTmp1 = jlm::rvsdg::output::GetNode(*loadTmp2->input(1)->origin()); assert(is(*loadTmp1, 3, 3)); - auto gamma = jlm::rvsdg::node_output::node(loadTmp1->input(1)->origin()); + auto gamma = jlm::rvsdg::output::GetNode(*loadTmp1->input(1)->origin()); assert(gamma == test.gamma); } @@ -1372,20 +1370,20 @@ ValidateThetaTestSteensgaardAgnostic(const jlm::tests::ThetaTest & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambda_exit_mux = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambda_exit_mux = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambda_exit_mux, 2, 1)); auto thetaOutput = jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); - auto theta = jlm::rvsdg::node_output::node(thetaOutput); + auto theta = jlm::rvsdg::output::GetNode(*thetaOutput); assert(theta == test.theta); auto storeStateOutput = thetaOutput->result()->origin(); - auto store = jlm::rvsdg::node_output::node(storeStateOutput); + auto store = jlm::rvsdg::output::GetNode(*storeStateOutput); assert(is(*store, 4, 2)); assert(store->input(storeStateOutput->index() + 2)->origin() == thetaOutput->argument()); - auto lambda_entry_mux = jlm::rvsdg::node_output::node(thetaOutput->input()->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*thetaOutput->input()->origin()); assert(is(*lambda_entry_mux, 1, 2)); } @@ -1396,20 +1394,20 @@ ValidateThetaTestSteensgaardRegionAware(const jlm::tests::ThetaTest & test) 
assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto thetaOutput = jlm::util::AssertedCast(lambdaExitMerge->input(0)->origin()); - auto theta = jlm::rvsdg::node_output::node(thetaOutput); + auto theta = jlm::rvsdg::output::GetNode(*thetaOutput); assert(theta == test.theta); auto storeStateOutput = thetaOutput->result()->origin(); - auto store = jlm::rvsdg::node_output::node(storeStateOutput); + auto store = jlm::rvsdg::output::GetNode(*storeStateOutput); assert(is(*store, 4, 2)); assert(store->input(storeStateOutput->index() + 2)->origin() == thetaOutput->argument()); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(thetaOutput->input()->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*thetaOutput->input()->origin()); assert(is(*lambdaEntrySplit, 1, 2)); } @@ -1420,20 +1418,20 @@ ValidateThetaTestSteensgaardAgnosticTopDown(const jlm::tests::ThetaTest & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambda_exit_mux = jlm::rvsdg::node_output::node(test.lambda->fctresult(0)->origin()); + auto lambda_exit_mux = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); assert(is(*lambda_exit_mux, 2, 1)); auto thetaOutput = jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); - auto theta = jlm::rvsdg::node_output::node(thetaOutput); + auto theta = jlm::rvsdg::output::GetNode(*thetaOutput); assert(theta == test.theta); auto storeStateOutput = thetaOutput->result()->origin(); - auto store = jlm::rvsdg::node_output::node(storeStateOutput); + auto store = jlm::rvsdg::output::GetNode(*storeStateOutput); assert(is(*store, 4, 2)); assert(store->input(storeStateOutput->index() + 2)->origin() == thetaOutput->argument()); - auto lambda_entry_mux = 
jlm::rvsdg::node_output::node(thetaOutput->input()->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*thetaOutput->input()->origin()); assert(is(*lambda_entry_mux, 1, 2)); } @@ -1449,7 +1447,7 @@ ValidateDeltaTest1SteensgaardAgnostic(const jlm::tests::DeltaTest1 & test) auto storeF = jlm::rvsdg::input::GetNode(**test.constantFive->output(0)->begin()); assert(is(*storeF, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeF->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeF->input(2)->origin()) == lambdaEntrySplit); auto deltaStateIndex = storeF->input(2)->origin()->index(); @@ -1470,7 +1468,7 @@ ValidateDeltaTest1SteensgaardRegionAware(const jlm::tests::DeltaTest1 & test) auto storeF = jlm::rvsdg::input::GetNode(**test.constantFive->output(0)->begin()); assert(is(*storeF, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeF->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeF->input(2)->origin()) == lambdaEntrySplit); auto deltaStateIndex = storeF->input(2)->origin()->index(); @@ -1491,7 +1489,7 @@ ValidateDeltaTest1SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest1 & test auto storeF = jlm::rvsdg::input::GetNode(**test.constantFive->output(0)->begin()); assert(is(*storeF, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeF->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeF->input(2)->origin()) == lambdaEntrySplit); auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); assert(is(*loadF, 2, 2)); @@ -1509,7 +1507,7 @@ ValidateDeltaTest2SteensgaardAgnostic(const jlm::tests::DeltaTest2 & test) auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); assert(is(*storeD1InF2, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD1InF2->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); auto 
d1StateIndex = storeD1InF2->input(2)->origin()->index(); @@ -1533,13 +1531,13 @@ ValidateDeltaTest2SteensgaardRegionAware(const jlm::tests::DeltaTest2 & test) { assert(test.lambda_f1->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_f1->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f1->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 1, 1)); - auto storeNode = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto storeNode = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*storeNode, 3, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(storeNode->input(2)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*storeNode->input(2)->origin()); assert(is(*lambdaEntrySplit, 1, 1)); } @@ -1552,11 +1550,11 @@ ValidateDeltaTest2SteensgaardRegionAware(const jlm::tests::DeltaTest2 & test) auto storeD1 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); assert(is(*storeD1, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD1->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeD1->input(2)->origin()) == lambdaEntrySplit); auto storeD2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); assert(is(*storeD2, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD2->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeD2->input(2)->origin()) == lambdaEntrySplit); auto callEntryMerge = jlm::rvsdg::input::GetNode(**storeD1->output(0)->begin()); assert(is(*callEntryMerge, 1, 1)); @@ -1584,7 +1582,7 @@ ValidateDeltaTest2SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest2 & test auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); assert(is(*storeD1InF2, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD1InF2->input(2)->origin()) == lambdaEntrySplit); + 
assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); auto d1StateIndex = storeD1InF2->input(2)->origin()->index(); @@ -1606,23 +1604,23 @@ ValidateDeltaTest3SteensgaardAgnostic(const jlm::tests::DeltaTest3 & test) { assert(test.LambdaF().subregion()->nnodes() == 6); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto truncNode = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(0)->origin()); + auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); assert(is(*truncNode, 1, 1)); - auto loadG1Node = jlm::rvsdg::node_output::node(truncNode->input(0)->origin()); + auto loadG1Node = jlm::rvsdg::output::GetNode(*truncNode->input(0)->origin()); assert(is(*loadG1Node, 2, 2)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(loadG1Node->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadG1Node->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); jlm::rvsdg::node * storeG2Node = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto input = lambdaExitMerge->input(n); - auto node = jlm::rvsdg::node_output::node(input->origin()); + auto node = jlm::rvsdg::output::GetNode(*input->origin()); if (is(node)) { storeG2Node = node; @@ -1631,10 +1629,10 @@ ValidateDeltaTest3SteensgaardAgnostic(const jlm::tests::DeltaTest3 & test) } assert(storeG2Node != nullptr); - auto loadG2Node = jlm::rvsdg::node_output::node(storeG2Node->input(2)->origin()); + auto loadG2Node = jlm::rvsdg::output::GetNode(*storeG2Node->input(2)->origin()); assert(is(*loadG2Node, 2, 2)); - auto node = jlm::rvsdg::node_output::node(loadG2Node->input(1)->origin()); + auto node = jlm::rvsdg::output::GetNode(*loadG2Node->input(1)->origin()); assert(node == lambdaEntrySplit); } } @@ -1648,23 +1646,23 @@ 
ValidateDeltaTest3SteensgaardRegionAware(const jlm::tests::DeltaTest3 & test) { assert(test.LambdaF().subregion()->nnodes() == 6); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto truncNode = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(0)->origin()); + auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); assert(is(*truncNode, 1, 1)); - auto loadG1Node = jlm::rvsdg::node_output::node(truncNode->input(0)->origin()); + auto loadG1Node = jlm::rvsdg::output::GetNode(*truncNode->input(0)->origin()); assert(is(*loadG1Node, 2, 2)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(loadG1Node->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadG1Node->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); jlm::rvsdg::node * storeG2Node = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto input = lambdaExitMerge->input(n); - auto node = jlm::rvsdg::node_output::node(input->origin()); + auto node = jlm::rvsdg::output::GetNode(*input->origin()); if (is(node)) { storeG2Node = node; @@ -1673,10 +1671,10 @@ ValidateDeltaTest3SteensgaardRegionAware(const jlm::tests::DeltaTest3 & test) } assert(storeG2Node != nullptr); - auto loadG2Node = jlm::rvsdg::node_output::node(storeG2Node->input(2)->origin()); + auto loadG2Node = jlm::rvsdg::output::GetNode(*storeG2Node->input(2)->origin()); assert(is(*loadG2Node, 2, 2)); - auto node = jlm::rvsdg::node_output::node(loadG2Node->input(1)->origin()); + auto node = jlm::rvsdg::output::GetNode(*loadG2Node->input(1)->origin()); assert(node == lambdaEntrySplit); } } @@ -1690,23 +1688,23 @@ ValidateDeltaTest3SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest3 & test { assert(test.LambdaF().subregion()->nnodes() == 6); - auto lambdaExitMerge = 
jlm::rvsdg::node_output::node(test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto truncNode = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(0)->origin()); + auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); assert(is(*truncNode, 1, 1)); - auto loadG1Node = jlm::rvsdg::node_output::node(truncNode->input(0)->origin()); + auto loadG1Node = jlm::rvsdg::output::GetNode(*truncNode->input(0)->origin()); assert(is(*loadG1Node, 2, 2)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(loadG1Node->input(1)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadG1Node->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); jlm::rvsdg::node * storeG2Node = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto input = lambdaExitMerge->input(n); - auto node = jlm::rvsdg::node_output::node(input->origin()); + auto node = jlm::rvsdg::output::GetNode(*input->origin()); if (is(node)) { storeG2Node = node; @@ -1715,10 +1713,10 @@ ValidateDeltaTest3SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest3 & test } assert(storeG2Node != nullptr); - auto loadG2Node = jlm::rvsdg::node_output::node(storeG2Node->input(2)->origin()); + auto loadG2Node = jlm::rvsdg::output::GetNode(*storeG2Node->input(2)->origin()); assert(is(*loadG2Node, 2, 2)); - auto node = jlm::rvsdg::node_output::node(loadG2Node->input(1)->origin()); + auto node = jlm::rvsdg::output::GetNode(*loadG2Node->input(1)->origin()); assert(node == lambdaEntrySplit); } } @@ -1735,7 +1733,7 @@ ValidateImportTestSteensgaardAgnostic(const jlm::tests::ImportTest & test) auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); assert(is(*storeD1InF2, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD1InF2->input(2)->origin()) == lambdaEntrySplit); + 
assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); auto d1StateIndex = storeD1InF2->input(2)->origin()->index(); @@ -1759,13 +1757,13 @@ ValidateImportTestSteensgaardRegionAware(const jlm::tests::ImportTest & test) { assert(test.lambda_f1->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_f1->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f1->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 1, 1)); - auto storeNode = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto storeNode = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*storeNode, 3, 1)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(storeNode->input(2)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*storeNode->input(2)->origin()); assert(is(*lambdaEntrySplit, 1, 1)); } @@ -1778,11 +1776,11 @@ ValidateImportTestSteensgaardRegionAware(const jlm::tests::ImportTest & test) auto storeD1 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); assert(is(*storeD1, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD1->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeD1->input(2)->origin()) == lambdaEntrySplit); auto storeD2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); assert(is(*storeD2, 3, 1)); - assert(jlm::rvsdg::node_output::node(storeD2->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeD2->input(2)->origin()) == lambdaEntrySplit); auto callEntryMerge = jlm::rvsdg::input::GetNode(**storeD1->output(0)->begin()); assert(is(*callEntryMerge, 1, 1)); @@ -1810,7 +1808,7 @@ ValidateImportTestSteensgaardAgnosticTopDown(const jlm::tests::ImportTest & test auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); assert(is(*storeD1InF2, 3, 1)); - 
assert(jlm::rvsdg::node_output::node(storeD1InF2->input(2)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); assert(storeD1InF2->output(0)->nusers() == 1); auto d1StateIndexEntry = (*storeD1InF2->output(0)->begin())->index(); @@ -1834,22 +1832,22 @@ ValidatePhiTestSteensgaardAgnostic(const jlm::tests::PhiTest1 & test) auto arrayStateIndex = (*test.alloca->output(1)->begin())->index(); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_fib->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_fib->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 4, 1)); - auto store = jlm::rvsdg::node_output::node(lambdaExitMerge->input(arrayStateIndex)->origin()); + auto store = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(arrayStateIndex)->origin()); assert(is(*store, 3, 1)); - auto gamma = jlm::rvsdg::node_output::node(store->input(2)->origin()); + auto gamma = jlm::rvsdg::output::GetNode(*store->input(2)->origin()); assert(gamma == test.gamma); auto gammaStateIndex = store->input(2)->origin()->index(); auto load1 = - jlm::rvsdg::node_output::node(test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); + jlm::rvsdg::output::GetNode(*test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); assert(is(*load1, 2, 2)); - auto load2 = jlm::rvsdg::node_output::node(load1->input(1)->origin()); + auto load2 = jlm::rvsdg::output::GetNode(*load1->input(1)->origin()); assert(is(*load2, 2, 2)); assert(load2->input(1)->origin()->index() == arrayStateIndex); @@ -1862,22 +1860,22 @@ ValidatePhiTestSteensgaardRegionAware(const jlm::tests::PhiTest1 & test) auto arrayStateIndex = (*test.alloca->output(1)->begin())->index(); - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_fib->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_fib->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 1, 1)); - 
auto store = jlm::rvsdg::node_output::node(lambdaExitMerge->input(arrayStateIndex)->origin()); + auto store = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(arrayStateIndex)->origin()); assert(is(*store, 3, 1)); - auto gamma = jlm::rvsdg::node_output::node(store->input(2)->origin()); + auto gamma = jlm::rvsdg::output::GetNode(*store->input(2)->origin()); assert(gamma == test.gamma); auto gammaStateIndex = store->input(2)->origin()->index(); auto load1 = - jlm::rvsdg::node_output::node(test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); + jlm::rvsdg::output::GetNode(*test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); assert(is(*load1, 2, 2)); - auto load2 = jlm::rvsdg::node_output::node(load1->input(1)->origin()); + auto load2 = jlm::rvsdg::output::GetNode(*load1->input(1)->origin()); assert(is(*load2, 2, 2)); assert(load2->input(1)->origin()->index() == arrayStateIndex); @@ -1888,14 +1886,14 @@ ValidatePhiTestSteensgaardAgnosticTopDown(const jlm::tests::PhiTest1 & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.lambda_fib->fctresult(1)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_fib->fctresult(1)->origin()); assert(is(*lambdaExitMerge, 4, 1)); const StoreNonVolatileNode * storeNode = nullptr; const jlm::rvsdg::GammaNode * gammaNode = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(lambdaExitMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(n)->origin()); if (auto castedStoreNode = dynamic_cast(node)) { storeNode = castedStoreNode; @@ -1916,10 +1914,10 @@ ValidatePhiTestSteensgaardAgnosticTopDown(const jlm::tests::PhiTest1 & test) auto gammaStateIndex = storeNode->input(2)->origin()->index(); auto load1 = - jlm::rvsdg::node_output::node(test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); + 
jlm::rvsdg::output::GetNode(*test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); assert(is(*load1, 2, 2)); - auto load2 = jlm::rvsdg::node_output::node(load1->input(1)->origin()); + auto load2 = jlm::rvsdg::output::GetNode(*load1->input(1)->origin()); assert(is(*load2, 2, 2)); } @@ -1932,16 +1930,16 @@ ValidateMemcpySteensgaardAgnostic(const jlm::tests::MemcpyTest & test) * Validate function f */ { - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto load = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); assert(is(*load, 3, 3)); - auto store = jlm::rvsdg::node_output::node(load->input(1)->origin()); + auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); assert(is(*store, 4, 2)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(store->input(2)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*store->input(2)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); } @@ -1949,29 +1947,29 @@ ValidateMemcpySteensgaardAgnostic(const jlm::tests::MemcpyTest & test) * Validate function g */ { - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaG().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaG().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 5)); - auto call = jlm::rvsdg::node_output::node(callExitSplit->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*callExitSplit->input(0)->origin()); assert(is(*call, 3, 3)); - auto callEntryMerge = 
jlm::rvsdg::node_output::node(call->input(2)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); assert(is(*callEntryMerge, 5, 1)); jlm::rvsdg::node * memcpy = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(callEntryMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); if (is(node)) memcpy = node; } assert(memcpy != nullptr); assert(is(*memcpy, 7, 4)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(memcpy->input(5)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*memcpy->input(5)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); } } @@ -1985,16 +1983,16 @@ ValidateMemcpySteensgaardRegionAware(const jlm::tests::MemcpyTest & test) * Validate function f */ { - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto load = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); assert(is(*load, 3, 3)); - auto store = jlm::rvsdg::node_output::node(load->input(1)->origin()); + auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); assert(is(*store, 4, 2)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(store->input(2)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*store->input(2)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); } @@ -2005,18 +2003,18 @@ ValidateMemcpySteensgaardRegionAware(const jlm::tests::MemcpyTest & test) auto callNode = jlm::rvsdg::input::GetNode(**test.LambdaG().cvargument(2)->begin()); assert(is(*callNode, 3, 3)); - auto callEntryMerge = jlm::rvsdg::node_output::node(callNode->input(2)->origin()); + auto callEntryMerge = 
jlm::rvsdg::output::GetNode(*callNode->input(2)->origin()); assert(is(*callEntryMerge, 2, 1)); auto callExitSplit = jlm::rvsdg::input::GetNode(**callNode->output(2)->begin()); assert(is(*callExitSplit, 1, 2)); - auto memcpyNode = jlm::rvsdg::node_output::node(callEntryMerge->input(0)->origin()); + auto memcpyNode = jlm::rvsdg::output::GetNode(*callEntryMerge->input(0)->origin()); assert(is(*memcpyNode, 7, 4)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(memcpyNode->input(4)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*memcpyNode->input(4)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); - assert(jlm::rvsdg::node_output::node(memcpyNode->input(5)->origin()) == lambdaEntrySplit); + assert(jlm::rvsdg::output::GetNode(*memcpyNode->input(5)->origin()) == lambdaEntrySplit); auto lambdaExitMerge = jlm::rvsdg::input::GetNode(**callExitSplit->output(0)->begin()); assert(is(*lambdaExitMerge, 2, 1)); @@ -2030,44 +2028,44 @@ ValidateMemcpyTestSteensgaardAgnosticTopDown(const jlm::tests::MemcpyTest & test // Validate function f { - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto load = jlm::rvsdg::node_output::node(test.LambdaF().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); assert(is(*load, 3, 3)); - auto store = jlm::rvsdg::node_output::node(load->input(1)->origin()); + auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); assert(is(*store, 4, 2)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(store->input(2)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*store->input(2)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); } // Validate function g { - auto lambdaExitMerge = jlm::rvsdg::node_output::node(test.LambdaG().fctresult(2)->origin()); + auto 
lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaG().fctresult(2)->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto callExitSplit = jlm::rvsdg::node_output::node(lambdaExitMerge->input(0)->origin()); + auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*callExitSplit, 1, 5)); - auto call = jlm::rvsdg::node_output::node(callExitSplit->input(0)->origin()); + auto call = jlm::rvsdg::output::GetNode(*callExitSplit->input(0)->origin()); assert(is(*call, 3, 3)); - auto callEntryMerge = jlm::rvsdg::node_output::node(call->input(2)->origin()); + auto callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); assert(is(*callEntryMerge, 5, 1)); jlm::rvsdg::node * memcpy = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { - auto node = jlm::rvsdg::node_output::node(callEntryMerge->input(n)->origin()); + auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); if (is(node)) memcpy = node; } assert(memcpy != nullptr); assert(is(*memcpy, 7, 4)); - auto lambdaEntrySplit = jlm::rvsdg::node_output::node(memcpy->input(5)->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*memcpy->input(5)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); } } @@ -2078,13 +2076,14 @@ ValidateFreeNullTestSteensgaardAgnostic(const jlm::tests::FreeNullTest & test) using namespace jlm::llvm; using namespace jlm::rvsdg; - auto lambdaExitMerge = node_output::node(test.LambdaMain().GetMemoryStateRegionResult().origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaMain().GetMemoryStateRegionResult().origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto free = node_output::node(test.LambdaMain().fctresult(0)->origin()); + auto free = jlm::rvsdg::output::GetNode(*test.LambdaMain().fctresult(0)->origin()); assert(is(*free, 2, 1)); - auto lambdaEntrySplit = node_output::node(lambdaExitMerge->input(0)->origin()); + auto lambdaEntrySplit = 
jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); } diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 363023a01..38204e832 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -172,9 +172,9 @@ test_theta() cne.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - auto un1 = jlm::rvsdg::node_output::node(u1); - auto un2 = jlm::rvsdg::node_output::node(u2); - auto bn1 = jlm::rvsdg::node_output::node(b1); + auto un1 = jlm::rvsdg::output::GetNode(*u1); + auto un2 = jlm::rvsdg::output::GetNode(*u2); + auto bn1 = jlm::rvsdg::output::GetNode(*b1); assert(un1->input(0)->origin() == un2->input(0)->origin()); assert(bn1->input(0)->origin() == un1->input(0)->origin()); assert(bn1->input(1)->origin() == region->argument(3)); @@ -416,7 +416,7 @@ test_lambda() cne.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - auto bn1 = jlm::rvsdg::node_output::node(b1); + auto bn1 = jlm::rvsdg::output::GetNode(*b1); assert(bn1->input(0)->origin() == bn1->input(1)->origin()); } diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index 37b657b4d..0e4ea0ff4 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -166,7 +166,7 @@ test2() // Assert // Function f1 should not have been inlined. 
- assert(is(jlm::rvsdg::node_output::node(f2->node()->fctresult(0)->origin()))); + assert(is(jlm::rvsdg::output::GetNode(*f2->node()->fctresult(0)->origin()))); } static int diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index 008dc6da9..2ef65955b 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -70,9 +70,9 @@ test1() tginversion.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex1.origin()))); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex2.origin()))); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex3.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex1.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex2.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex3.origin()))); } static inline void @@ -117,7 +117,7 @@ test2() tginversion.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(jlm::rvsdg::is(jlm::rvsdg::node_output::node(ex.origin()))); + assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex.origin()))); } static int diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index 3eb5b4326..b786bbab6 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -80,7 +80,7 @@ test_pullin_bottom() jlm::llvm::pullin_bottom(gamma); // jlm::rvsdg::view(graph, stdout); - assert(jlm::rvsdg::node_output::node(xp.origin()) == gamma); + assert(jlm::rvsdg::output::GetNode(*xp.origin()) == gamma); assert(gamma->subregion(0)->nnodes() == 2); assert(gamma->subregion(1)->nnodes() == 2); } diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 8a2f4aa18..97f319471 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -136,13 +136,13 @@ test_push_theta_bottom() jlm::llvm::push_bottom(theta); 
jlm::rvsdg::view(graph, stdout); - auto storenode = jlm::rvsdg::node_output::node(ex.origin()); + auto storenode = jlm::rvsdg::output::GetNode(*ex.origin()); assert(jlm::rvsdg::is(storenode)); assert(storenode->input(0)->origin() == a); assert(jlm::rvsdg::is( - jlm::rvsdg::node_output::node(storenode->input(1)->origin()))); + jlm::rvsdg::output::GetNode(*storenode->input(1)->origin()))); assert(jlm::rvsdg::is( - jlm::rvsdg::node_output::node(storenode->input(2)->origin()))); + jlm::rvsdg::output::GetNode(*storenode->input(2)->origin()))); } static int diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 219cfc95a..d0f0ca652 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -262,9 +262,9 @@ test_unknown_boundaries() loopunroll.run(rm, statisticsCollector); // jlm::rvsdg::view(graph, stdout); - auto node = jlm::rvsdg::node_output::node(ex1.origin()); + auto node = jlm::rvsdg::output::GetNode(*ex1.origin()); assert(jlm::rvsdg::is(node)); - node = jlm::rvsdg::node_output::node(node->input(1)->origin()); + node = jlm::rvsdg::output::GetNode(*node->input(1)->origin()); assert(jlm::rvsdg::is(node)); /* Create cleaner output */ diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 4f86d00c8..a170932da 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -36,8 +36,8 @@ types_bitstring_arithmetic_test_bitand(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(and0)->operation() == bitand_op(32)); - assert(node_output::node(and1)->operation() == int_constant_op(32, +1)); + assert(output::GetNode(*and0)->operation() == bitand_op(32)); + assert(output::GetNode(*and1)->operation() == int_constant_op(32, +1)); return 0; } @@ -72,11 +72,11 @@ types_bitstring_arithmetic_test_bitashr(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - 
assert(node_output::node(ashr0)->operation() == bitashr_op(32)); - assert(node_output::node(ashr1)->operation() == int_constant_op(32, 4)); - assert(node_output::node(ashr2)->operation() == int_constant_op(32, 0)); - assert(node_output::node(ashr3)->operation() == int_constant_op(32, -4)); - assert(node_output::node(ashr4)->operation() == int_constant_op(32, -1)); + assert(output::GetNode(*ashr0)->operation() == bitashr_op(32)); + assert(output::GetNode(*ashr1)->operation() == int_constant_op(32, 4)); + assert(output::GetNode(*ashr2)->operation() == int_constant_op(32, 0)); + assert(output::GetNode(*ashr3)->operation() == int_constant_op(32, -4)); + assert(output::GetNode(*ashr4)->operation() == int_constant_op(32, -1)); return 0; } @@ -99,7 +99,7 @@ types_bitstring_arithmetic_test_bitdifference(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(diff)->operation() == bitsub_op(32)); + assert(output::GetNode(*diff)->operation() == bitsub_op(32)); return 0; } @@ -125,9 +125,9 @@ types_bitstring_arithmetic_test_bitnegate(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(neg0)->operation() == bitneg_op(32)); - assert(node_output::node(neg1)->operation() == int_constant_op(32, -3)); - assert(node_output::node(neg2)->operation() == int_constant_op(32, 3)); + assert(output::GetNode(*neg0)->operation() == bitneg_op(32)); + assert(output::GetNode(*neg1)->operation() == int_constant_op(32, -3)); + assert(output::GetNode(*neg2)->operation() == int_constant_op(32, 3)); return 0; } @@ -153,9 +153,9 @@ types_bitstring_arithmetic_test_bitnot(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(not0)->operation() == bitnot_op(32)); - assert(node_output::node(not1)->operation() == int_constant_op(32, -4)); - assert(node_output::node(not2)->operation() == int_constant_op(32, 3)); + assert(output::GetNode(*not0)->operation() == bitnot_op(32)); + 
assert(output::GetNode(*not1)->operation() == int_constant_op(32, -4)); + assert(output::GetNode(*not2)->operation() == int_constant_op(32, 3)); return 0; } @@ -182,8 +182,8 @@ types_bitstring_arithmetic_test_bitor(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(or0)->operation() == bitor_op(32)); - assert(node_output::node(or1)->operation() == uint_constant_op(32, 7)); + assert(output::GetNode(*or0)->operation() == bitor_op(32)); + assert(output::GetNode(*or1)->operation() == uint_constant_op(32, 7)); return 0; } @@ -211,8 +211,8 @@ types_bitstring_arithmetic_test_bitproduct(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(product0)->operation() == bitmul_op(32)); - assert(node_output::node(product1)->operation() == uint_constant_op(32, 15)); + assert(output::GetNode(*product0)->operation() == bitmul_op(32)); + assert(output::GetNode(*product1)->operation() == uint_constant_op(32, 15)); return 0; } @@ -235,7 +235,7 @@ types_bitstring_arithmetic_test_bitshiproduct(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(shiproduct)->operation() == bitsmulh_op(32)); + assert(output::GetNode(*shiproduct)->operation() == bitsmulh_op(32)); return 0; } @@ -265,9 +265,9 @@ types_bitstring_arithmetic_test_bitshl(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(shl0)->operation() == bitshl_op(32)); - assert(node_output::node(shl1)->operation() == uint_constant_op(32, 64)); - assert(node_output::node(shl2)->operation() == uint_constant_op(32, 0)); + assert(output::GetNode(*shl0)->operation() == bitshl_op(32)); + assert(output::GetNode(*shl1)->operation() == uint_constant_op(32, 64)); + assert(output::GetNode(*shl2)->operation() == uint_constant_op(32, 0)); return 0; } @@ -297,9 +297,9 @@ types_bitstring_arithmetic_test_bitshr(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - 
assert(node_output::node(shr0)->operation() == bitshr_op(32)); - assert(node_output::node(shr1)->operation() == uint_constant_op(32, 4)); - assert(node_output::node(shr2)->operation() == uint_constant_op(32, 0)); + assert(output::GetNode(*shr0)->operation() == bitshr_op(32)); + assert(output::GetNode(*shr1)->operation() == uint_constant_op(32, 4)); + assert(output::GetNode(*shr2)->operation() == uint_constant_op(32, 0)); return 0; } @@ -327,8 +327,8 @@ types_bitstring_arithmetic_test_bitsmod(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(smod0)->operation() == bitsmod_op(32)); - assert(node_output::node(smod1)->operation() == int_constant_op(32, -1)); + assert(output::GetNode(*smod0)->operation() == bitsmod_op(32)); + assert(output::GetNode(*smod1)->operation() == int_constant_op(32, -1)); return 0; } @@ -356,8 +356,8 @@ types_bitstring_arithmetic_test_bitsquotient(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(squot0)->operation() == bitsdiv_op(32)); - assert(node_output::node(squot1)->operation() == int_constant_op(32, -2)); + assert(output::GetNode(*squot0)->operation() == bitsdiv_op(32)); + assert(output::GetNode(*squot1)->operation() == int_constant_op(32, -2)); return 0; } @@ -385,8 +385,8 @@ types_bitstring_arithmetic_test_bitsum(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(sum0)->operation() == bitadd_op(32)); - assert(node_output::node(sum1)->operation() == int_constant_op(32, 8)); + assert(output::GetNode(*sum0)->operation() == bitadd_op(32)); + assert(output::GetNode(*sum1)->operation() == int_constant_op(32, 8)); return 0; } @@ -409,7 +409,7 @@ types_bitstring_arithmetic_test_bituhiproduct(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(uhiproduct)->operation() == bitumulh_op(32)); + assert(output::GetNode(*uhiproduct)->operation() == bitumulh_op(32)); return 0; } @@ -437,8 +437,8 @@ 
types_bitstring_arithmetic_test_bitumod(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(umod0)->operation() == bitumod_op(32)); - assert(node_output::node(umod1)->operation() == int_constant_op(32, 1)); + assert(output::GetNode(*umod0)->operation() == bitumod_op(32)); + assert(output::GetNode(*umod1)->operation() == int_constant_op(32, 1)); return 0; } @@ -466,8 +466,8 @@ types_bitstring_arithmetic_test_bituquotient(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(uquot0)->operation() == bitudiv_op(32)); - assert(node_output::node(uquot1)->operation() == int_constant_op(32, 2)); + assert(output::GetNode(*uquot0)->operation() == bitudiv_op(32)); + assert(output::GetNode(*uquot1)->operation() == int_constant_op(32, 2)); return 0; } @@ -494,8 +494,8 @@ types_bitstring_arithmetic_test_bitxor(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(xor0)->operation() == bitxor_op(32)); - assert(node_output::node(xor1)->operation() == int_constant_op(32, 6)); + assert(output::GetNode(*xor0)->operation() == bitxor_op(32)); + assert(output::GetNode(*xor1)->operation() == int_constant_op(32, 6)); return 0; } @@ -503,7 +503,7 @@ types_bitstring_arithmetic_test_bitxor(void) static inline void expect_static_true(jlm::rvsdg::output * port) { - auto node = jlm::rvsdg::node_output::node(port); + auto node = jlm::rvsdg::output::GetNode(*port); auto op = dynamic_cast(&node->operation()); assert(op && op->value().nbits() == 1 && op->value().str() == "1"); } @@ -511,7 +511,7 @@ expect_static_true(jlm::rvsdg::output * port) static inline void expect_static_false(jlm::rvsdg::output * port) { - auto node = jlm::rvsdg::node_output::node(port); + auto node = jlm::rvsdg::output::GetNode(*port); auto op = dynamic_cast(&node->operation()); assert(op && op->value().nbits() == 1 && op->value().str() == "0"); } @@ -542,10 +542,10 @@ types_bitstring_comparison_test_bitequal(void) 
graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(equal0)->operation() == biteq_op(32)); + assert(output::GetNode(*equal0)->operation() == biteq_op(32)); expect_static_true(equal1); expect_static_false(equal2); - assert(node_output::node(equal3)->operation() == biteq_op(32)); + assert(output::GetNode(*equal3)->operation() == biteq_op(32)); return 0; } @@ -576,10 +576,10 @@ types_bitstring_comparison_test_bitnotequal(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(nequal0)->operation() == bitne_op(32)); + assert(output::GetNode(*nequal0)->operation() == bitne_op(32)); expect_static_false(nequal1); expect_static_true(nequal2); - assert(node_output::node(nequal3)->operation() == bitne_op(32)); + assert(output::GetNode(*nequal3)->operation() == bitne_op(32)); return 0; } @@ -613,7 +613,7 @@ types_bitstring_comparison_test_bitsgreater(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(sgreater0)->operation() == bitsgt_op(32)); + assert(output::GetNode(*sgreater0)->operation() == bitsgt_op(32)); expect_static_false(sgreater1); expect_static_true(sgreater2); expect_static_false(sgreater3); @@ -653,7 +653,7 @@ types_bitstring_comparison_test_bitsgreatereq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(sgreatereq0)->operation() == bitsge_op(32)); + assert(output::GetNode(*sgreatereq0)->operation() == bitsge_op(32)); expect_static_false(sgreatereq1); expect_static_true(sgreatereq2); expect_static_true(sgreatereq3); @@ -692,7 +692,7 @@ types_bitstring_comparison_test_bitsless(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(sless0)->operation() == bitslt_op(32)); + assert(output::GetNode(*sless0)->operation() == bitslt_op(32)); expect_static_true(sless1); expect_static_false(sless2); expect_static_false(sless3); @@ -732,7 +732,7 @@ types_bitstring_comparison_test_bitslesseq(void) 
graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(slesseq0)->operation() == bitsle_op(32)); + assert(output::GetNode(*slesseq0)->operation() == bitsle_op(32)); expect_static_true(slesseq1); expect_static_true(slesseq2); expect_static_false(slesseq3); @@ -771,7 +771,7 @@ types_bitstring_comparison_test_bitugreater(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(ugreater0)->operation() == bitugt_op(32)); + assert(output::GetNode(*ugreater0)->operation() == bitugt_op(32)); expect_static_false(ugreater1); expect_static_true(ugreater2); expect_static_false(ugreater3); @@ -811,7 +811,7 @@ types_bitstring_comparison_test_bitugreatereq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(ugreatereq0)->operation() == bituge_op(32)); + assert(output::GetNode(*ugreatereq0)->operation() == bituge_op(32)); expect_static_false(ugreatereq1); expect_static_true(ugreatereq2); expect_static_true(ugreatereq3); @@ -850,7 +850,7 @@ types_bitstring_comparison_test_bituless(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(uless0)->operation() == bitult_op(32)); + assert(output::GetNode(*uless0)->operation() == bitult_op(32)); expect_static_true(uless1); expect_static_false(uless2); expect_static_false(uless3); @@ -890,7 +890,7 @@ types_bitstring_comparison_test_bitulesseq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(node_output::node(ulesseq0)->operation() == bitule_op(32)); + assert(output::GetNode(*ulesseq0)->operation() == bitule_op(32)); expect_static_true(ulesseq1); expect_static_true(ulesseq2); expect_static_false(ulesseq3); @@ -935,10 +935,10 @@ types_bitstring_test_constant(void) jlm::rvsdg::graph graph; - auto b1 = node_output::node(create_bitconstant(graph.root(), "00110011")); - auto b2 = node_output::node(create_bitconstant(graph.root(), 8, 204)); - auto b3 = 
node_output::node(create_bitconstant(graph.root(), 8, 204)); - auto b4 = node_output::node(create_bitconstant(graph.root(), "001100110")); + auto b1 = output::GetNode(*create_bitconstant(graph.root(), "00110011")); + auto b2 = output::GetNode(*create_bitconstant(graph.root(), 8, 204)); + auto b3 = output::GetNode(*create_bitconstant(graph.root(), 8, 204)); + auto b4 = output::GetNode(*create_bitconstant(graph.root(), "001100110")); assert(b1->operation() == uint_constant_op(8, 204)); assert(b1->operation() == int_constant_op(8, -52)); @@ -952,11 +952,11 @@ types_bitstring_test_constant(void) assert(b4->operation() == uint_constant_op(9, 204)); assert(b4->operation() == int_constant_op(9, 204)); - auto plus_one_128 = node_output::node(create_bitconstant(graph.root(), ONE_64 ZERO_64)); + auto plus_one_128 = output::GetNode(*create_bitconstant(graph.root(), ONE_64 ZERO_64)); assert(plus_one_128->operation() == uint_constant_op(128, 1)); assert(plus_one_128->operation() == int_constant_op(128, 1)); - auto minus_one_128 = node_output::node(create_bitconstant(graph.root(), MONE_64 MONE_64)); + auto minus_one_128 = output::GetNode(*create_bitconstant(graph.root(), MONE_64 MONE_64)); assert(minus_one_128->operation() == int_constant_op(128, -1)); jlm::rvsdg::view(graph.root(), stdout); @@ -981,11 +981,11 @@ types_bitstring_test_normalize(void) assert(sum_nf); sum_nf->set_mutable(false); - auto sum0 = node_output::node(bitadd_op::create(32, imp, c0)); + auto sum0 = output::GetNode(*bitadd_op::create(32, imp, c0)); assert(sum0->operation() == bitadd_op(32)); assert(sum0->ninputs() == 2); - auto sum1 = node_output::node(bitadd_op::create(32, sum0->output(0), c1)); + auto sum1 = output::GetNode(*bitadd_op::create(32, sum0->output(0), c1)); assert(sum1->operation() == bitadd_op(32)); assert(sum1->ninputs() == 2); @@ -1007,7 +1007,7 @@ types_bitstring_test_normalize(void) op2 = tmp; } /* FIXME: the graph traversers are currently broken, that is why it won't normalize */ - 
assert(node_output::node(op1)->operation() == int_constant_op(32, 3 + 4)); + assert(output::GetNode(*op1)->operation() == int_constant_op(32, 3 + 4)); assert(op2 == imp); jlm::rvsdg::view(graph.root(), stdout); @@ -1018,7 +1018,7 @@ types_bitstring_test_normalize(void) static void assert_constant(jlm::rvsdg::output * bitstr, size_t nbits, const char bits[]) { - auto node = jlm::rvsdg::node_output::node(bitstr); + auto node = jlm::rvsdg::output::GetNode(*bitstr); auto op = dynamic_cast(node->operation()); assert(op.value() == jlm::rvsdg::bitvalue_repr(std::string(bits, nbits).c_str())); } @@ -1049,7 +1049,7 @@ types_bitstring_test_reduction(void) { auto concat = jlm::rvsdg::bitconcat({ x, y }); - auto node = node_output::node(jlm::rvsdg::bitslice(concat, 8, 24)); + auto node = output::GetNode(*jlm::rvsdg::bitslice(concat, 8, 24)); auto o0 = dynamic_cast(node->input(0)->origin()); auto o1 = dynamic_cast(node->input(1)->origin()); assert(dynamic_cast(&node->operation())); @@ -1093,7 +1093,7 @@ types_bitstring_test_slice_concat(void) { /* slice of constant */ - auto a = node_output::node(jlm::rvsdg::bitslice(base_const1, 2, 6)); + auto a = output::GetNode(*jlm::rvsdg::bitslice(base_const1, 2, 6)); auto & op = dynamic_cast(a->operation()); assert(op.value() == bitvalue_repr("1101")); @@ -1102,7 +1102,7 @@ types_bitstring_test_slice_concat(void) { /* slice of slice */ auto a = jlm::rvsdg::bitslice(base_x, 2, 6); - auto b = node_output::node(jlm::rvsdg::bitslice(a, 1, 3)); + auto b = output::GetNode(*jlm::rvsdg::bitslice(a, 1, 3)); assert(dynamic_cast(&b->operation())); const bitslice_op * attrs; @@ -1130,7 +1130,7 @@ types_bitstring_test_slice_concat(void) { /* concat flattening */ auto a = jlm::rvsdg::bitconcat({ base_x, base_y }); - auto b = node_output::node(jlm::rvsdg::bitconcat({ a, base_z })); + auto b = output::GetNode(*jlm::rvsdg::bitconcat({ a, base_z })); assert(dynamic_cast(&b->operation())); assert(b->ninputs() == 3); @@ -1157,7 +1157,7 @@ 
types_bitstring_test_slice_concat(void) { /* concat of constants */ - auto a = node_output::node(jlm::rvsdg::bitconcat({ base_const1, base_const2 })); + auto a = output::GetNode(*jlm::rvsdg::bitconcat({ base_const1, base_const2 })); auto & op = dynamic_cast(a->operation()); assert(op.value() == bitvalue_repr("0011011111001000")); diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 1d18630ba..24c1de723 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -41,13 +41,13 @@ test_flattened_binary_reduction() assert(graph.root()->nnodes() == 3); - auto node0 = node_output::node(ex.origin()); + auto node0 = output::GetNode(*ex.origin()); assert(is(node0)); - auto node1 = node_output::node(node0->input(0)->origin()); + auto node1 = output::GetNode(*node0->input(0)->origin()); assert(is(node1)); - auto node2 = node_output::node(node0->input(1)->origin()); + auto node2 = output::GetNode(*node0->input(1)->origin()); assert(is(node2)); } @@ -75,13 +75,13 @@ test_flattened_binary_reduction() assert(graph.root()->nnodes() == 3); - auto node0 = node_output::node(ex.origin()); + auto node0 = output::GetNode(*ex.origin()); assert(is(node0)); - auto node1 = node_output::node(node0->input(0)->origin()); + auto node1 = output::GetNode(*node0->input(0)->origin()); assert(is(node1)); - auto node2 = node_output::node(node1->input(0)->origin()); + auto node2 = output::GetNode(*node1->input(0)->origin()); assert(is(node2)); } } diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 1202bec05..9a4a4e2e5 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -137,12 +137,12 @@ test_control_constant_reduction() graph.normalize(); jlm::rvsdg::view(graph.root(), stdout); - auto match = node_output::node(ex1.origin()); + auto match = output::GetNode(*ex1.origin()); assert(match && is(match->operation())); auto & match_op = to_match_op(match->operation()); 
assert(match_op.default_alternative() == 0); - assert(node_output::node(ex2.origin()) == gamma); + assert(output::GetNode(*ex2.origin()) == gamma); } static void @@ -172,7 +172,7 @@ test_control_constant_reduction2() graph.normalize(); jlm::rvsdg::view(graph.root(), stdout); - auto match = node_output::node(ex.origin()); + auto match = output::GetNode(*ex.origin()); assert(is(match)); } diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index ce03f73e2..77a618a1c 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ b/tests/jlm/rvsdg/test-statemux.cpp @@ -45,7 +45,7 @@ test_mux_mux_reduction() // jlm::rvsdg::view(graph.root(), stdout); - auto node = node_output::node(ex.origin()); + auto node = output::GetNode(*ex.origin()); assert(node->ninputs() == 4); assert(node->input(0)->origin() == x); assert(node->input(1)->origin() == y); @@ -79,7 +79,7 @@ test_multiple_origin_reduction() view(graph.root(), stdout); - assert(node_output::node(ex.origin())->ninputs() == 1); + assert(output::GetNode(*ex.origin())->ninputs() == 1); } static int From 3aacb25a59f4a2f1f7ac93759c647ff2f56c35d1 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 6 Oct 2024 15:10:00 +0200 Subject: [PATCH 102/170] Improve output of Cfg::ToAscii() method (#647) The PR does the following: 1. Ensure that CFG arguments are labeled after RVSDG to CFG conversion 2. Give readable labels to CFG nodes 3. 
Add missing newline after CFG printing This PR is part of issue #586 --- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 4 +- jlm/llvm/ir/cfg.cpp | 49 ++++++++++++++++++------ jlm/llvm/ir/cfg.hpp | 10 +++-- jlm/llvm/ir/print.cpp | 2 +- 4 files changed, 48 insertions(+), 17 deletions(-) diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index edff1dfc3..deea34f56 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -127,9 +127,11 @@ create_cfg(const lambda::node & lambda, context & ctx) ctx.set_cfg(cfg.get()); /* add arguments */ + size_t n = 0; for (auto & fctarg : lambda.fctarguments()) { - auto argument = llvm::argument::create("", fctarg.Type(), fctarg.attributes()); + auto name = util::strfmt("_a", n++, "_"); + auto argument = llvm::argument::create(name, fctarg.Type(), fctarg.attributes()); auto v = cfg->entry()->append_argument(std::move(argument)); ctx.insert(&fctarg, v); } diff --git a/jlm/llvm/ir/cfg.cpp b/jlm/llvm/ir/cfg.cpp index aad3183d1..d5adc1e75 100644 --- a/jlm/llvm/ir/cfg.cpp +++ b/jlm/llvm/ir/cfg.cpp @@ -74,9 +74,10 @@ cfg::ToAscii(const cfg & controlFlowGraph) { std::string str; auto nodes = breadth_first(controlFlowGraph); + auto labels = CreateLabels(nodes); for (const auto & node : nodes) { - str += CreateLabel(*node) + ":"; + str += labels.at(node) + ":"; str += (is(node) ? 
"\n" : " "); if (auto entryNode = dynamic_cast(node)) @@ -89,7 +90,7 @@ cfg::ToAscii(const cfg & controlFlowGraph) } else if (auto basicBlock = dynamic_cast(node)) { - str += ToAscii(*basicBlock); + str += ToAscii(*basicBlock, labels); } else { @@ -125,7 +126,9 @@ cfg::ToAscii(const exit_node & exitNode) } std::string -cfg::ToAscii(const basic_block & basicBlock) +cfg::ToAscii( + const basic_block & basicBlock, + const std::unordered_map & labels) { auto & threeAddressCodes = basicBlock.tacs(); @@ -140,26 +143,28 @@ cfg::ToAscii(const basic_block & basicBlock) if (threeAddressCodes.last()) { if (is(threeAddressCodes.last()->operation())) - str += " " + CreateTargets(basicBlock); + str += " " + CreateTargets(basicBlock, labels); else - str += "\n\t" + CreateTargets(basicBlock); + str += "\n\t" + CreateTargets(basicBlock, labels); } else { - str += "\t" + CreateTargets(basicBlock); + str += "\t" + CreateTargets(basicBlock, labels); } return str + "\n"; } std::string -cfg::CreateTargets(const cfg_node & node) +cfg::CreateTargets( + const cfg_node & node, + const std::unordered_map & labels) { size_t n = 0; std::string str("["); for (auto it = node.begin_outedges(); it != node.end_outedges(); it++, n++) { - str += CreateLabel(*it->sink()); + str += labels.at(it->sink()); if (n != node.noutedges() - 1) str += ", "; } @@ -168,10 +173,32 @@ cfg::CreateTargets(const cfg_node & node) return str; } -std::string -cfg::CreateLabel(const cfg_node & node) +std::unordered_map +cfg::CreateLabels(const std::vector & nodes) { - return util::strfmt(&node); + std::unordered_map map; + for (size_t n = 0; n < nodes.size(); n++) + { + auto node = nodes[n]; + if (is(node)) + { + map[node] = "entry"; + } + else if (is(node)) + { + map[node] = "exit"; + } + else if (is(node)) + { + map[node] = util::strfmt("bb", n); + } + else + { + JLM_UNREACHABLE("Unhandled control flow graph node type!"); + } + } + + return map; } /* supporting functions */ diff --git a/jlm/llvm/ir/cfg.hpp 
b/jlm/llvm/ir/cfg.hpp index 8e4b5cefc..e3917f4d3 100644 --- a/jlm/llvm/ir/cfg.hpp +++ b/jlm/llvm/ir/cfg.hpp @@ -387,13 +387,15 @@ class cfg final ToAscii(const exit_node & exitNode); static std::string - ToAscii(const basic_block & basicBlock); + ToAscii( + const basic_block & basicBlock, + const std::unordered_map & labels); static std::string - CreateTargets(const cfg_node & node); + CreateTargets(const cfg_node & node, const std::unordered_map & labels); - static std::string - CreateLabel(const cfg_node & node); + static std::unordered_map + CreateLabels(const std::vector & nodes); ipgraph_module & module_; std::unique_ptr exit_; diff --git a/jlm/llvm/ir/print.cpp b/jlm/llvm/ir/print.cpp index 926fcd732..af7107c7b 100644 --- a/jlm/llvm/ir/print.cpp +++ b/jlm/llvm/ir/print.cpp @@ -59,7 +59,7 @@ emit_function_node(const ipgraph_node & clg_node) std::string cfg = node.cfg() ? cfg::ToAscii(*node.cfg()) : ""; std::string exported = !is_externally_visible(node.linkage()) ? "static" : ""; - return exported + results + " " + node.name() + " " + operands + "\n{\n" + cfg + "}\n"; + return exported + results + " " + node.name() + " " + operands + "\n{\n" + cfg + "\n}\n"; } static std::string From ed49c50e7151e61a9f159ed055429f48ee334640 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 6 Oct 2024 16:39:13 +0200 Subject: [PATCH 103/170] Simplify annotation computations in RvsdgTreePrinter class (#649) Use `std::count_if()` instead of explicit iteration to compute relevant annotations in `RvsdgTreePrinter` class. 
--------- Co-authored-by: HKrogstie --- jlm/llvm/opt/RvsdgTreePrinter.cpp | 36 +++++++++++++++---------------- jlm/llvm/opt/RvsdgTreePrinter.hpp | 8 +++++++ 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 6a1da32f5..5a88bde83 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -130,26 +130,14 @@ RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( std::function annotateRegion = [&](const rvsdg::Region & region) { - size_t numMemoryStateArguments = 0; - for (size_t n = 0; n < region.narguments(); n++) - { - auto argument = region.argument(n); - if (rvsdg::is(argument->type())) - { - numMemoryStateArguments++; - } - } + auto argumentRange = region.Arguments(); + auto numMemoryStateArguments = + std::count_if(argumentRange.begin(), argumentRange.end(), IsMemoryStateOutput); annotationMap.AddAnnotation(®ion, { argumentLabel, numMemoryStateArguments }); - size_t numMemoryStateResults = 0; - for (size_t n = 0; n < region.nresults(); n++) - { - auto result = region.result(n); - if (rvsdg::is(result->type())) - { - numMemoryStateResults++; - } - } + auto resultRange = region.Results(); + auto numMemoryStateResults = + std::count_if(resultRange.begin(), resultRange.end(), IsMemoryStateInput); annotationMap.AddAnnotation(®ion, { resultLabel, numMemoryStateResults }); for (auto & node : region.nodes) @@ -220,4 +208,16 @@ RvsdgTreePrinter::GetOutputFileNameCounter(const RvsdgModule & rvsdgModule) return RvsdgModuleCounterMap_[rvsdgModule.SourceFileName().to_str()]++; } +bool +RvsdgTreePrinter::IsMemoryStateInput(const rvsdg::input * input) noexcept +{ + return rvsdg::is(input->Type()); +} + +bool +RvsdgTreePrinter::IsMemoryStateOutput(const rvsdg::output * output) noexcept +{ + return rvsdg::is(output->Type()); +} + } diff --git a/jlm/llvm/opt/RvsdgTreePrinter.hpp b/jlm/llvm/opt/RvsdgTreePrinter.hpp index ddeaf5c94..a0a1c622c 100644 --- 
a/jlm/llvm/opt/RvsdgTreePrinter.hpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.hpp @@ -14,6 +14,8 @@ namespace jlm::rvsdg { class graph; +class input; +class output; } namespace jlm::util @@ -166,6 +168,12 @@ class RvsdgTreePrinter final : public optimization static uint64_t GetOutputFileNameCounter(const RvsdgModule & rvsdgModule); + [[nodiscard]] static bool + IsMemoryStateInput(const rvsdg::input * input) noexcept; + + [[nodiscard]] static bool + IsMemoryStateOutput(const rvsdg::output * output) noexcept; + Configuration Configuration_; }; From 3dc7b57b253e7a363ca9cbfc37e3fcd7bee18ab8 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 6 Oct 2024 17:37:32 +0200 Subject: [PATCH 104/170] Remove outdated begin() and end() methods from Region class (#650) --- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 2 +- jlm/rvsdg/region.hpp | 24 ------------------------ 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 0bd7102b4..b51418343 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -258,7 +258,7 @@ decouple_load( } // copy nodes std::vector> context(loopNode->subregion()->nnodes()); - for (auto & node : *loopNode->subregion()) + for (auto & node : loopNode->subregion()->Nodes()) { JLM_ASSERT(node.depth() < context.size()); context[node.depth()].push_back(&node); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 18cbad422..d119e72dd 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -297,30 +297,6 @@ class Region return { bottom_nodes.begin(), bottom_nodes.end() }; } - inline region_nodes_list::iterator - begin() - { - return nodes.begin(); - } - - inline region_nodes_list::const_iterator - begin() const - { - return nodes.begin(); - } - - inline region_nodes_list::iterator - end() - { - return nodes.end(); - } - - inline region_nodes_list::const_iterator - end() const - { - return nodes.end(); - } - 
inline jlm::rvsdg::graph * graph() const noexcept { From 9c7f2cf47888490640376a16854a8ba5d5069feb Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 9 Oct 2024 06:02:18 +0200 Subject: [PATCH 105/170] Privatize bottom nodes in Region class (#651) --- .../rvsdg2rhls/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/rvsdg/graph.cpp | 4 +- jlm/rvsdg/node.cpp | 12 +++-- jlm/rvsdg/node.hpp | 21 +++++++++ jlm/rvsdg/region.cpp | 29 ++++++++++-- jlm/rvsdg/region.hpp | 44 +++++++++++++++++-- jlm/rvsdg/traverser.cpp | 6 ++- tests/jlm/rvsdg/RegionTests.cpp | 40 +++++++++++++++++ 9 files changed, 143 insertions(+), 17 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp index 99a976748..560b84e7f 100644 --- a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp +++ b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp @@ -106,7 +106,7 @@ EliminateDeadNodesInRegion(rvsdg::Region & region) anyChanged |= changed; } while (changed); - JLM_ASSERT(region.bottom_nodes.empty()); + JLM_ASSERT(region.NumBottomNodes() == 0); return anyChanged; } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index c970fad32..80dc5d2a3 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -336,7 +336,7 @@ DeadNodeElimination::SweepRegion(rvsdg::Region & region) const } } - JLM_ASSERT(region.bottom_nodes.empty()); + JLM_ASSERT(region.NumBottomNodes() == 0); } void diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index d8ea29621..8530f91e7 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -93,9 +93,9 @@ graph::ExtractTailNodes(const graph & rvsdg) auto & rootRegion = *rvsdg.root(); std::vector nodes; - for (auto & node : rootRegion.bottom_nodes) + for (auto & bottomNode : rootRegion.BottomNodes()) { - nodes.push_back(&node); + nodes.push_back(&bottomNode); } for (size_t n = 0; n < 
rootRegion.nresults(); n++) diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index e49c70b3d..e7296ef33 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -112,7 +112,9 @@ output::remove_user(jlm::rvsdg::input * user) if (auto node = output::GetNode(*this)) { if (!node->has_users()) - region()->bottom_nodes.push_back(node); + { + JLM_ASSERT(region()->AddBottomNode(*node)); + } } } @@ -124,7 +126,9 @@ output::add_user(jlm::rvsdg::input * user) if (auto node = output::GetNode(*this)) { if (!node->has_users()) - region()->bottom_nodes.erase(node); + { + JLM_ASSERT(region()->RemoveBottomNode(*node)); + } } users_.insert(user); } @@ -176,7 +180,7 @@ node::node(std::unique_ptr op, rvsdg::Region * region) region_(region), operation_(std::move(op)) { - region->bottom_nodes.push_back(this); + JLM_ASSERT(region->AddBottomNode(*this)); region->top_nodes.push_back(this); region->nodes.push_back(this); } @@ -184,7 +188,7 @@ node::node(std::unique_ptr op, rvsdg::Region * region) node::~node() { outputs_.clear(); - region()->bottom_nodes.erase(this); + JLM_ASSERT(region()->RemoveBottomNode(*this)); if (ninputs() == 0) region()->top_nodes.erase(this); diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 971da855f..962d59a7c 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -696,6 +696,27 @@ class node inline void recompute_depth() noexcept; + /** + * \brief Determines whether the node is dead. + * + * A node is considered dead if all its outputs are dead. + * + * @return True, if the node is dead, otherwise false. 
+ * + * \see output::IsDead() + */ + [[nodiscard]] bool + IsDead() const noexcept + { + for (auto & output : outputs_) + { + if (!output->IsDead()) + return false; + } + + return true; + } + protected: node_input * add_input(std::unique_ptr input); diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 064e5d122..14f461fbd 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -83,7 +83,7 @@ Region::~Region() noexcept prune(false); JLM_ASSERT(nodes.empty()); JLM_ASSERT(top_nodes.empty()); - JLM_ASSERT(bottom_nodes.empty()); + JLM_ASSERT(NumBottomNodes() == 0); while (arguments_.size()) RemoveArgument(arguments_.size() - 1); @@ -177,6 +177,29 @@ Region::remove_node(jlm::rvsdg::node * node) delete node; } +bool +Region::AddBottomNode(rvsdg::node & node) +{ + if (node.region() != this) + return false; + + if (!node.IsDead()) + return false; + + // FIXME: We should check that a node is not already part of the bottom nodes before adding it. + BottomNodes_.push_back(&node); + + return true; +} + +bool +Region::RemoveBottomNode(rvsdg::node & node) +{ + auto numBottomNodes = NumBottomNodes(); + BottomNodes_.erase(&node); + return numBottomNodes != NumBottomNodes(); +} + void Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool copy_results) const { @@ -227,8 +250,8 @@ Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool void Region::prune(bool recursive) { - while (bottom_nodes.first()) - remove_node(bottom_nodes.first()); + while (BottomNodes_.first()) + remove_node(BottomNodes_.first()); if (!recursive) return; diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index d119e72dd..fd4a41a4a 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -284,7 +284,7 @@ class Region [[nodiscard]] BottomNodeRange BottomNodes() noexcept { - return { bottom_nodes.begin(), bottom_nodes.end() }; + return { BottomNodes_.begin(), BottomNodes_.end() }; } /** @@ -294,7 +294,7 @@ class Region [[nodiscard]] 
BottomNodeConstRange BottomNodes() const noexcept { - return { bottom_nodes.begin(), bottom_nodes.end() }; + return { BottomNodes_.begin(), BottomNodes_.end() }; } inline jlm::rvsdg::graph * @@ -459,9 +459,46 @@ class Region return nodes.size(); } + /** + * @return The number of bottom nodes in the region. + */ + [[nodiscard]] size_t + NumBottomNodes() const noexcept + { + return BottomNodes_.size(); + } + void remove_node(jlm::rvsdg::node * node); + /** + * \brief Adds \p node to the bottom nodes of the region. + * + * The node \p node is only added to the bottom nodes of this region, iff: + * 1. The node \p node belongs to the same region instance. + * 2. All the outputs of \p node are dead. See node::IsDead() for more details. + * + * @param node The node that is added. + * @return True, if \p node was added, otherwise false. + * + * @note This method is automatically invoked when a node is created or becomes dead. There is + * no need to invoke it manually. + */ + bool + AddBottomNode(rvsdg::node & node); + + /** + * Removes \p node from the bottom nodes in the region. + * + * @param node The node that is removed. + * @return True, if \p node was a bottom node and removed, otherwise false. + * + * @note This method is automatically invoked when a node cedes to be dead. There is no need to + * invoke it manually. 
+ */ + bool + RemoveBottomNode(rvsdg::node & node); + /** \brief Copy a region with substitutions \param target Target region to create nodes in @@ -561,8 +598,6 @@ class Region region_top_node_list top_nodes; - region_bottom_node_list bottom_nodes; - private: static void ToTree( @@ -592,6 +627,7 @@ class Region jlm::rvsdg::structural_node * node_; std::vector results_; std::vector arguments_; + region_bottom_node_list BottomNodes_; }; static inline void diff --git a/jlm/rvsdg/traverser.cpp b/jlm/rvsdg/traverser.cpp index cb8507561..25d30ef02 100644 --- a/jlm/rvsdg/traverser.cpp +++ b/jlm/rvsdg/traverser.cpp @@ -128,8 +128,10 @@ bottomup_traverser::bottomup_traverser(rvsdg::Region * region, bool revisit) tracker_(region->graph()), new_node_state_(revisit ? traversal_nodestate::frontier : traversal_nodestate::behind) { - for (auto & node : region->bottom_nodes) - tracker_.set_nodestate(&node, traversal_nodestate::frontier); + for (auto & bottomNode : region->BottomNodes()) + { + tracker_.set_nodestate(&bottomNode, traversal_nodestate::frontier); + } for (size_t n = 0; n < region->nresults(); n++) { diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 1b1b4330d..4677f8417 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -441,3 +441,43 @@ ToTree_RvsdgWithStructuralNodesAndAnnotations() JLM_UNIT_TEST_REGISTER( "jlm/rvsdg/RegionTests-ToTree_RvsdgWithStructuralNodesAndAnnotations", ToTree_RvsdgWithStructuralNodesAndAnnotations) + +static int +BottomNodeTests() +{ + using namespace jlm::rvsdg; + using namespace jlm::tests; + + auto valueType = valuetype::Create(); + + // Arrange + graph rvsdg; + + // Act & Assert + // A newly created node without any users should automatically be added to the bottom nodes + auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 1); + assert(structuralNode->IsDead()); + assert(rvsdg.root()->NumBottomNodes() == 1); + 
assert(&*(rvsdg.root()->BottomNodes().begin()) == structuralNode); + + // The node cedes to be dead + auto & output = structuralNode->AddOutput(valueType); + jlm::tests::GraphExport::Create(output, "x"); + assert(structuralNode->IsDead() == false); + assert(rvsdg.root()->NumBottomNodes() == 0); + assert(rvsdg.root()->BottomNodes().begin() == rvsdg.root()->BottomNodes().end()); + + // And it becomes dead again + rvsdg.root()->RemoveResultsWhere( + [](const RegionResult & result) + { + return true; + }); + assert(structuralNode->IsDead()); + assert(rvsdg.root()->NumBottomNodes() == 1); + assert(&*(rvsdg.root()->BottomNodes().begin()) == structuralNode); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/RegionTests-BottomNodeTests", BottomNodeTests) From d6fdf5e82b5e1c25d542467f7db43c518a22a71f Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Thu, 10 Oct 2024 08:14:39 +0200 Subject: [PATCH 106/170] [AndersenAgnostic] Backport fixes and expanded statistics for Andersen analysis (#652) During work on the `andersen-no-flags` branch, some other changes were made as I discovered issues or wanted more statistics: - There was a bug where Hybrid Cycle Detection could invalidate an iterator while using it. - The topological Worklist algorithm has been made slightly less terrible by actually tracking work items now, and only visiting nodes that need to be visited during the sweep. The `ObserverWorklist` is now called the `WorkSet` instead, as that name describes its new job (an unordered worklist) better. This is likely closer to what has been done by Pearce07, but it still does not make it the best worklist iteration order. - Make the `JLM_ANDERSEN_TEST_ALL_CONFIGS` environment variable take an integer controlling the amount of iterations each config should be used. The bulk of this PR is related to getting more statistics: - Count attempts at growing points-to sets, as well as how many items are removed from points-to sets during solving (e.g due to unification or PIP). 
- Add code to the `PointsToGraph` class to count the number of edges, as well as the number of pointer-pointee relations, since `RegisterNode`s can represent multiple registers at once. - Add the flag `CanPoint()` to `PointerObject`s to formalize the existence of memory objects whose points-to sets we do not track (such as `int a`). This makes the statistics more useful and comparable, as well as removing potentially tons of edges in the `PointsToGraph` going out from these memory objects. - Add statistics for counting pointer-pointee relations to be able to say something about precision, regardless of unification. This PR ended up being a bit larger than I wanted, let me know if you want it split up --- jlm/llvm/opt/alias-analyses/Andersen.cpp | 240 ++++++++++++++---- jlm/llvm/opt/alias-analyses/Andersen.hpp | 1 + .../opt/alias-analyses/PointerObjectSet.cpp | 139 ++++++---- .../opt/alias-analyses/PointerObjectSet.hpp | 76 ++++-- jlm/llvm/opt/alias-analyses/PointsToGraph.cpp | 32 +++ jlm/llvm/opt/alias-analyses/PointsToGraph.hpp | 12 + jlm/util/Statistics.hpp | 4 + jlm/util/Worklist.hpp | 38 ++- .../llvm/opt/alias-analyses/TestAndersen.cpp | 19 +- .../TestDifferencePropagation.cpp | 8 +- .../alias-analyses/TestPointerObjectSet.cpp | 110 ++++---- tests/jlm/util/TestWorklist.cpp | 30 ++- 12 files changed, 494 insertions(+), 215 deletions(-) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 8587cd7ae..1c13a7ed9 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -154,9 +154,10 @@ Andersen::Configuration::GetAllConfigurations() class Andersen::Statistics final : public util::Statistics { static constexpr const char * NumPointerObjects_ = "#PointerObjects"; - static constexpr const char * NumPointerObjectsWithImplicitPointees_ = - "#PointerObjectsWithImplicitPointees"; + static constexpr const char * NumMemoryPointerObjects_ = "#MemoryPointerObjects"; + static 
constexpr const char * NumMemoryPointerObjectsCanPoint_ = "#MemoryPointerObjectsCanPoint"; static constexpr const char * NumRegisterPointerObjects_ = "#RegisterPointerObjects"; + // A PointerObject of Register kind can represent multiple outputs in RVSDG. Sum them up. static constexpr const char * NumRegistersMappedToPointerObject_ = "#RegistersMappedToPointerObject"; @@ -169,12 +170,12 @@ class Andersen::Statistics final : public util::Statistics static constexpr const char * Configuration_ = "Configuration"; - // Offline technique statistics + // ====== Offline technique statistics ====== static constexpr const char * NumUnificationsOvs_ = "#Unifications(OVS)"; static constexpr const char * NumConstraintsRemovedOfflineNorm_ = "#ConstraintsRemoved(OfflineNorm)"; - // Solver statistics + // ====== Solver statistics ====== static constexpr const char * NumNaiveSolverIterations_ = "#NaiveSolverIterations"; static constexpr const char * WorklistPolicy_ = "WorklistPolicy"; @@ -184,7 +185,7 @@ class Andersen::Statistics final : public util::Statistics "#WorklistSolverWorkItemsNewPointees"; static constexpr const char * NumTopologicalWorklistSweeps_ = "#TopologicalWorklistSweeps"; - // Online technique statistics + // ====== Online technique statistics ====== static constexpr const char * NumOnlineCyclesDetected_ = "#OnlineCyclesDetected"; static constexpr const char * NumOnlineCycleUnifications_ = "#OnlineCycleUnifications"; @@ -196,15 +197,56 @@ class Andersen::Statistics final : public util::Statistics static constexpr const char * NumPIPExplicitPointeesRemoved_ = "#PIPExplicitPointeesRemoved"; - // After solving statistics - static constexpr const char * NumEscapedMemoryObjects_ = "#EscapedMemoryObjects"; + // ====== During solving points-to set statistics ====== + // How many times a pointee has been attempted inserted into an explicit points-to set. + // If a set with 10 elements is unioned into another set, that counts as 10 insertion attempts. 
+ static constexpr const char * NumSetInsertionAttempts_ = "#PointsToSetInsertionAttempts"; + // How many explicit pointees have been removed from points-to sets during solving. + // Removal can only happen due to unification, or explicitly when using PIP + static constexpr const char * NumExplicitPointeesRemoved_ = "#ExplicitPointeesRemoved"; + + // ====== After solving statistics ====== + // How many disjoint sets of PointerObjects exist static constexpr const char * NumUnificationRoots_ = "#UnificationRoots"; - // These next measurements only count flags and pointees of unification roots + // How many memory objects where CanPoint() == true have escaped + static constexpr const char * NumCanPointsEscaped_ = "#CanPointsEscaped"; + // How many memory objects where CanPoint() == false have escaped + static constexpr const char * NumCantPointsEscaped_ = "#CantPointsEscaped"; + + // The number of explicit pointees, counting only unification roots + static constexpr const char * NumExplicitPointees_ = "#ExplicitPointees"; + // Only unification roots may have explicit pointees, but all PointerObjects in the unification + // marked CanPoint effectively have those explicit pointees. Add up the number of such relations. + static constexpr const char * NumExplicitPointsToRelations_ = "#ExplicitPointsToRelations"; + + // The number of PointsToExternal flags, counting only unification roots static constexpr const char * NumPointsToExternalFlags_ = "#PointsToExternalFlags"; + // Among all PointerObjects marked CanPoint, how many are in a unification pointing to external + static constexpr const char * NumPointsToExternalRelations_ = "#PointsToExternalRelations"; + + // Among all PointerObjects marked CanPoint and NOT flagged as pointing to external, + // add up how many pointer-pointee relations they have. 
+ static constexpr const char * NumExplicitPointsToRelationsAmongPrecise_ = + "#ExplicitPointsToRelationsAmongPrecise"; + + // The number of PointeesEscaping flags, counting only unification roots static constexpr const char * NumPointeesEscapingFlags_ = "#PointeesEscapingFlags"; - static constexpr const char * NumExplicitPointees_ = "#ExplicitPointees"; - // If a pointee is both implicit (through PointsToExternal flag) and explicit + // Among all PointerObjects marked CanPoint, how many are in a unification where pointees escape. + static constexpr const char * NumPointeesEscapingRelations_ = "#PointeesEscapingRelations"; + + // The total number of pointer-pointee relations, counting both explicit and implicit. + // In the case of doubled up pointees, the same pointer-pointee relation is not counted twice. + static constexpr const char * NumPointsToRelations_ = "#PointsToRelations"; + + // The number of doubled up pointees, only counting unification roots static constexpr const char * NumDoubledUpPointees_ = "#DoubledUpPointees"; + // The number of doubled up pointees, counting all PointerObjects marked CanPoint() + static constexpr const char * NumDoubledUpPointsToRelations_ = "#DoubledUpPointsToRelations"; + + // Number of unifications where no members have the CanPoint flag + static constexpr const char * NumCantPointUnifications_ = "#CantPointUnifications"; + // In unifications where no member CanPoint, add up their explicit pointees + static constexpr const char * NumCantPointExplicitPointees_ = "#CantPointExplicitPointees"; static constexpr const char * AnalysisTimer_ = "AnalysisTimer"; static constexpr const char * SetAndConstraintBuildingTimer_ = "SetAndConstraintBuildingTimer"; @@ -244,12 +286,9 @@ class Andersen::Statistics final : public util::Statistics GetTimer(SetAndConstraintBuildingTimer_).stop(); AddMeasurement(NumPointerObjects_, set.NumPointerObjects()); - AddMeasurement( - NumPointerObjectsWithImplicitPointees_, - 
set.NumPointerObjectsWithImplicitPointees()); - AddMeasurement( - NumRegisterPointerObjects_, - set.NumPointerObjectsOfKind(PointerObjectKind::Register)); + AddMeasurement(NumMemoryPointerObjects_, set.NumMemoryPointerObjects()); + AddMeasurement(NumMemoryPointerObjectsCanPoint_, set.NumMemoryPointerObjectsCanPoint()); + AddMeasurement(NumRegisterPointerObjects_, set.NumRegisterPointerObjects()); AddMeasurement(NumRegistersMappedToPointerObject_, set.GetRegisterMap().size()); size_t numSupersetConstraints = 0; @@ -352,8 +391,8 @@ class Andersen::Statistics final : public util::Statistics if (statistics.NumLazyCycleUnifications) AddMeasurement(NumLazyCycleUnifications_, *statistics.NumLazyCycleUnifications); - if (statistics.NumExplicitPointeesRemoved) - AddMeasurement(NumPIPExplicitPointeesRemoved_, *statistics.NumExplicitPointeesRemoved); + if (statistics.NumPipExplicitPointeesRemoved) + AddMeasurement(NumPIPExplicitPointeesRemoved_, *statistics.NumPipExplicitPointeesRemoved); } void @@ -365,18 +404,67 @@ class Andersen::Statistics final : public util::Statistics void AddStatisticsFromSolution(const PointerObjectSet & set) { - size_t numEscapedMemoryObjects = 0; + AddMeasurement(NumSetInsertionAttempts_, set.GetNumSetInsertionAttempts()); + AddMeasurement(NumExplicitPointeesRemoved_, set.GetNumExplicitPointeesRemoved()); + size_t numUnificationRoots = 0; + + size_t numCanPointEscaped = 0; + size_t numCantPointEscaped = 0; + + size_t numExplicitPointees = 0; + size_t numExplicitPointsToRelations = 0; + size_t numExplicitPointeeRelationsAmongPrecise = 0; + size_t numPointsToExternalFlags = 0; + size_t numPointsToExternalRelations = 0; size_t numPointeesEscapingFlags = 0; - size_t numExplicitPointees = 0; - size_t numDoubleUpPointees = 0; + size_t numPointeesEscapingRelations = 0; + + size_t numDoubledUpPointees = 0; + size_t numDoubledUpPointsToRelations = 0; + + std::vector unificationHasCanPoint(set.NumPointerObjects(), false); for (PointerObjectIndex i = 0; i < 
set.NumPointerObjects(); i++) { if (set.HasEscaped(i)) - numEscapedMemoryObjects++; + { + if (set.CanPoint(i)) + numCanPointEscaped++; + else + numCantPointEscaped++; + } + const auto & pointees = set.GetPointsToSet(i); + + if (set.CanPoint(i)) + { + numExplicitPointsToRelations += pointees.Size(); + numPointeesEscapingRelations += set.HasPointeesEscaping(i); + + if (set.IsPointingToExternal(i)) + { + numPointsToExternalRelations++; + for (auto pointee : pointees.Items()) + { + if (set.HasEscaped(pointee)) + numDoubledUpPointsToRelations++; + } + } + else + { + // When comparing precision, the number of explicit pointees is more interesting among + // pointers that do not also point to external. + numExplicitPointeeRelationsAmongPrecise += pointees.Size(); + } + + // This unification has at least one CanPoint member + unificationHasCanPoint[set.GetUnificationRoot(i)] = true; + } + + // The rest of this loop is only concerned with unification roots, as they are the only + // PointerObjects that actually have explicit pointees or flags if (!set.IsUnificationRoot(i)) continue; @@ -386,21 +474,56 @@ class Andersen::Statistics final : public util::Statistics if (set.HasPointeesEscaping(i)) numPointeesEscapingFlags++; - const auto & pointees = set.GetPointsToSet(i); numExplicitPointees += pointees.Size(); // If the PointsToExternal flag is set, any explicit pointee that has escaped is doubled up if (set.IsPointingToExternal(i)) for (auto pointee : pointees.Items()) if (set.HasEscaped(pointee)) - numDoubleUpPointees++; + numDoubledUpPointees++; + } + + // Now find unifications where no member is marked CanPoint, as any explicit pointee is a waste + size_t numCantPointUnifications = 0; + size_t numCantPointExplicitPointees = 0; + for (PointerObjectIndex i = 0; i < set.NumPointerObjects(); i++) + { + if (!set.IsUnificationRoot(i)) + continue; + if (unificationHasCanPoint[i]) + continue; + numCantPointUnifications++; + numCantPointExplicitPointees += 
set.GetPointsToSet(i).Size(); } - AddMeasurement(NumEscapedMemoryObjects_, numEscapedMemoryObjects); + AddMeasurement(NumUnificationRoots_, numUnificationRoots); + AddMeasurement(NumCanPointsEscaped_, numCanPointEscaped); + AddMeasurement(NumCantPointsEscaped_, numCantPointEscaped); + + AddMeasurement(NumExplicitPointees_, numExplicitPointees); + AddMeasurement(NumExplicitPointsToRelations_, numExplicitPointsToRelations); + AddMeasurement( + NumExplicitPointsToRelationsAmongPrecise_, + numExplicitPointeeRelationsAmongPrecise); + AddMeasurement(NumPointsToExternalFlags_, numPointsToExternalFlags); + AddMeasurement(NumPointsToExternalRelations_, numPointsToExternalRelations); AddMeasurement(NumPointeesEscapingFlags_, numPointeesEscapingFlags); - AddMeasurement(NumExplicitPointees_, numExplicitPointees); - AddMeasurement(NumDoubledUpPointees_, numDoubleUpPointees); + AddMeasurement(NumPointeesEscapingRelations_, numPointeesEscapingRelations); + + // Calculate the total number of pointer-pointee relations by adding up all explicit and + // implicit relations, and removing the doubled up relations. 
+ size_t numPointsToRelations = + numExplicitPointsToRelations - numDoubledUpPointsToRelations + + numPointsToExternalRelations * (numCanPointEscaped + numCantPointEscaped); + + AddMeasurement(NumPointsToRelations_, numPointsToRelations); + + AddMeasurement(NumDoubledUpPointees_, numDoubledUpPointees); + AddMeasurement(NumDoubledUpPointsToRelations_, numDoubledUpPointsToRelations); + + AddMeasurement(NumCantPointUnifications_, numCantPointUnifications); + AddMeasurement(NumCantPointExplicitPointees_, numCantPointExplicitPointees); } void @@ -440,6 +563,9 @@ class Andersen::Statistics final : public util::Statistics AddMeasurement( Label::NumPointsToGraphExternalMemorySources, pointsToGraph.GetExternalMemoryNode().NumSources()); + auto [numEdges, numPointsToRelations] = pointsToGraph.NumEdges(); + AddMeasurement(Label::NumPointsToGraphEdges, numEdges); + AddMeasurement(Label::NumPointsToGraphPointsToRelations, numPointsToRelations); } void @@ -511,11 +637,13 @@ Andersen::AnalyzeSimpleNode(const rvsdg::simple_node & node) void Andersen::AnalyzeAlloca(const rvsdg::simple_node & node) { - JLM_ASSERT(is(&node)); + const auto allocaOp = util::AssertedCast(&node.operation()); const auto & outputRegister = *node.output(0); const auto outputRegisterPO = Set_->CreateRegisterPointerObject(outputRegister); - const auto allocaPO = Set_->CreateAllocaMemoryObject(node); + + const bool canPoint = IsOrContainsPointerType(*allocaOp->ValueType()); + const auto allocaPO = Set_->CreateAllocaMemoryObject(node, canPoint); Constraints_->AddPointerPointeeConstraint(outputRegisterPO, allocaPO); } @@ -526,7 +654,9 @@ Andersen::AnalyzeMalloc(const rvsdg::simple_node & node) const auto & outputRegister = *node.output(0); const auto outputRegisterPO = Set_->CreateRegisterPointerObject(outputRegister); - const auto mallocPO = Set_->CreateMallocMemoryObject(node); + + // We do not know what types will be stored in the malloc, so let it track pointers + const auto mallocPO = 
Set_->CreateMallocMemoryObject(node, true); Constraints_->AddPointerPointeeConstraint(outputRegisterPO, mallocPO); } @@ -863,11 +993,14 @@ Andersen::AnalyzeDelta(const delta::node & delta) // Get the result register from the subregion auto & resultRegister = *delta.result()->origin(); + // If the type of the delta can point, the analysis should track its set of possible pointees + bool canPoint = IsOrContainsPointerType(delta.type()); + // Create a global memory object representing the global variable - const auto globalPO = Set_->CreateGlobalMemoryObject(delta); + const auto globalPO = Set_->CreateGlobalMemoryObject(delta, canPoint); - // If the subregion result is a pointer, make the global point to the same variables - if (IsOrContainsPointerType(resultRegister.type())) + // If the initializer subregion result is a pointer, make the global point to what it points to + if (canPoint) { const auto resultRegisterPO = Set_->GetRegisterPointerObject(resultRegister); Constraints_->AddConstraint(SupersetConstraint(globalPO, resultRegisterPO)); @@ -1054,12 +1187,10 @@ Andersen::AnalyzeRvsdg(const rvsdg::graph & graph) if (!IsOrContainsPointerType(argument.type())) continue; - // TODO: Mark the created ImportMemoryObject based on it being a function or a variable - // Functions and non-pointer typed globals can not point to other MemoryObjects, - // so letting them be ShouldTrackPointees() == false aids analysis. - // Create a memory PointerObject representing the target of the external symbol // We can assume that two external symbols don't alias, clang does. + // Imported memory objects are always marked as CanPoint() == false, due to the fact that + // the analysis can't ever hope to track points-to sets of external memory with any precision. 
const auto importObjectPO = Set_->CreateImportMemoryObject(argument); // Create a register PointerObject representing the address value itself @@ -1157,7 +1288,9 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis statistics->StartAndersenStatistics(module.Rvsdg()); // Check environment variables for debugging flags - const bool testAllConfigs = std::getenv(ENV_TEST_ALL_CONFIGS); + size_t testAllConfigsIterations = 0; + if (auto testAllConfigsString = std::getenv(ENV_TEST_ALL_CONFIGS)) + testAllConfigsIterations = std::stoi(testAllConfigsString); const bool doubleCheck = std::getenv(ENV_DOUBLE_CHECK); const bool dumpGraphs = std::getenv(ENV_DUMP_SUBSET_GRAPH); @@ -1167,7 +1300,7 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis // If solving multiple times, make a copy of the original constraint set std::pair, std::unique_ptr> copy; - if (testAllConfigs || doubleCheck) + if (testAllConfigsIterations || doubleCheck) copy = Constraints_->Clone(); // Draw subset graph both before and after solving @@ -1180,7 +1313,7 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis if (dumpGraphs) { auto & graph = Constraints_->DrawSubsetGraph(writer); - graph.AppendToLabel("After Solving"); + graph.AppendToLabel("After Solving with " + Config_.ToString()); writer.OutputAllGraphs(std::cout, util::GraphOutputFormat::Dot); } @@ -1190,23 +1323,23 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis statisticsCollector.CollectDemandedStatistics(std::move(statistics)); // Solve again if double-checking against naive is enabled - if (testAllConfigs || doubleCheck) + if (testAllConfigsIterations || doubleCheck) { if (doubleCheck) std::cerr << "Double checking Andersen analysis using naive solving" << std::endl; - // If double-checking, only use the naive configuration. Otherwise try all configurations + // If double-checking, only use the naive configuration. 
Otherwise, try all configurations std::vector configs; - if (testAllConfigs) + if (testAllConfigsIterations) configs = Configuration::GetAllConfigurations(); else configs.push_back(Configuration::NaiveSolverConfiguration()); - // If testing all, benchmarking is being done, so do 50 iterations of all configurations. - // Double-checking against Set_ only needs to be done once per configuration - const auto iterations = testAllConfigs ? 50 : 1; + // If testing all configurations, do it as many times as requested. + // Otherwise, do it at least once + const auto iterations = std::max(testAllConfigsIterations, 1); - for (auto i = 0; i < iterations; i++) + for (size_t i = 0; i < iterations; i++) { for (const auto & config : configs) { @@ -1296,10 +1429,6 @@ Andersen::ConstructPointsToGraphFromPointerObjectSet( // PointerObject's points-to set. auto applyPointsToSet = [&](PointsToGraph::Node & node, PointerObjectIndex index) { - // PointerObjects marked as not tracking pointees should not point to anything - if (!set.ShouldTrackPointees(index)) - return; - // Add all PointsToGraph nodes who should point to external to the list if (set.IsPointingToExternal(index)) pointsToExternal.push_back(&node); @@ -1336,7 +1465,9 @@ Andersen::ConstructPointsToGraphFromPointerObjectSet( if (memoryNodes[idx] == nullptr) continue; // Skip all nodes that are not MemoryNodes - applyPointsToSet(*memoryNodes[idx], idx); + // Add outgoing edges to nodes representing pointer values + if (set.CanPoint(idx)) + applyPointsToSet(*memoryNodes[idx], idx); if (set.HasEscaped(idx)) { @@ -1358,6 +1489,11 @@ Andersen::ConstructPointsToGraphFromPointerObjectSet( } statistics.StopExternalToAllEscapedStatistics(); + // We do not use the unknown node, and do not give the external node any targets + JLM_ASSERT(pointsToGraph->GetExternalMemoryNode().NumTargets() == 0); + JLM_ASSERT(pointsToGraph->GetUnknownMemoryNode().NumSources() == 0); + JLM_ASSERT(pointsToGraph->GetUnknownMemoryNode().NumTargets() == 0); 
+ statistics.StopPointsToGraphConstructionStatistics(*pointsToGraph); return pointsToGraph; } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index e37c326a1..c2ced42cf 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -30,6 +30,7 @@ class Andersen final : public AliasAnalysis /** * Environment variable that when set, triggers analyzing the program with every single * valid combination of Configuration flags. + * Must be set to a number, that determines how many times each config is used. */ static inline const char * const ENV_TEST_ALL_CONFIGS = "JLM_ANDERSEN_TEST_ALL_CONFIGS"; diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index 0400416e7..ab73d4f48 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -26,12 +26,12 @@ namespace jlm::llvm::aa static constexpr bool ENABLE_UNIFICATION = true; PointerObjectIndex -PointerObjectSet::AddPointerObject(PointerObjectKind kind) +PointerObjectSet::AddPointerObject(PointerObjectKind kind, bool canPoint) { JLM_ASSERT(PointerObjects_.size() < std::numeric_limits::max()); PointerObjectIndex index = PointerObjects_.size(); - PointerObjects_.emplace_back(kind); + PointerObjects_.emplace_back(kind, canPoint); if constexpr (ENABLE_UNIFICATION) { PointerObjectParents_.push_back(index); @@ -48,23 +48,35 @@ PointerObjectSet::NumPointerObjects() const noexcept } size_t -PointerObjectSet::NumPointerObjectsWithImplicitPointees() const noexcept +PointerObjectSet::NumPointerObjectsOfKind(PointerObjectKind kind) const noexcept { size_t count = 0; for (auto & pointerObject : PointerObjects_) { - count += pointerObject.CanTrackPointeesImplicitly(); + count += pointerObject.Kind == kind; } return count; } size_t -PointerObjectSet::NumPointerObjectsOfKind(PointerObjectKind kind) const noexcept 
+PointerObjectSet::NumRegisterPointerObjects() const noexcept +{ + return NumPointerObjectsOfKind(PointerObjectKind::Register); +} + +size_t +PointerObjectSet::NumMemoryPointerObjects() const noexcept +{ + return NumPointerObjects() - NumRegisterPointerObjects(); +} + +size_t +PointerObjectSet::NumMemoryPointerObjectsCanPoint() const noexcept { size_t count = 0; for (auto & pointerObject : PointerObjects_) { - count += pointerObject.Kind == kind; + count += !pointerObject.IsRegister() && pointerObject.CanPoint(); } return count; } @@ -73,7 +85,7 @@ PointerObjectIndex PointerObjectSet::CreateRegisterPointerObject(const rvsdg::output & rvsdgOutput) { JLM_ASSERT(RegisterMap_.count(&rvsdgOutput) == 0); - return RegisterMap_[&rvsdgOutput] = AddPointerObject(PointerObjectKind::Register); + return RegisterMap_[&rvsdgOutput] = AddPointerObject(PointerObjectKind::Register, true); } PointerObjectIndex @@ -105,35 +117,37 @@ PointerObjectSet::MapRegisterToExistingPointerObject( PointerObjectIndex PointerObjectSet::CreateDummyRegisterPointerObject() { - return AddPointerObject(PointerObjectKind::Register); + return AddPointerObject(PointerObjectKind::Register, true); } PointerObjectIndex -PointerObjectSet::CreateAllocaMemoryObject(const rvsdg::node & allocaNode) +PointerObjectSet::CreateAllocaMemoryObject(const rvsdg::node & allocaNode, bool canPoint) { JLM_ASSERT(AllocaMap_.count(&allocaNode) == 0); - return AllocaMap_[&allocaNode] = AddPointerObject(PointerObjectKind::AllocaMemoryObject); + return AllocaMap_[&allocaNode] = + AddPointerObject(PointerObjectKind::AllocaMemoryObject, canPoint); } PointerObjectIndex -PointerObjectSet::CreateMallocMemoryObject(const rvsdg::node & mallocNode) +PointerObjectSet::CreateMallocMemoryObject(const rvsdg::node & mallocNode, bool canPoint) { JLM_ASSERT(MallocMap_.count(&mallocNode) == 0); - return MallocMap_[&mallocNode] = AddPointerObject(PointerObjectKind::MallocMemoryObject); + return MallocMap_[&mallocNode] = + 
AddPointerObject(PointerObjectKind::MallocMemoryObject, canPoint); } PointerObjectIndex -PointerObjectSet::CreateGlobalMemoryObject(const delta::node & deltaNode) +PointerObjectSet::CreateGlobalMemoryObject(const delta::node & deltaNode, bool canPoint) { JLM_ASSERT(GlobalMap_.count(&deltaNode) == 0); - return GlobalMap_[&deltaNode] = AddPointerObject(PointerObjectKind::GlobalMemoryObject); + return GlobalMap_[&deltaNode] = AddPointerObject(PointerObjectKind::GlobalMemoryObject, canPoint); } PointerObjectIndex PointerObjectSet::CreateFunctionMemoryObject(const lambda::node & lambdaNode) { JLM_ASSERT(!FunctionMap_.HasKey(&lambdaNode)); - const auto pointerObject = AddPointerObject(PointerObjectKind::FunctionMemoryObject); + const auto pointerObject = AddPointerObject(PointerObjectKind::FunctionMemoryObject, false); FunctionMap_.Insert(&lambdaNode, pointerObject); return pointerObject; } @@ -156,7 +170,10 @@ PointerObjectIndex PointerObjectSet::CreateImportMemoryObject(const GraphImport & importNode) { JLM_ASSERT(ImportMap_.count(&importNode) == 0); - auto importMemoryObject = AddPointerObject(PointerObjectKind::ImportMemoryObject); + + // All import memory objects are marked as CanPoint() == false, as the analysis has no chance at + // tracking the points-to set of pointers located in separate modules + auto importMemoryObject = AddPointerObject(PointerObjectKind::ImportMemoryObject, false); ImportMap_[&importNode] = importMemoryObject; // Memory objects defined in other modules are definitely not private to this module @@ -208,10 +225,10 @@ PointerObjectSet::GetPointerObjectKind(PointerObjectIndex index) const noexcept } bool -PointerObjectSet::ShouldTrackPointees(PointerObjectIndex index) const noexcept +PointerObjectSet::CanPoint(PointerObjectIndex index) const noexcept { JLM_ASSERT(index < NumPointerObjects()); - return PointerObjects_[index].ShouldTrackPointees(); + return PointerObjects_[index].CanPoint(); } bool @@ -339,8 +356,13 @@ 
PointerObjectSet::UnifyPointerObjects(PointerObjectIndex object1, PointerObjectI PointerObjectParents_[oldRoot] = newRoot; // Copy over all pointees, and clean the pointee set from the old root - PointsToSets_[newRoot].UnionWith(PointsToSets_[oldRoot]); - PointsToSets_[oldRoot].Clear(); + auto & oldRootPointees = PointsToSets_[oldRoot]; + + NumSetInsertionAttempts_ += oldRootPointees.Size(); + PointsToSets_[newRoot].UnionWith(oldRootPointees); + + NumExplicitPointeesRemoved_ += oldRootPointees.Size(); + oldRootPointees.Clear(); return newRoot; } @@ -362,6 +384,7 @@ PointerObjectSet::AddToPointsToSet(PointerObjectIndex pointer, PointerObjectInde const auto pointerRoot = GetUnificationRoot(pointer); + NumSetInsertionAttempts_++; return PointsToSets_[pointerRoot].Insert(pointee); } @@ -382,6 +405,8 @@ PointerObjectSet::PropagateNewPointees( auto & P_super = PointsToSets_[supersetRoot]; auto & P_sub = PointsToSets_[subsetRoot]; + NumSetInsertionAttempts_ += P_sub.Size(); + bool modified = false; for (PointerObjectIndex pointee : P_sub.Items()) { @@ -426,6 +451,7 @@ void PointerObjectSet::RemoveAllPointees(PointerObjectIndex index) { auto root = GetUnificationRoot(index); + NumExplicitPointeesRemoved_ += PointsToSets_[root].Size(); PointsToSets_[root].Clear(); } @@ -485,6 +511,18 @@ PointerObjectSet::HasIdenticalSolAs(const PointerObjectSet & other) const return true; } +size_t +PointerObjectSet::GetNumSetInsertionAttempts() const noexcept +{ + return NumSetInsertionAttempts_; +} + +size_t +PointerObjectSet::GetNumExplicitPointeesRemoved() const noexcept +{ + return NumExplicitPointeesRemoved_; +} + // Makes P(superset) a superset of P(subset) bool SupersetConstraint::ApplyDirectly(PointerObjectSet & set) @@ -792,18 +830,18 @@ HandleEscapedFunction( markAsPointsToExternal(argumentPO.value()); } - // All results of pointer type need to be flagged as HasEscaped + // All results of pointer type need to be flagged as pointees escaping for (auto & result : 
lambdaNode.fctresults()) { const auto resultPO = set.TryGetRegisterPointerObject(*result.origin()); if (!resultPO) continue; - // Nothing to be done if it is already marked as escaped - if (set.HasEscaped(resultPO.value())) + // Nothing to be done if it is already marked as pointees escaping + if (set.HasPointeesEscaping(resultPO.value())) continue; - // Mark the result register as escaping any pointees it may have + // Mark the result register as making any pointees it may have escape markAsPointeesEscaping(resultPO.value()); } } @@ -961,8 +999,8 @@ CreateSubsetGraphNodeLabel(PointerObjectSet & set, PointerObjectIndex index) label << "#" << set.GetUnificationRoot(index); } - if (!set.ShouldTrackPointees(index)) - label << "\nNOTRACK"; + if (!set.CanPoint(index)) + label << "\nCantPoint"; return label.str(); } @@ -1610,7 +1648,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) statistics.NumHybridCycleUnifications = 0; if constexpr (EnablePreferImplicitPointees) - statistics.NumExplicitPointeesRemoved = 0; + statistics.NumPipExplicitPointeesRemoved = 0; // The worklist, initialized with every unification root Worklist worklist; @@ -1757,7 +1795,10 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) // if any unification happens, the result must be added to the worklist bool anyUnification = false; - for (const auto pointee : newPointees.Items()) + + // Make a copy of the set, as the node itself may be unified, invalidating newPointees + auto unificationMembers = newPointees; + for (const auto pointee : unificationMembers.Items()) { const auto pointeeRoot = Set_.GetUnificationRoot(pointee); if (pointeeRoot == refUnificationRoot) @@ -1772,8 +1813,8 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { JLM_ASSERT(Set_.IsUnificationRoot(refUnificationRoot)); worklist.PushWorkItem(refUnificationRoot); - // If the current node became unified due to HCD, stop the current work item 
visit. - if (Set_.GetUnificationRoot(node) == refUnificationRoot) + // If the node itself was unified, the new root has been added to the worklist, so exit + if (refUnificationRoot == Set_.GetUnificationRoot(node)) return; } } @@ -1840,7 +1881,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) // If this node can track all pointees implicitly, remove its explicit nodes if (EnablePreferImplicitPointees && Set_.CanTrackPointeesImplicitly(node)) { - *(statistics.NumExplicitPointeesRemoved) += Set_.GetPointsToSet(node).Size(); + *(statistics.NumPipExplicitPointeesRemoved) += Set_.GetPointsToSet(node).Size(); // This also causes newPointees to become empty RemoveAllPointees(node); } @@ -1961,13 +2002,12 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) FlushNewSupersetEdges(); }; - // The observer worklist only contains one bit of state: - // "has anything been pushed since last reset?" + // The Workset worklist only remembers which work items have been pushed. // It does not provide an iteration order, so if any work item need to be revisited, - // we do a topological traversal over all work items instead, called a "sweep". + // we do a topological traversal over all work items instead, visiting ones in the Workset. // Performing topological sorting also detects all cycles, which are unified away. 
constexpr bool useTopologicalTraversal = - std::is_same_v>; + std::is_same_v>; if constexpr (useTopologicalTraversal) { @@ -1976,10 +2016,9 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) statistics.NumTopologicalWorklistSweeps = 0; - while (worklist.HasPushBeenMade()) + while (worklist.HasMoreWorkItems()) { (*statistics.NumTopologicalWorklistSweeps)++; - worklist.ResetPush(); // First perform a topological sort of the entire subset graph, with respect to simple edges util::FindStronglyConnectedComponents( @@ -1988,7 +2027,7 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) sccIndex, topologicalOrder); - // Visit all nodes in topological order + // Visit nodes in topological order, if they are in the workset. // cycles will result in neighbouring nodes in the topologicalOrder sharing sccIndex for (size_t i = 0; i < topologicalOrder.size(); i++) { @@ -2001,13 +2040,19 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) { // This node is in a cycle with the next node, unify them nextNode = UnifyPointerObjects(node, nextNode); + // Make sure the new root is visited + worklist.RemoveWorkItem(node); + worklist.PushWorkItem(nextNode); continue; } } - // Otherwise handle the work item (only unification roots) - if (Set_.IsUnificationRoot(node)) + // If this work item is in the workset, handle it. Repeat immediately if it gets re-added. 
+ while (worklist.HasWorkItem(node)) + { + worklist.RemoveWorkItem(node); HandleWorkItem(node); + } } } } @@ -2060,7 +2105,7 @@ PointerObjectConstraintSet::SolveUsingWorklist( constexpr bool vPreferImplicitPointees = decltype(tPreferImplicitPointees)::value; if constexpr ( - std::is_same_v> + std::is_same_v> && (vOnlineCycleDetection || vHybridCycleDetection || vLazyCycleDetection)) { JLM_UNREACHABLE("Can not enable online, hybrid or lazy cycle detection with the topo policy"); @@ -2084,11 +2129,11 @@ PointerObjectConstraintSet::SolveUsingWorklist( }; std::variant< - typename util::LrfWorklist *, - typename util::TwoPhaseLrfWorklist *, - typename util::ObserverWorklist *, - typename util::LifoWorklist *, - typename util::FifoWorklist *> + util::LrfWorklist *, + util::TwoPhaseLrfWorklist *, + util::Workset *, + util::LifoWorklist *, + util::FifoWorklist *> policyVariant; if (policy == WorklistSolverPolicy::LeastRecentlyFired) @@ -2096,7 +2141,7 @@ PointerObjectConstraintSet::SolveUsingWorklist( else if (policy == WorklistSolverPolicy::TwoPhaseLeastRecentlyFired) policyVariant = (util::TwoPhaseLrfWorklist *)nullptr; else if (policy == WorklistSolverPolicy::TopologicalSort) - policyVariant = (util::ObserverWorklist *)nullptr; + policyVariant = (util::Workset *)nullptr; else if (policy == WorklistSolverPolicy::LastInFirstOut) policyVariant = (util::LifoWorklist *)nullptr; else if (policy == WorklistSolverPolicy::FirstInFirstOut) diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index ff303dfbd..09f7cabf1 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -59,6 +59,11 @@ class PointerObjectSet final // The kind of pointer object PointerObjectKind Kind : util::BitWidthOfEnum(PointerObjectKind::COUNT); + // If set, this pointer object may point to other pointer objects. 
+ // If unset, the analysis should make no attempt at tracking what this PointerObject may target. + // The final PointsToGraph will not have any outgoing edges for this object. + const uint8_t CanPointFlag : 1; + // This memory object's address is known outside the module. // Can only be true on memory objects. uint8_t HasEscaped : 1; @@ -73,15 +78,23 @@ class PointerObjectSet final // This flag is implied by HasEscaped uint8_t PointsToExternal : 1; - explicit PointerObject(PointerObjectKind kind) + explicit PointerObject(PointerObjectKind kind, bool canPoint) : Kind(kind), + CanPointFlag(canPoint), HasEscaped(0), PointeesEscaping(0), PointsToExternal(0) { JLM_ASSERT(kind != PointerObjectKind::COUNT); - if (!ShouldTrackPointees()) + // Ensure that certain kinds of PointerObject always CanPoint or never CanPoint + if (kind == PointerObjectKind::FunctionMemoryObject + || kind == PointerObjectKind::ImportMemoryObject) + JLM_ASSERT(!CanPoint()); + else if (kind == PointerObjectKind::Register) + JLM_ASSERT(CanPoint()); + + if (!CanPoint()) { // No attempt is made at tracking pointees, so use these flags to inform others PointeesEscaping = 1; @@ -104,16 +117,13 @@ class PointerObjectSet final /** * Some memory objects can only be pointed to, but never themselves contain pointers. - * To avoid tracking their pointees, they are instead marked as both PointsToExternal and - * PointeesEscaping. This makes their points-to set equivalent to the set of all escaped - * memory objects, which means the set of explicit pointees can be empty. * When converting the analysis result to a PointsToGraph, these PointerObjects get no pointees. - * @return true if the analysis should attempt track the points-to set of this PointerObject. + * @return true if the analysis tracks the points-to set of this PointerObject. 
*/ [[nodiscard]] bool - ShouldTrackPointees() const noexcept + CanPoint() const noexcept { - return Kind != PointerObjectKind::FunctionMemoryObject; + return CanPointFlag; } /** @@ -157,11 +167,18 @@ class PointerObjectSet final std::unordered_map ImportMap_; + // How many items have been attempted added to explicit points-to sets + size_t NumSetInsertionAttempts_ = 0; + + // How many pointees have been removed from points-to sets. + // Explicit pointees can only be removed through unification, and the remove method + size_t NumExplicitPointeesRemoved_ = 0; + /** * Internal helper function for adding PointerObjects, use the Create* methods instead */ [[nodiscard]] PointerObjectIndex - AddPointerObject(PointerObjectKind kind); + AddPointerObject(PointerObjectKind kind, bool canPoint); /** * Internal helper function for making P(superset) a superset of P(subset), with a callback. @@ -175,20 +192,28 @@ class PointerObjectSet final NewPointeeFunctor & onNewPointee); public: + PointerObjectSet() = default; + [[nodiscard]] size_t NumPointerObjects() const noexcept; /** - * @return the number of PointerObjects where CanTrackPointeesImplicitly() is true + * @return the number of PointerObjects in the set matching the specified \p kind. */ [[nodiscard]] size_t - NumPointerObjectsWithImplicitPointees() const noexcept; + NumPointerObjectsOfKind(PointerObjectKind kind) const noexcept; /** - * @return the number of PointerObjects in the set matching the specified \p kind. + * @return the number of PointerObjects in the set representing virtual registers */ [[nodiscard]] size_t - NumPointerObjectsOfKind(PointerObjectKind kind) const noexcept; + NumRegisterPointerObjects() const noexcept; + + [[nodiscard]] size_t + NumMemoryPointerObjects() const noexcept; + + [[nodiscard]] size_t + NumMemoryPointerObjectsCanPoint() const noexcept; /** * Creates a PointerObject of Register kind and maps the rvsdg output to the new PointerObject. 
@@ -238,13 +263,13 @@ class PointerObjectSet final CreateDummyRegisterPointerObject(); [[nodiscard]] PointerObjectIndex - CreateAllocaMemoryObject(const rvsdg::node & allocaNode); + CreateAllocaMemoryObject(const rvsdg::node & allocaNode, bool canPoint); [[nodiscard]] PointerObjectIndex - CreateMallocMemoryObject(const rvsdg::node & mallocNode); + CreateMallocMemoryObject(const rvsdg::node & mallocNode, bool canPoint); [[nodiscard]] PointerObjectIndex - CreateGlobalMemoryObject(const delta::node & deltaNode); + CreateGlobalMemoryObject(const delta::node & deltaNode, bool canPoint); /** * Creates a PointerObject of Function kind associated with the given \p lambdaNode. @@ -302,7 +327,7 @@ class PointerObjectSet final * @return true if the PointerObject with the given \p index can point, otherwise false */ [[nodiscard]] bool - ShouldTrackPointees(PointerObjectIndex index) const noexcept; + CanPoint(PointerObjectIndex index) const noexcept; /** * @return true if the PointerObject with the given \p index is a Register @@ -461,6 +486,21 @@ class PointerObjectSet final */ [[nodiscard]] bool HasIdenticalSolAs(const PointerObjectSet & other) const; + + /** + * @return the number of pointees that have been inserted, or were attempted inserted + * but already existed, among all points-to sets in this PointerObjectSet. + * Unioning a set x into another makes |x| insertion attempts. + */ + [[nodiscard]] size_t + GetNumSetInsertionAttempts() const noexcept; + + /** + * @return the number of pointees that have been removed from points-to sets, + * due to either unification, or the RemoveAllPointees() method. + */ + [[nodiscard]] size_t + GetNumExplicitPointeesRemoved() const noexcept; }; /** @@ -872,7 +912,7 @@ class PointerObjectConstraintSet final * When Prefer Implicit Pointees is enabled, and a node's pointees can be tracked fully * implicitly, its set of explicit pointees is cleared. 
*/ - std::optional NumExplicitPointeesRemoved; + std::optional NumPipExplicitPointeesRemoved; }; explicit PointerObjectConstraintSet(PointerObjectSet & set) diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp index 6c9388149..078476ebb 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp @@ -161,6 +161,38 @@ PointsToGraph::AddImportNode(std::unique_ptr node) return *tmp; } +std::pair +PointsToGraph::NumEdges() const noexcept +{ + size_t numEdges = 0; + + auto countMemoryNodes = [&](auto iterable) + { + for (const MemoryNode & node : iterable) + { + numEdges += node.NumTargets(); + } + }; + + countMemoryNodes(AllocaNodes()); + countMemoryNodes(DeltaNodes()); + countMemoryNodes(ImportNodes()); + countMemoryNodes(LambdaNodes()); + countMemoryNodes(MallocNodes()); + + numEdges += GetExternalMemoryNode().NumTargets(); + + // For register nodes, the number of edges and number of points-to relations is different + size_t numPointsToRelations = numEdges; + for (auto & registerNode : RegisterNodes()) + { + numEdges += registerNode.NumTargets(); + numPointsToRelations += registerNode.NumTargets() * registerNode.GetOutputs().Size(); + } + + return std::make_pair(numEdges, numPointsToRelations); +} + bool PointsToGraph::IsSupergraphOf(const jlm::llvm::aa::PointsToGraph & subgraph) const { diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp index 67c1c5a65..740df1da0 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp @@ -359,6 +359,18 @@ class PointsToGraph final PointsToGraph::ImportNode & AddImportNode(std::unique_ptr node); + /** + * Gets the total number of edges in the PointsToGraph. + * + * In addition, RegisterNodes can represent multiple registers, + * in which case each outgoing edge represents multiple points-to relations. 
+ * The total number of points-to relations is also returned. + * + * @return a pair (number of edges, number of points-to relations) + */ + [[nodiscard]] std::pair + NumEdges() const noexcept; + /** * Checks if this PointsToGraph is a supergraph of \p subgraph. * Every node and every edge in the subgraph needs to have corresponding nodes and edges diff --git a/jlm/util/Statistics.hpp b/jlm/util/Statistics.hpp index 4f808ea0f..7664456f4 100644 --- a/jlm/util/Statistics.hpp +++ b/jlm/util/Statistics.hpp @@ -223,6 +223,10 @@ class Statistics inline static const char * NumPointsToGraphUnknownMemorySources = "#PointsToGraphUnknownMemorySources"; + inline static const char * NumPointsToGraphEdges = "#PointsToGraphEdges"; + inline static const char * NumPointsToGraphPointsToRelations = + "#PointsToGraphPointsToRelations"; + static inline const char * Timer = "Time"; }; diff --git a/jlm/util/Worklist.hpp b/jlm/util/Worklist.hpp index a3ed2c159..d5184474a 100644 --- a/jlm/util/Worklist.hpp +++ b/jlm/util/Worklist.hpp @@ -295,59 +295,53 @@ class TwoPhaseLrfWorklist final : public Worklist }; /** - * A fake worklist that only holds a single bit of information: - * "Has any item been pushed since the last reset?" - * Used to implement the Topological worklist policy, which is not technically a worklist policy + * A fake worklist that remembers which work items have been pushed, + * but without providing any kind of iteration interface for accessing them. + * Each work item must be explicitly removed by name. + * Used to implement the Topological worklist policy, which is not technically a worklist policy. * @tparam T the type of the work items. 
* @see Worklist */ template -class ObserverWorklist final : public Worklist +class Workset final : public Worklist { public: - ~ObserverWorklist() override = default; + ~Workset() override = default; - ObserverWorklist() = default; + Workset() = default; [[nodiscard]] bool HasMoreWorkItems() const noexcept override { - JLM_UNREACHABLE("Dummy worklist"); + return !PushedItems_.IsEmpty(); } T PopWorkItem() override { - JLM_UNREACHABLE("Dummy worklist"); + JLM_UNREACHABLE("The Workset does not provide an iteration order"); } void - PushWorkItem(T item [[maybe_unused]]) override + PushWorkItem(T item) override { - PushMade_ = true; + PushedItems_.Insert(item); } - /** - * @return true if the PushWorkItem method has been called since the last time - * ResetPush() was called. - */ [[nodiscard]] bool - HasPushBeenMade() const noexcept + HasWorkItem(T item) const noexcept { - return PushMade_; + return PushedItems_.Contains(item); } - /** - * Makes the dummy worklist forget about being pushed to. 
- */ void - ResetPush() + RemoveWorkItem(T item) { - PushMade_ = false; + PushedItems_.Remove(item); } private: - bool PushMade_ = false; + util::HashSet PushedItems_; }; } diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index 41f07c2d7..1980d0381 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -1059,13 +1059,13 @@ TestConstructPointsToGraph() // Arrange a very standard set of memory objects and registers PointerObjectSet set; - auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); + auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), true); auto allocaR = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput()); auto import0 = set.CreateImportMemoryObject(rvsdg.GetImportOutput()); auto importR = set.CreateRegisterPointerObject(rvsdg.GetImportOutput()); auto lambda0 = set.CreateFunctionMemoryObject(rvsdg.GetLambdaNode()); auto lambdaR = set.CreateRegisterPointerObject(rvsdg.GetLambdaOutput()); - auto malloc0 = set.CreateMallocMemoryObject(rvsdg.GetMallocNode()); + auto malloc0 = set.CreateMallocMemoryObject(rvsdg.GetMallocNode(), true); auto mallocR = set.CreateRegisterPointerObject(rvsdg.GetMallocOutput()); set.AddToPointsToSet(allocaR, alloca0); set.AddToPointsToSet(importR, import0); @@ -1073,7 +1073,7 @@ TestConstructPointsToGraph() set.AddToPointsToSet(mallocR, malloc0); // Make an exception for the delta node: Map its output to importR's PointerObject instead - [[maybe_unused]] auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode()); + [[maybe_unused]] auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode(), true); set.MapRegisterToExistingPointerObject(rvsdg.GetDeltaOutput(), importR); // Make alloca0 point to lambda0 @@ -1123,16 +1123,21 @@ TestConstructPointsToGraph() // But it does share pointees with the other nodes assert(TargetsExactly(mallocNode, { &lambdaNode 
})); - // deltaNode has escaped, and should be pointed to by mallocR and itself, as well as import0 - assert(deltaNode.NumSources() == 3); + // deltaNode has escaped, and should be pointed to by mallocR and itself + assert(deltaNode.NumSources() == 2); auto & externalMemory = ptg->GetExternalMemoryNode(); - // deltaNode and importNode point to everything that has escaped + // deltaNode points to everything that has escaped assert(TargetsExactly(deltaNode, { &deltaNode, &importNode, &externalMemory })); - assert(TargetsExactly(importNode, { &deltaNode, &importNode, &externalMemory })); + // importNode points to nothing, as it is not marked "CanPoint" + assert(TargetsExactly(importNode, {})); // mallocR points to mallocNode, as well as everything that has escaped assert(TargetsExactly(mallocRNode, { &mallocNode, &deltaNode, &importNode, &externalMemory })); + // Adding up the out-edges for all nodes + auto [_, numPointsToRelations] = ptg->NumEdges(); + assert(numPointsToRelations == 2 * 3 + 1 + 1 + 1 + 3 + 4); + return 0; } JLM_UNIT_TEST_REGISTER( diff --git a/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp b/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp index df00e4942..b8a9eab7e 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestDifferencePropagation.cpp @@ -25,10 +25,10 @@ TestTracksDifferences() PointerObjectSet set; auto r0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); auto r1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); - auto a0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); - auto a1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); - auto a2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); - auto a3 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(3)); + auto a0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); + auto a1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); + auto 
a2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); + auto a3 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(3), true); // Let r0 -> a0 and r0 -> a3 before difference tracking even begins set.AddToPointsToSet(r0, a0); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 56ff1fadf..aa2be6f9e 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -30,7 +30,7 @@ TestFlagFunctions() PointerObjectSet set; auto registerPO = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput()); - assert(set.ShouldTrackPointees(registerPO)); + assert(set.CanPoint(registerPO)); assert(set.IsPointerObjectRegister(registerPO)); // PointeesEscaping flag @@ -49,11 +49,8 @@ TestFlagFunctions() assert(!set.MarkAsPointingToExternal(registerPO)); assert(set.IsPointingToExternal(registerPO)); - // Test that Escaped implies PointsToExternal, for memory objects - auto allocaPO = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); - - // alloca may both point - assert(set.ShouldTrackPointees(allocaPO)); + // Create a new PointerObject to start with empty flags + auto allocaPO = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), true); assert(!set.IsPointerObjectRegister(allocaPO)); // Escaping means another module can write a pointer to you. 
@@ -67,10 +64,6 @@ TestFlagFunctions() // Already marked with these flags, trying to set them again makes no difference assert(!set.MarkAsPointingToExternal(allocaPO)); assert(!set.MarkAsPointeesEscaping(allocaPO)); - - // The analysis should not bother tracking the pointees of lambdas - auto lambdaPO = set.CreateFunctionMemoryObject(rvsdg.GetLambdaNode()); - assert(!set.ShouldTrackPointees(lambdaPO)); } // Test creating pointer objects for each type of memory node @@ -89,9 +82,9 @@ TestCreatePointerObjects() const auto dummy0 = set.CreateDummyRegisterPointerObject(); // For PointerObjects representing MemoryObjects, there is only one Create function - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); - const auto malloc0 = set.CreateMallocMemoryObject(rvsdg.GetMallocNode()); - const auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode()); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), false); + const auto malloc0 = set.CreateMallocMemoryObject(rvsdg.GetMallocNode(), true); + const auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode(), true); const auto lambda0 = set.CreateFunctionMemoryObject(rvsdg.GetLambdaNode()); const auto import0 = set.CreateImportMemoryObject(rvsdg.GetImportOutput()); @@ -107,12 +100,27 @@ TestCreatePointerObjects() assert(set.GetPointerObjectKind(import0) == PointerObjectKind::ImportMemoryObject); // Most pointer objects don't start out as escaped - assert(!set.HasEscaped(dummy0) && !set.IsPointingToExternal(dummy0)); - assert(!set.HasEscaped(alloca0) && !set.IsPointingToExternal(alloca0)); - assert(!set.HasEscaped(malloc0) && !set.IsPointingToExternal(malloc0)); - assert(!set.HasEscaped(delta0) && !set.IsPointingToExternal(delta0)); - // But import memory objects have always escaped - assert(set.HasEscaped(import0) && set.IsPointingToExternal(import0)); + assert(!set.HasEscaped(dummy0)); + assert(!set.HasEscaped(alloca0)); + assert(!set.HasEscaped(malloc0)); + 
assert(!set.HasEscaped(delta0)); + assert(!set.HasEscaped(lambda0)); + // ...but imported objects are always escaped + assert(set.HasEscaped(import0)); + // ...which also means it points to external, and has its pointees escaping + assert(set.IsPointingToExternal(import0) && set.HasPointeesEscaping(import0)); + + // Some kinds of PointerObjects have CanPoint() configurable is the constructor + assert(!set.CanPoint(alloca0)); + assert(set.CanPoint(malloc0)); + assert(set.CanPoint(delta0)); + // ...while others have implied values of CanPoint() + assert(set.CanPoint(register0)); + assert(!set.CanPoint(lambda0)); + assert(!set.CanPoint(import0)); + + // CanPoint() == false implies pointing to external and having all pointees escaping + assert(set.IsPointingToExternal(alloca0) && set.HasPointeesEscaping(alloca0)); // Registers have helper function for looking up existing PointerObjects assert(set.GetRegisterPointerObject(rvsdg.GetAllocaOutput()) == register0); @@ -177,8 +185,8 @@ TestPointerObjectUnificationPointees() PointerObjectSet set; auto lambda0 = set.CreateFunctionMemoryObject(rvsdg.GetLambdaNode()); - auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); - auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode()); + auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), true); + auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode(), true); set.AddToPointsToSet(alloca0, lambda0); assert(set.GetPointsToSet(alloca0).Size() == 1); @@ -222,7 +230,7 @@ TestAddToPointsToSet() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), false); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); assert(set.GetPointsToSet(reg0).Size() == 0); @@ -245,11 +253,11 @@ TestMakePointsToSetSuperset() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = 
set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), false); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), false); const auto reg1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); - const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); + const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), false); set.AddToPointsToSet(reg0, alloca0); set.AddToPointsToSet(reg1, alloca1); @@ -281,9 +289,9 @@ TestClonePointerObjectSet() PointerObjectSet set; const auto register0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput()); const auto dummy0 = set.CreateDummyRegisterPointerObject(); - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); - const auto malloc0 = set.CreateMallocMemoryObject(rvsdg.GetMallocNode()); - const auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode()); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), false); + const auto malloc0 = set.CreateMallocMemoryObject(rvsdg.GetMallocNode(), true); + const auto delta0 = set.CreateGlobalMemoryObject(rvsdg.GetDeltaNode(), false); const auto lambda0 = set.CreateFunctionMemoryObject(rvsdg.GetLambdaNode()); const auto import0 = set.CreateImportMemoryObject(rvsdg.GetImportOutput()); @@ -328,11 +336,11 @@ TestSupersetConstraint() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + const auto alloca1 = 
set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); const auto reg1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); - const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); + const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); const auto reg2 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(2)); set.AddToPointsToSet(reg0, alloca0); @@ -386,11 +394,11 @@ TestStoreConstraintDirectly() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); const auto reg1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); - const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); + const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); const auto reg2 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(2)); set.AddToPointsToSet(reg0, alloca0); @@ -427,11 +435,11 @@ TestLoadConstraintDirectly() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); const auto reg1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); - const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); + const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); const auto 
reg2 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(2)); set.AddToPointsToSet(reg0, alloca0); @@ -506,8 +514,8 @@ TestFunctionCallConstraint() const auto lambdaFRegister = set.CreateRegisterPointerObject(*rvsdg.lambda_f->output()); const auto lambdaFArgumentX = set.CreateRegisterPointerObject(*rvsdg.lambda_f->fctargument(0)); const auto lambdaFArgumentY = set.CreateRegisterPointerObject(*rvsdg.lambda_f->fctargument(1)); - const auto allocaX = set.CreateAllocaMemoryObject(*rvsdg.alloca_x); - const auto allocaY = set.CreateAllocaMemoryObject(*rvsdg.alloca_y); + const auto allocaX = set.CreateAllocaMemoryObject(*rvsdg.alloca_x, true); + const auto allocaY = set.CreateAllocaMemoryObject(*rvsdg.alloca_y, true); const auto allocaXRegister = set.CreateRegisterPointerObject(*rvsdg.alloca_x->output(0)); const auto allocaYRegister = set.CreateRegisterPointerObject(*rvsdg.alloca_y->output(0)); @@ -537,9 +545,9 @@ TestAddPointsToExternalConstraint() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); const auto reg1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); PointerObjectConstraintSet constraints(set); @@ -580,9 +588,9 @@ TestAddRegisterContentEscapedConstraint() rvsdg.InitializeTest(); PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), false); const auto reg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(0)); - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); + const auto alloca1 = 
set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), false); const auto reg1 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput(1)); PointerObjectConstraintSet constraints(set); @@ -616,7 +624,7 @@ TestDrawSubsetGraph() // Arrange PointerObjectSet set; - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), true); const auto allocaReg0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput()); const auto dummy0 = set.CreateDummyRegisterPointerObject(); @@ -680,8 +688,8 @@ TestDrawSubsetGraph() // Check that the function contains the word "function0" auto & functionNode = graph.GetNode(function0); assert(StringContains(functionNode.GetLabel(), "function0")); - // Since functions don't track pointees, they should have NOTRACK - assert(StringContains(functionNode.GetLabel(), "NOTRACK")); + // Since functions don't track pointees, they should have CantPoint + assert(StringContains(functionNode.GetLabel(), "CantPoint")); // They should also both point to external, and escape all pointees assert(StringContains(functionNode.GetLabel(), "{+}e")); @@ -711,10 +719,10 @@ TestPointerObjectConstraintSetSolve(Args... 
args) // %2 = alloca 8 (variable v2) // %3 = alloca 8 (variable v3) // %4 = alloca 8 (variable v4) - const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0)); - const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1)); - const auto alloca3 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2)); - const auto alloca4 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(3)); + const auto alloca1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); + const auto alloca2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); + const auto alloca3 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); + const auto alloca4 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(3), true); // Now start building constraints based on instructions PointerObjectConstraintSet constraints(set); @@ -830,7 +838,7 @@ TestClonePointerObjectConstraintSet() PointerObjectSet set; const auto register0 = set.CreateRegisterPointerObject(rvsdg.GetAllocaOutput()); - const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode()); + const auto alloca0 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(), true); set.AddToPointsToSet(register0, alloca0); // Create a dummy register that will point to alloca0 after solving diff --git a/tests/jlm/util/TestWorklist.cpp b/tests/jlm/util/TestWorklist.cpp index 7286a4d58..06b848c70 100644 --- a/tests/jlm/util/TestWorklist.cpp +++ b/tests/jlm/util/TestWorklist.cpp @@ -135,22 +135,24 @@ JLM_UNIT_TEST_REGISTER( TestTwoPhaseLrfWorklist) static int -TestObserverWorklist() +TestWorkset() { - jlm::util::ObserverWorklist wl; - assert(!wl.HasPushBeenMade()); - wl.PushWorkItem(7); - assert(wl.HasPushBeenMade()); - wl.ResetPush(); - assert(!wl.HasPushBeenMade()); - wl.ResetPush(); - assert(!wl.HasPushBeenMade()); - wl.PushWorkItem(7); - assert(wl.HasPushBeenMade()); + jlm::util::Workset ws; + assert(!ws.HasMoreWorkItems()); + assert(!ws.HasWorkItem(7)); + ws.PushWorkItem(7); + 
assert(ws.HasMoreWorkItems()); + assert(ws.HasWorkItem(7)); + ws.PushWorkItem(5); + assert(ws.HasWorkItem(5)); + assert(ws.HasWorkItem(7)); + ws.RemoveWorkItem(7); + assert(!ws.HasWorkItem(7)); + assert(ws.HasWorkItem(5)); + ws.RemoveWorkItem(5); + assert(!ws.HasMoreWorkItems()); return 0; } -JLM_UNIT_TEST_REGISTER( - "jlm/llvm/opt/alias-analyses/TestWorklist-TestObserverWorklist", - TestObserverWorklist) +JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/alias-analyses/TestWorklist-TestWorkset", TestWorkset) From c3d9ae8a98808c2409827c5318fc39f307d547bc Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Thu, 10 Oct 2024 18:46:20 +0200 Subject: [PATCH 107/170] unbreak release build: no side effects in JLM_ASSERT (#653) Operations that have side effect should not go into JLM_ASSERT. Otherwise, they will not be executed in release builds. --- jlm/rvsdg/node.cpp | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index e7296ef33..427bb30d5 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -113,7 +113,8 @@ output::remove_user(jlm::rvsdg::input * user) { if (!node->has_users()) { - JLM_ASSERT(region()->AddBottomNode(*node)); + bool wasAdded = region()->AddBottomNode(*node); + JLM_ASSERT(wasAdded); } } } @@ -127,7 +128,8 @@ output::add_user(jlm::rvsdg::input * user) { if (!node->has_users()) { - JLM_ASSERT(region()->RemoveBottomNode(*node)); + bool wasRemoved = region()->RemoveBottomNode(*node); + JLM_ASSERT(wasRemoved); } } users_.insert(user); @@ -180,7 +182,8 @@ node::node(std::unique_ptr op, rvsdg::Region * region) region_(region), operation_(std::move(op)) { - JLM_ASSERT(region->AddBottomNode(*this)); + bool wasAdded = region->AddBottomNode(*this); + JLM_ASSERT(wasAdded); region->top_nodes.push_back(this); region->nodes.push_back(this); } @@ -188,7 +191,8 @@ node::node(std::unique_ptr op, rvsdg::Region * region) node::~node() { outputs_.clear(); - 
JLM_ASSERT(region()->RemoveBottomNode(*this)); + bool wasRemoved = region()->RemoveBottomNode(*this); + JLM_ASSERT(wasRemoved); if (ninputs() == 0) region()->top_nodes.erase(this); From 735ce0a85df11f82b8836e47dc755bda9f1e07c7 Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Fri, 11 Oct 2024 21:36:42 +0200 Subject: [PATCH 108/170] Introduce accessor and convenience API for node type matching (#654) Introduce input::GetOwner and output::GetOwner as the canonical and type-generic way to determine what "kind" an output/input is (either output/input to a node or region argument/result). Introduce TryGetOwnerNode and TryGetRegionParentNode as convenience API to easily match different node kinds. --- jlm/rvsdg/node.cpp | 79 ++++++++++++++++++++ jlm/rvsdg/node.hpp | 171 +++++++++++++++++++++++++++++++++++++++++++ jlm/rvsdg/region.cpp | 12 +++ jlm/rvsdg/region.hpp | 140 +++++++++++++++++++++++++++++++++++ 4 files changed, 402 insertions(+) diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 427bb30d5..ccf7b3e30 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -167,6 +167,12 @@ node_input::node_input( node_(node) {} +[[nodiscard]] std::variant +node_input::GetOwner() const noexcept +{ + return node_; +} + /* node_output class */ node_output::node_output(jlm::rvsdg::node * node, std::shared_ptr type) @@ -174,6 +180,12 @@ node_output::node_output(jlm::rvsdg::node * node, std::shared_ptr +node_output::GetOwner() const noexcept +{ + return node_; +} + /* node class */ node::node(std::unique_ptr op, rvsdg::Region * region) @@ -341,4 +353,71 @@ normalize(jlm::rvsdg::node * node) return nf->normalize_node(node); } +/** + \page def_use_inspection Inspecting the graph and matching against different operations + + When inspecting the graph for analysis it is necessary to identify + different nodes/operations and structures. 
Depending on the direction, + the two fundamental questions of interest are: + + - what is the origin of a value, what operation is computing it? + - what are the users of a particular value, what operations depend on it? + + This requires resolving the type of operation a specific \ref rvsdg::input + or \ref rvsdg::output belong to. Every \ref rvsdg::output is one of the following: + + - the output of a node representing an operation + - the entry argument into a region + + Likewise, every \ref rvsdg::input is one of the following: + + - the input of a node representing an operation + - the exit result of a region + + Analysis code can determine which of the two is the case using + \ref rvsdg::output::GetOwner and \ref rvsdg::input::GetOwner, respectively, + and then branch deeper based on its results. For convenience, code + can more directly match against the specific kinds of nodes using + the following convenience functions: + + - \ref rvsdg::TryGetOwnerNode checks if the owner of an output/input + is a graph node of the requested kind + - \ref rvsdg::TryGetRegionParentNode checks if the output/input is + a region entry argument / exit result, and if the parent node + of the region is of the requested kind + + Example: + \code + if (auto lambda = rvsdg::TryGetOwnerNode(def)) + { + // This is an output of a lambda node -- so this must + // be a function definition. + } + else if (auto gamma = rvsdg::TryGetOwnerNode(def)) + { + // This is an output of a gamma node -- so it is potentially + // dependent on evaluating a condition. + } + else if (auto gamma = rvsdg::TryGetRegionParentNode(def)) + { + // This is an entry argument to a region inside a gamma node. + } + \endcode + + Similarly, the following variants of the accessor functions + assert that the nodes are of requested type and will throw + an exception otherwise: + + - \ref rvsdg::AssertGetOwnerNode asserts that the owner of an + output/input is a graph node of the requested kind and + returns it. 
+ - \ref rvsdg::AssertGetRegionParentNode asserts that the + output/input is a region entry argument / exit result, + and that the parent node of the region is of the requested + kind + + These are mostly suitable for unit tests rather, or for the + rare circumstances that the type of node can be assumed to + be known statically. +*/ } diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 962d59a7c..81f4dd8d1 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -12,6 +12,7 @@ #include #include #include +#include #include #include @@ -101,6 +102,9 @@ class input [[nodiscard]] static rvsdg::node * GetNode(const rvsdg::input & input) noexcept; + [[nodiscard]] virtual std::variant + GetOwner() const noexcept = 0; + template class iterator { @@ -374,6 +378,9 @@ class output virtual std::string debug_string() const; + [[nodiscard]] virtual std::variant + GetOwner() const noexcept = 0; + /** * Retrieve the associated node from \p output if \p output is derived from * jlm::rvsdg::node_output. @@ -585,6 +592,9 @@ class node_input : public jlm::rvsdg::input return node_; } + [[nodiscard]] std::variant + GetOwner() const noexcept override; + private: jlm::rvsdg::node * node_; }; @@ -609,6 +619,9 @@ class node_output : public jlm::rvsdg::output return no != nullptr ? no->node() : nullptr; } + [[nodiscard]] std::variant + GetOwner() const noexcept override; + private: jlm::rvsdg::node * node_; }; @@ -878,6 +891,164 @@ class node std::vector> outputs_; }; +/** + * \brief Checks if this is an input to a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param input + * Input to be checked. + * + * \returns + * Owning node of requested type or nullptr. + * + * Checks if the specified input belongs to a node of requested type. + * If this is the case, returns a pointer to the node of matched type. 
+ * If this is not the case (because either this as a region exit + * result or its owning node is not of the requested type), returns + * nullptr. + * + * See \ref def_use_inspection. + */ +template +inline NodeType * +TryGetOwnerNode(const rvsdg::input & input) noexcept +{ + auto owner = input.GetOwner(); + if (auto node = std::get_if(&owner)) + { + return dynamic_cast(*node); + } + else + { + return nullptr; + } +} + +/** + * \brief Checks if this is an output to a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param output + * Output to be checked. + * + * \returns + * Owning node of requested type or nullptr. + * + * Checks if the specified output belongs to a node of requested type. + * If this is the case, returns a pointer to the node of matched type. + * If this is not the case (because either this as a region entry + * argument or its owning node is not of the requested type), returns + * nullptr. + * + * See \ref def_use_inspection. + */ +template +inline NodeType * +TryGetOwnerNode(const rvsdg::output & output) noexcept +{ + auto owner = output.GetOwner(); + if (auto node = std::get_if(&owner)) + { + return dynamic_cast(*node); + } + else + { + return nullptr; + } +} + +/** + * \brief Asserts that this is an input to a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param input + * Input to be checked. + * + * \returns + * Owning node of requested type. + * + * Checks if the specified input belongs to a node of requested type. + * If this is the case, returns a reference to the node of matched type, + * otherwise throws std::logic_error. + * + * See \ref def_use_inspection. 
+ */ +template +inline NodeType & +AssertGetOwnerNode(const rvsdg::input & input) +{ + auto node = TryGetOwnerNode(input); + if (!node) + { + throw std::logic_error(std::string("expected node of type ") + typeid(NodeType).name()); + } + return *node; +} + +/** + * \brief Asserts that this is an output of a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param output + * Output to be checked. + * + * \returns + * Owning node of requested type. + * + * Checks if the specified output belongs to a node of requested type. + * If this is the case, returns a reference to the node of matched type, + * otherwise throws std::logic_error. + * + * See \ref def_use_inspection. + */ +template +inline NodeType & +AssertGetOwnerNode(const rvsdg::output & output) +{ + auto node = TryGetOwnerNode(output); + if (!node) + { + throw std::logic_error(std::string("expected node of type ") + typeid(NodeType).name()); + } + return *node; +} + +inline Region * +TryGetOwnerRegion(const rvsdg::input & input) noexcept +{ + auto owner = input.GetOwner(); + if (auto region = std::get_if(&owner)) + { + return *region; + } + else + { + return nullptr; + } +} + +inline Region * +TryGetOwnerRegion(const rvsdg::output & output) noexcept +{ + auto owner = output.GetOwner(); + if (auto region = std::get_if(&owner)) + { + return *region; + } + else + { + return nullptr; + } +} + static inline std::vector operands(const jlm::rvsdg::node * node) { diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 14f461fbd..0fe95526c 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -43,6 +43,12 @@ RegionArgument::RegionArgument( } } +[[nodiscard]] std::variant +RegionArgument::GetOwner() const noexcept +{ + return region(); +} + RegionResult::~RegionResult() noexcept { on_input_destroy(this); @@ -73,6 +79,12 @@ RegionResult::RegionResult( } } +[[nodiscard]] std::variant +RegionResult::GetOwner() const noexcept +{ + return region(); +} + 
Region::~Region() noexcept { on_region_destroy(this); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index fd4a41a4a..939fc4457 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -84,6 +84,9 @@ class RegionArgument : public output virtual RegionArgument & Copy(rvsdg::Region & region, structural_input * input) = 0; + [[nodiscard]] std::variant + GetOwner() const noexcept override; + private: structural_input * input_; }; @@ -143,6 +146,9 @@ class RegionResult : public input virtual RegionResult & Copy(rvsdg::output & origin, structural_output * output) = 0; + [[nodiscard]] std::variant + GetOwner() const noexcept override; + private: structural_output * output_; }; @@ -648,6 +654,140 @@ nsimpnodes(const rvsdg::Region * region) noexcept; size_t ninputs(const rvsdg::Region * region) noexcept; +/** + * \brief Checks if this is a result of a region inside a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param input + * Input to be checked. + * + * \returns + * Node of requested type to which the region belongs. + * + * Checks if the specified input is a region exit result belonging + * to a node of specified type. + * If this is the case, returns a pointer to the node of matched type. + * If this is not the case (because either this is an input to a node + * or or because the node owning the region is of a different kind, + * or because this is the root region), returns nullptr. + * + * See \ref def_use_inspection. + */ +template +inline NodeType * +TryGetRegionParentNode(const rvsdg::input & input) noexcept +{ + auto region = TryGetOwnerRegion(input); + if (region) + { + return dynamic_cast(region->node()); + } + else + { + return nullptr; + } +} + +/** + * \brief Checks if this is an argument of a region inside a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param output + * Output to be checked. 
+ * + * \returns + * Node of requested type to which the region belongs. + * + * Checks if the specified input is a region entry argument belonging + * to a node of specified type. + * If this is the case, returns a pointer to the node of matched type. + * If this is not the case (because either this is an input to a node + * or or because the node owning the region is of a different kind, + * or because this is the root region), returns nullptr. + * + * See \ref def_use_inspection. + */ +template +inline NodeType * +TryGetRegionParentNode(const rvsdg::output & output) noexcept +{ + auto region = TryGetOwnerRegion(output); + if (region) + { + return dynamic_cast(region->node()); + } + else + { + return nullptr; + } +} + +/** + * \brief Asserts that this is a result of a region inside a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param input + * Input to be checked. + * + * \returns + * Node of requested type to which the region belongs. + * + * Checks if the specified input is a region exit result belonging + * to a node of specified type. + * If this is the case, returns a reference to the node of matched type, + * otherwise throws an exception. + * + * See \ref def_use_inspection. + */ +template +inline NodeType & +AssertGetRegionParentNode(const rvsdg::input & input) +{ + auto node = TryGetRegionParentNode(input); + if (!node) + { + throw std::logic_error(std::string("expected node of type ") + typeid(NodeType).name()); + } + return *node; +} + +/** + * \brief Asserts that this is an argument of a region inside a node of specified type. + * + * \tparam NodeType + * The node type to be matched against. + * + * \param output + * Output to be checked. + * + * \returns + * Node of requested type to which the region belongs. + * + * Checks if the specified input is a region entry argument belonging + * to a node of specified type. 
+ * If this is the case, returns a reference to the node of matched type, + * otherwise throws an exception. + * + * See \ref def_use_inspection. + */ +template +inline NodeType & +AssertGetRegionParentNode(const rvsdg::output & output) +{ + auto node = TryGetRegionParentNode(output); + if (!node) + { + throw std::logic_error(std::string("expected node of type ") + typeid(NodeType).name()); + } + return *node; +} + } // namespace #endif From b8f81a0b69d3c3212f0708800352cc792b586596 Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Sat, 12 Oct 2024 18:00:33 +0200 Subject: [PATCH 109/170] Make RegionArgument and RegionResult instantiable (#656) Make both RegionArgument and RegionResult non-abstract and provide "Create" functions. This allows slowly phasing out the derived argument and result classes, replacing them with just RegionArgument and RegionResult. This allows to remove a lot of boilerplate code as the number of classes to be implemented and matched shrinks noticably. Later refactoring should rename the input/output classes and also allow to clean them up so they do not even need to be virtual anymore. 
--- jlm/rvsdg/region.cpp | 36 ++++++++++++++++++++++++++ jlm/rvsdg/region.hpp | 60 +++++++++++++++++++++++++++++++++++++++----- 2 files changed, 90 insertions(+), 6 deletions(-) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 0fe95526c..2fd3c34ff 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -49,6 +49,23 @@ RegionArgument::GetOwner() const noexcept return region(); } +RegionArgument & +RegionArgument::Copy(rvsdg::Region & region, structural_input * input) +{ + return RegionArgument::Create(region, input, Type()); +} + +RegionArgument & +RegionArgument::Create( + rvsdg::Region & region, + rvsdg::structural_input * input, + std::shared_ptr type) +{ + auto argument = new RegionArgument(®ion, input, std::move(type)); + region.append_argument(argument); + return *argument; +} + RegionResult::~RegionResult() noexcept { on_input_destroy(this); @@ -85,6 +102,25 @@ RegionResult::GetOwner() const noexcept return region(); } +RegionResult & +RegionResult::Copy(rvsdg::output & origin, structural_output * output) +{ + return RegionResult::Create(*origin.region(), origin, output, origin.Type()); +} + +RegionResult & +RegionResult::Create( + rvsdg::Region & region, + rvsdg::output & origin, + structural_output * output, + std::shared_ptr type) +{ + JLM_ASSERT(origin.region() == ®ion); + auto result = new RegionResult(®ion, &origin, output, std::move(type)); + region.append_result(result); + return *result; +} + Region::~Region() noexcept { on_region_destroy(this); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 939fc4457..5e9af4ea0 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -50,13 +50,11 @@ class RegionArgument : public output ~RegionArgument() noexcept override; -protected: RegionArgument( rvsdg::Region * region, structural_input * input, std::shared_ptr type); -public: RegionArgument(const RegionArgument &) = delete; RegionArgument(RegionArgument &&) = delete; @@ -82,11 +80,35 @@ class RegionArgument : public 
output * @return A reference to the copied argument. */ virtual RegionArgument & - Copy(rvsdg::Region & region, structural_input * input) = 0; + Copy(rvsdg::Region & region, structural_input * input); [[nodiscard]] std::variant GetOwner() const noexcept override; + /** + * \brief Creates region entry argument. + * + * \param region + * Region to create argument for. + * + * \param input + * (optional) input of parent node associated with this + * argument (deprecated, will be removed soon). + * + * \param type + * Result type. + * + * \returns + * Reference to the created argument. + * + * Creates an argument and registers it with the given region. + */ + static RegionArgument & + Create( + rvsdg::Region & region, + rvsdg::structural_input * input, + std::shared_ptr type); + private: structural_input * input_; }; @@ -110,14 +132,12 @@ class RegionResult : public input ~RegionResult() noexcept override; -protected: RegionResult( rvsdg::Region * region, rvsdg::output * origin, structural_output * output, std::shared_ptr type); -public: RegionResult(const RegionResult &) = delete; RegionResult(RegionResult &&) = delete; @@ -144,11 +164,39 @@ class RegionResult : public input * @return A reference to the copied result. */ virtual RegionResult & - Copy(rvsdg::output & origin, structural_output * output) = 0; + Copy(rvsdg::output & origin, structural_output * output); [[nodiscard]] std::variant GetOwner() const noexcept override; + /** + * \brief Create region exit result. + * + * \param region + * Region to create result for. + * + * \param origin + * Assigned result value. + * + * \param output + * (optional) output of parent node associated with this + * result (deprecated, will be removed soon). + * + * \param type + * Result type + * + * \returns + * Reference to the created result. + * + * Creates a result and registers it with the given region. 
+ */ + static RegionResult & + Create( + rvsdg::Region & region, + rvsdg::output & origin, + structural_output * output, + std::shared_ptr type); + private: structural_output * output_; }; From d5a6f0a95213f569fa7af9df7daa468566cfeb46 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 13 Oct 2024 00:00:12 +0200 Subject: [PATCH 110/170] Rename structural_node class to StructuralNode (#657) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 2 +- .../backend/rvsdg2rhls/GammaConversion.cpp | 4 +- .../backend/rvsdg2rhls/ThetaConversion.cpp | 4 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-buffers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-forks.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-sinks.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/check-rhls.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 4 +- .../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- .../rvsdg2rhls/remove-redundant-buf.cpp | 2 +- .../rvsdg2rhls/remove-unused-state.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 +- jlm/hls/ir/hls.hpp | 6 +-- jlm/hls/opt/cne.cpp | 49 +++++++++---------- jlm/hls/util/view.cpp | 4 +- jlm/llvm/backend/dot/DotWriter.cpp | 2 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 6 +-- jlm/llvm/ir/operators/Phi.cpp | 4 +- jlm/llvm/ir/operators/Phi.hpp | 6 +-- jlm/llvm/ir/operators/delta.cpp | 4 +- jlm/llvm/ir/operators/delta.hpp | 8 +-- jlm/llvm/ir/operators/lambda.cpp | 4 +- jlm/llvm/ir/operators/lambda.hpp | 8 +-- jlm/llvm/opt/DeadNodeElimination.cpp | 6 +-- jlm/llvm/opt/DeadNodeElimination.hpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- 
jlm/llvm/opt/InvariantValueRedirection.hpp | 2 +- jlm/llvm/opt/RvsdgTreePrinter.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.hpp | 2 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 4 +- .../opt/alias-analyses/MemoryStateEncoder.hpp | 2 +- .../RegionAwareMemoryNodeProvider.cpp | 12 ++--- .../RegionAwareMemoryNodeProvider.hpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 4 +- jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 2 +- .../TopDownMemoryNodeEliminator.cpp | 4 +- .../TopDownMemoryNodeEliminator.hpp | 4 +- jlm/llvm/opt/cne.cpp | 43 ++++++++-------- jlm/llvm/opt/inversion.cpp | 4 +- jlm/llvm/opt/pull.cpp | 2 +- jlm/llvm/opt/push.cpp | 2 +- jlm/llvm/opt/unroll.cpp | 2 +- jlm/rvsdg/binary.cpp | 2 +- jlm/rvsdg/gamma.hpp | 6 +-- jlm/rvsdg/region.cpp | 18 +++---- jlm/rvsdg/region.hpp | 12 ++--- jlm/rvsdg/structural-node.cpp | 14 +++--- jlm/rvsdg/structural-node.hpp | 32 ++++++------ jlm/rvsdg/theta.cpp | 2 +- jlm/rvsdg/theta.hpp | 4 +- jlm/rvsdg/view.cpp | 6 +-- tests/jlm/rvsdg/test-gamma.cpp | 2 +- tests/jlm/rvsdg/test-theta.cpp | 2 +- tests/test-operation.cpp | 2 - tests/test-operation.hpp | 4 +- 65 files changed, 182 insertions(+), 188 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 8f9f55703..3520d76d6 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -2517,7 +2517,7 @@ RhlsToFirrtlConverter::TraceArgument(rvsdg::RegionArgument * arg) else if (auto o = dynamic_cast(origin)) { // Check if we the input of one loop_node is connected to the output of another - // structural_node, i.e., if the input is connected to the output of another loop_node + // StructuralNode, i.e., if the input is connected to the output of another loop_node return TraceStructuralOutput(o); } // Else we have reached the source diff --git 
a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 82440d20d..6df9bcfa4 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -134,7 +134,7 @@ static void ConvertGammaNodesInRegion(rvsdg::Region & region); static void -ConvertGammaNodesInStructuralNode(rvsdg::structural_node & structuralNode) +ConvertGammaNodesInStructuralNode(rvsdg::StructuralNode & structuralNode) { for (size_t n = 0; n < structuralNode.nsubregions(); n++) { @@ -159,7 +159,7 @@ ConvertGammaNodesInRegion(rvsdg::Region & region) { for (auto & node : rvsdg::topdown_traverser(®ion)) { - if (auto structuralNode = dynamic_cast(node)) + if (auto structuralNode = dynamic_cast(node)) { ConvertGammaNodesInStructuralNode(*structuralNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index c01ec0f83..18aa7e259 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -47,7 +47,7 @@ static void ConvertThetaNodesInRegion(rvsdg::Region & region); static void -ConvertThetaNodesInStructuralNode(jlm::rvsdg::structural_node & structuralNode) +ConvertThetaNodesInStructuralNode(rvsdg::StructuralNode & structuralNode) { for (size_t n = 0; n < structuralNode.nsubregions(); n++) { @@ -65,7 +65,7 @@ ConvertThetaNodesInRegion(rvsdg::Region & region) { for (auto & node : jlm::rvsdg::topdown_traverser(®ion)) { - if (auto structuralNode = dynamic_cast(node)) + if (auto structuralNode = dynamic_cast(node)) { ConvertThetaNodesInStructuralNode(*structuralNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 0505b22fc..b4fc8dc12 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -191,7 +191,7 @@ static void RemoveUnusedStatesInRegion(rvsdg::Region & 
region); static void -RemoveUnusedStatesInStructuralNode(rvsdg::structural_node & structuralNode) +RemoveUnusedStatesInStructuralNode(rvsdg::StructuralNode & structuralNode) { // Remove unused states from innermost regions first for (size_t n = 0; n < structuralNode.nsubregions(); n++) @@ -218,7 +218,7 @@ RemoveUnusedStatesInRegion(rvsdg::Region & region) { for (auto & node : rvsdg::topdown_traverser(®ion)) { - if (auto structuralNode = dynamic_cast(node)) + if (auto structuralNode = dynamic_cast(node)) { RemoveUnusedStatesInStructuralNode(*structuralNode); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp index 25dfff69b..a139c5a40 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp @@ -16,7 +16,7 @@ add_buffers(rvsdg::Region * region, bool pass_through) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp index 8996bcdda..8266c7ab3 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp @@ -30,7 +30,7 @@ add_forks(rvsdg::Region * region) } for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 0b41139c7..d3723485b 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -18,7 +18,7 @@ add_prints(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) 
{ for (size_t n = 0; n < structnode->nsubregions(); n++) { @@ -106,7 +106,7 @@ convert_prints( { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp b/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp index b4afc740e..ffce1f6e0 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp @@ -23,7 +23,7 @@ add_sinks(rvsdg::Region * region) } for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index a964486ad..a00b27390 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -89,7 +89,7 @@ add_triggers(rvsdg::Region * region) auto trigger = get_trigger(region); for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (dynamic_cast(node)) + if (rvsdg::is(node)) { if (auto ln = dynamic_cast(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp index 80c4c8a60..4f38650d6 100644 --- a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp @@ -120,7 +120,7 @@ alloca_conv(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp index b5018e5a9..a0d2d4992 100644 --- a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp @@ 
-17,7 +17,7 @@ check_rhls(rvsdg::Region * sr) { for (auto & node : jlm::rvsdg::topdown_traverser(sr)) { - if (dynamic_cast(node)) + if (rvsdg::is(node)) { if (auto ln = dynamic_cast(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index b51418343..8d072be98 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -392,7 +392,7 @@ process_loopnode(loop_node * loopNode) bool can_decouple = true; for (auto sn : data_slice) { - if (dynamic_cast(sn)) + if (rvsdg::is(sn)) { // data slice may not contain loops can_decouple = false; @@ -420,7 +420,7 @@ process_loopnode(loop_node * loopNode) JLM_ASSERT(!can_decouple || !data_slice.count(simplenode)); for (auto sn : state_slice) { - if (dynamic_cast(sn)) + if (rvsdg::is(sn)) { // state slice may not contain loops can_decouple = false; diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index ba80c7b84..065aef600 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -77,7 +77,7 @@ hls::distribute_constants(rvsdg::Region * region) // buffers for them for (auto & node : rvsdg::topdown_traverser(region)) { - if (dynamic_cast(node)) + if (rvsdg::is(node)) { if (auto ln = dynamic_cast(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 9103b12a8..020c6071a 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -153,7 +153,7 @@ instrument_ref( auto void_ptr = jlm::llvm::PointerType::Create(); for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp 
b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 6458dbed0..4f8caad69 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -332,7 +332,7 @@ gather_mem_nodes( { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) gather_mem_nodes( diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index cc5896362..18e761aee 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -71,7 +71,7 @@ gather_mem_nodes(rvsdg::Region * region, std::vector { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) gather_mem_nodes(structnode->subregion(n), mem_nodes); diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp index 9abe4a5bc..7feada423 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp @@ -29,7 +29,7 @@ memstate_conv(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) memstate_conv(structnode->subregion(n)); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index 16148a55f..c0beebcbb 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -201,7 +201,7 @@ merge_gamma(rvsdg::Region * region) changed = false; for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 
0; n < structnode->nsubregions(); n++) merge_gamma(structnode->subregion(n)); diff --git a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index fa096103d..85f49215f 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -37,7 +37,7 @@ remove_redundant_buf(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index aaba59a54..09dbf890b 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -18,7 +18,7 @@ remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) // process children first so that unnecessary users get removed for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { if (auto gn = dynamic_cast(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index f2fd3d2f1..325e7c235 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -138,7 +138,7 @@ inline_calls(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { @@ -180,7 +180,7 @@ convert_alloca(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) { diff --git 
a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index a5930f36a..480d2b384 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -743,7 +743,7 @@ class ExitResult final : public rvsdg::RegionResult } }; -class loop_node final : public jlm::rvsdg::structural_node +class loop_node final : public rvsdg::StructuralNode { public: virtual ~loop_node() @@ -751,7 +751,7 @@ class loop_node final : public jlm::rvsdg::structural_node private: inline loop_node(rvsdg::Region * parent) - : structural_node(loop_op(), parent, 1) + : StructuralNode(loop_op(), parent, 1) {} jlm::rvsdg::node_output * _predicate_buffer; @@ -763,7 +763,7 @@ class loop_node final : public jlm::rvsdg::structural_node rvsdg::Region * subregion() const noexcept { - return structural_node::subregion(0); + return StructuralNode::subregion(0); } [[nodiscard]] rvsdg::RegionResult * diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index c63ca01cc..b660c5522 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -290,7 +290,7 @@ static void mark(jlm::rvsdg::Region *, cnectx &); static void -mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_gamma(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(rvsdg::is(node->operation())); @@ -316,7 +316,7 @@ mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_theta(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); auto theta = static_cast(node); @@ -340,7 +340,7 @@ mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_loop(const rvsdg::structural_node * node, cnectx & ctx) +mark_loop(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(rvsdg::is(node)); auto loop = static_cast(node); @@ -362,7 +362,7 @@ mark_loop(const rvsdg::structural_node * node, cnectx & ctx) } static void -mark_lambda(const jlm::rvsdg::structural_node * node, cnectx & ctx) 
+mark_lambda(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); @@ -382,7 +382,7 @@ mark_lambda(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_phi(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_phi(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); @@ -402,22 +402,21 @@ mark_phi(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_delta(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_delta(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); } static void -mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark(const rvsdg::StructuralNode * node, cnectx & ctx) { - static std:: - unordered_map - map({ { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, - { std::type_index(typeid(ThetaOperation)), mark_theta }, - { std::type_index(typeid(jlm::hls::loop_op)), mark_loop }, - { typeid(llvm::lambda::operation), mark_lambda }, - { typeid(llvm::phi::operation), mark_phi }, - { typeid(llvm::delta::operation), mark_delta } }); + static std::unordered_map map( + { { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, + { std::type_index(typeid(ThetaOperation)), mark_theta }, + { std::type_index(typeid(jlm::hls::loop_op)), mark_loop }, + { typeid(llvm::lambda::operation), mark_lambda }, + { typeid(llvm::phi::operation), mark_phi }, + { typeid(llvm::delta::operation), mark_delta } }); auto & op = node->operation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); @@ -471,7 +470,7 @@ mark(rvsdg::Region * region, cnectx & ctx) if (auto simple = dynamic_cast(node)) mark(simple, ctx); else - mark(static_cast(node), ctx); + mark(static_cast(node), ctx); } } @@ -504,7 +503,7 @@ static void divert(rvsdg::Region *, cnectx &); static void -divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_gamma(rvsdg::StructuralNode * node, cnectx & ctx) { 
JLM_ASSERT(rvsdg::is(node)); auto gamma = static_cast(node); @@ -522,7 +521,7 @@ divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_theta(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); auto theta = static_cast(node); @@ -539,7 +538,7 @@ divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_loop(rvsdg::structural_node * node, cnectx & ctx) +divert_loop(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(rvsdg::is(node)); auto subregion = node->subregion(0); @@ -547,7 +546,7 @@ divert_loop(rvsdg::structural_node * node, cnectx & ctx) } static void -divert_lambda(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_lambda(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); @@ -556,7 +555,7 @@ divert_lambda(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_phi(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_phi(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); @@ -565,15 +564,15 @@ divert_phi(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_delta(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_delta(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); } static void -divert(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert(rvsdg::StructuralNode * node, cnectx & ctx) { - static std::unordered_map map( + static std::unordered_map map( { { std::type_index(typeid(rvsdg::GammaOperation)), divert_gamma }, { std::type_index(typeid(ThetaOperation)), divert_theta }, { std::type_index(typeid(jlm::hls::loop_op)), divert_loop }, @@ -594,7 +593,7 @@ divert(rvsdg::Region * region, cnectx & ctx) if (auto simple = dynamic_cast(node)) divert_outputs(simple, ctx); else - divert(static_cast(node), ctx); + divert(static_cast(node), ctx); } } diff --git a/jlm/hls/util/view.cpp 
b/jlm/hls/util/view.cpp index 62ab85cfe..7107813f5 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -161,7 +161,7 @@ isForbiddenChar(char c) } std::string -structural_node_to_dot(jlm::rvsdg::structural_node * structuralNode) +structural_node_to_dot(rvsdg::StructuralNode * structuralNode) { std::ostringstream dot; @@ -346,7 +346,7 @@ region_to_dot(rvsdg::Region * region) auto node_dot = simple_node_to_dot(simpleNode); dot << node_dot; } - else if (auto structuralNode = dynamic_cast(node)) + else if (auto structuralNode = dynamic_cast(node)) { auto node_dot = structural_node_to_dot(structuralNode); dot << node_dot; diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp index f3fede3b2..2993ea5dd 100644 --- a/jlm/llvm/backend/dot/DotWriter.cpp +++ b/jlm/llvm/backend/dot/DotWriter.cpp @@ -172,7 +172,7 @@ CreateGraphNodes(util::Graph & graph, rvsdg::Region & region, util::Graph * type AttachNodeOutput(node.GetOutputPort(i), *rvsdgNode->output(i), typeGraph); // Structural nodes also have subgraphs - if (auto structuralNode = dynamic_cast(rvsdgNode)) + if (auto structuralNode = dynamic_cast(rvsdgNode)) { for (size_t i = 0; i < structuralNode->nsubregions(); i++) { diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index deea34f56..b5541ea25 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -332,7 +332,7 @@ phi_needed(const rvsdg::input * i, const llvm::variable * v) { auto node = rvsdg::input::GetNode(*i); JLM_ASSERT(is(node)); - auto theta = static_cast(node); + auto theta = static_cast(node); auto input = static_cast(i); auto output = theta->output(input->index()); @@ -353,7 +353,7 @@ static inline void convert_theta_node(const rvsdg::node & node, context & ctx) { JLM_ASSERT(is(&node)); - auto subregion = static_cast(&node)->subregion(0); + auto subregion = static_cast(&node)->subregion(0); auto predicate = 
subregion->result(0)->origin(); auto pre_entry = ctx.lpbb(); @@ -428,7 +428,7 @@ static inline void convert_phi_node(const rvsdg::node & node, context & ctx) { JLM_ASSERT(rvsdg::is(&node)); - auto phi = static_cast(&node); + auto phi = static_cast(&node); auto subregion = phi->subregion(0); auto & module = ctx.module(); auto & ipg = module.ipgraph(); diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index dc58eab70..789b034d9 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -38,13 +38,13 @@ node::~node() cvinput * node::input(size_t n) const noexcept { - return static_cast(structural_node::input(n)); + return static_cast(StructuralNode::input(n)); } rvoutput * node::output(size_t n) const noexcept { - return static_cast(structural_node::output(n)); + return static_cast(StructuralNode::output(n)); } cvargument * diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index cd40a0c24..568a81e0c 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -45,7 +45,7 @@ class cvargument; class cvinput; class rvoutput; -class node final : public jlm::rvsdg::structural_node +class node final : public rvsdg::StructuralNode { friend class phi::builder; @@ -305,7 +305,7 @@ class node final : public jlm::rvsdg::structural_node private: node(rvsdg::Region * parent, const phi::operation & op) - : structural_node(op, parent, 1) + : StructuralNode(op, parent, 1) {} static phi::node * @@ -378,7 +378,7 @@ class node final : public jlm::rvsdg::structural_node rvsdg::Region * subregion() const noexcept { - return structural_node::subregion(0); + return StructuralNode::subregion(0); } const phi::operation & diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index a7766840d..8480cb384 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -107,7 +107,7 @@ node::add_ctxvar(jlm::rvsdg::output * origin) cvinput * node::input(size_t 
n) const noexcept { - return static_cast(structural_node::input(n)); + return static_cast(StructuralNode::input(n)); } cvargument * @@ -119,7 +119,7 @@ node::cvargument(size_t n) const noexcept delta::output * node::output() const noexcept { - return static_cast(structural_node::output(0)); + return static_cast(StructuralNode::output(0)); } delta::result * diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index ff88f914a..70f1f324d 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -127,7 +127,7 @@ class result; * auto output = delta->finalize(...); * \endcode */ -class node final : public rvsdg::structural_node +class node final : public rvsdg::StructuralNode { class cviterator; class cvconstiterator; @@ -140,7 +140,7 @@ class node final : public rvsdg::structural_node private: node(rvsdg::Region * parent, delta::operation && op) - : structural_node(op, parent, 1) + : StructuralNode(op, parent, 1) {} public: @@ -153,13 +153,13 @@ class node final : public rvsdg::structural_node rvsdg::Region * subregion() const noexcept { - return structural_node::subregion(0); + return StructuralNode::subregion(0); } const delta::operation & operation() const noexcept { - return *static_cast(&structural_node::operation()); + return *static_cast(&StructuralNode::operation()); } [[nodiscard]] const rvsdg::ValueType & diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 00f98479e..7a138b680 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -116,13 +116,13 @@ node::fctresults() const cvinput * node::input(size_t n) const noexcept { - return util::AssertedCast(structural_node::input(n)); + return util::AssertedCast(StructuralNode::input(n)); } lambda::output * node::output() const noexcept { - return util::AssertedCast(structural_node::output(0)); + return util::AssertedCast(StructuralNode::output(0)); } lambda::fctargument * diff --git 
a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 31ba510d5..3d67d22c8 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -128,7 +128,7 @@ class result; * auto output = lambda->finalize(...); * \endcode */ -class node final : public jlm::rvsdg::structural_node +class node final : public rvsdg::StructuralNode { public: class CallSummary; @@ -157,7 +157,7 @@ class node final : public jlm::rvsdg::structural_node private: node(rvsdg::Region * parent, lambda::operation && op) - : structural_node(op, parent, 1) + : StructuralNode(op, parent, 1) {} public: @@ -182,13 +182,13 @@ class node final : public jlm::rvsdg::structural_node [[nodiscard]] rvsdg::Region * subregion() const noexcept { - return structural_node::subregion(0); + return StructuralNode::subregion(0); } [[nodiscard]] const lambda::operation & operation() const noexcept { - return *jlm::util::AssertedCast(&structural_node::operation()); + return *jlm::util::AssertedCast(&StructuralNode::operation()); } [[nodiscard]] const jlm::llvm::FunctionType & diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 80dc5d2a3..40fb6cb82 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -329,7 +329,7 @@ DeadNodeElimination::SweepRegion(rvsdg::Region & region) const continue; } - if (auto structuralNode = dynamic_cast(node)) + if (auto structuralNode = dynamic_cast(node)) { SweepStructuralNode(*structuralNode); } @@ -340,7 +340,7 @@ DeadNodeElimination::SweepRegion(rvsdg::Region & region) const } void -DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) const +DeadNodeElimination::SweepStructuralNode(rvsdg::StructuralNode & node) const { auto sweepGamma = [](auto & d, auto & n) { @@ -365,7 +365,7 @@ DeadNodeElimination::SweepStructuralNode(jlm::rvsdg::structural_node & node) con static std::unordered_map< std::type_index, - std::function> + 
std::function> map({ { typeid(rvsdg::GammaOperation), sweepGamma }, { typeid(rvsdg::ThetaOperation), sweepTheta }, { typeid(lambda::operation), sweepLambda }, diff --git a/jlm/llvm/opt/DeadNodeElimination.hpp b/jlm/llvm/opt/DeadNodeElimination.hpp index 388c939b3..d64fe280e 100644 --- a/jlm/llvm/opt/DeadNodeElimination.hpp +++ b/jlm/llvm/opt/DeadNodeElimination.hpp @@ -91,7 +91,7 @@ class DeadNodeElimination final : public optimization SweepRegion(rvsdg::Region & region) const; void - SweepStructuralNode(jlm::rvsdg::structural_node & node) const; + SweepStructuralNode(rvsdg::StructuralNode & node) const; void SweepGamma(rvsdg::GammaNode & gammaNode) const; diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 93ca2e768..c1f790061 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -125,7 +125,7 @@ InvariantValueRedirection::RedirectInRegion(rvsdg::Region & region) } void -InvariantValueRedirection::RedirectInSubregions(rvsdg::structural_node & structuralNode) +InvariantValueRedirection::RedirectInSubregions(rvsdg::StructuralNode & structuralNode) { auto isGammaNode = is(&structuralNode); auto isThetaNode = is(&structuralNode); diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index 73e56767a..3830775cc 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -64,7 +64,7 @@ class InvariantValueRedirection final : public optimization RedirectInRegion(rvsdg::Region & region); static void - RedirectInSubregions(rvsdg::structural_node & structuralNode); + RedirectInSubregions(rvsdg::StructuralNode & structuralNode); static void RedirectGammaOutputs(rvsdg::GammaNode & gammaNode); diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 5a88bde83..87788f01c 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ 
b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -96,7 +96,7 @@ RvsdgTreePrinter::AnnotateNumRvsdgNodes( { for (auto & node : region.nodes) { - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { size_t numSubregionNodes = 0; for (size_t n = 0; n < structuralNode->nsubregions(); n++) @@ -142,7 +142,7 @@ RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( for (auto & node : region.nodes) { - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { size_t numMemoryStateInputs = 0; for (size_t n = 0; n < structuralNode->ninputs(); n++) diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 1c13a7ed9..83a8179ba 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -924,7 +924,7 @@ Andersen::AnalyzeValist(const rvsdg::simple_node & node) } void -Andersen::AnalyzeStructuralNode(const rvsdg::structural_node & node) +Andersen::AnalyzeStructuralNode(const rvsdg::StructuralNode & node) { if (const auto lambdaNode = dynamic_cast(&node)) AnalyzeLambda(*lambdaNode); @@ -1158,7 +1158,7 @@ Andersen::AnalyzeRegion(rvsdg::Region & region) { if (auto simpleNode = dynamic_cast(node)) AnalyzeSimpleNode(*simpleNode); - else if (auto structuralNode = dynamic_cast(node)) + else if (auto structuralNode = dynamic_cast(node)) AnalyzeStructuralNode(*structuralNode); else JLM_UNREACHABLE("Unknown node type"); diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index c2ced42cf..bc59f805e 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -395,7 +395,7 @@ class Andersen final : public AliasAnalysis AnalyzeValist(const rvsdg::simple_node & node); void - AnalyzeStructuralNode(const rvsdg::structural_node & node); + AnalyzeStructuralNode(const rvsdg::StructuralNode & node); void AnalyzeLambda(const lambda::node & node); diff 
--git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index cfe5bade4..60b8f16a0 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -496,7 +496,7 @@ MemoryStateEncoder::EncodeRegion(rvsdg::Region & region) { EncodeSimpleNode(*simpleNode); } - else if (auto structuralNode = dynamic_cast(node)) + else if (auto structuralNode = dynamic_cast(node)) { EncodeStructuralNode(*structuralNode); } @@ -508,7 +508,7 @@ MemoryStateEncoder::EncodeRegion(rvsdg::Region & region) } void -MemoryStateEncoder::EncodeStructuralNode(rvsdg::structural_node & structuralNode) +MemoryStateEncoder::EncodeStructuralNode(rvsdg::StructuralNode & structuralNode) { if (auto lambdaNode = dynamic_cast(&structuralNode)) { diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index f4d3df1b2..f3f2bff37 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -86,7 +86,7 @@ class MemoryStateEncoder final EncodeRegion(rvsdg::Region & region); void - EncodeStructuralNode(rvsdg::structural_node & structuralNode); + EncodeStructuralNode(rvsdg::StructuralNode & structuralNode); void EncodeSimpleNode(const rvsdg::simple_node & simpleNode); diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index 1978f945f..e52b940d0 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -186,7 +186,7 @@ class RegionSummary final return RecursiveCalls_; } - const util::HashSet & + const util::HashSet & GetStructuralNodes() const { return StructuralNodes_; @@ -223,7 +223,7 @@ class RegionSummary final } void - AddStructuralNode(const rvsdg::structural_node & structuralNode) + 
AddStructuralNode(const rvsdg::StructuralNode & structuralNode) { StructuralNodes_.Insert(&structuralNode); } @@ -254,7 +254,7 @@ class RegionSummary final util::HashSet RecursiveCalls_; util::HashSet NonRecursiveCalls_; - util::HashSet StructuralNodes_; + util::HashSet StructuralNodes_; }; /** \brief Memory node provisioning of region-aware memory node provider @@ -677,7 +677,7 @@ RegionAwareMemoryNodeProvider::AnnotateRegion(rvsdg::Region & region) for (auto & node : region.nodes) { - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { AnnotateStructuralNode(*structuralNode); } @@ -830,7 +830,7 @@ RegionAwareMemoryNodeProvider::AnnotateMemcpy(const rvsdg::simple_node & memcpyN } void -RegionAwareMemoryNodeProvider::AnnotateStructuralNode(const rvsdg::structural_node & structuralNode) +RegionAwareMemoryNodeProvider::AnnotateStructuralNode(const rvsdg::StructuralNode & structuralNode) { if (is(&structuralNode)) { @@ -1046,7 +1046,7 @@ RegionAwareMemoryNodeProvider::ToRegionTree( for (const auto & node : region->nodes) { - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { subtree += util::strfmt(indent(depth), structuralNode->operation().debug_string(), "\n"); for (size_t n = 0; n < structuralNode->nsubregions(); n++) diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp index 65b6a1644..08fac9106 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp @@ -110,7 +110,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider AnnotateSimpleNode(const rvsdg::simple_node & provider); void - AnnotateStructuralNode(const rvsdg::structural_node & structuralNode); + AnnotateStructuralNode(const rvsdg::StructuralNode & structuralNode); void AnnotateLoad(const LoadNode & loadNode); diff --git 
a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 51dd6589d..558bd1463 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -1682,7 +1682,7 @@ Steensgaard::AnalyzeTheta(const rvsdg::ThetaNode & theta) } void -Steensgaard::AnalyzeStructuralNode(const jlm::rvsdg::structural_node & node) +Steensgaard::AnalyzeStructuralNode(const rvsdg::StructuralNode & node) { if (auto lambdaNode = dynamic_cast(&node)) { @@ -1732,7 +1732,7 @@ Steensgaard::AnalyzeRegion(rvsdg::Region & region) { AnalyzeSimpleNode(*simpleNode); } - else if (auto structuralNode = dynamic_cast(node)) + else if (auto structuralNode = dynamic_cast(node)) { AnalyzeStructuralNode(*structuralNode); } diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index f2ce6f9c3..bd561bc93 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -94,7 +94,7 @@ class Steensgaard final : public AliasAnalysis AnalyzeSimpleNode(const rvsdg::simple_node & node); void - AnalyzeStructuralNode(const rvsdg::structural_node & node); + AnalyzeStructuralNode(const rvsdg::StructuralNode & node); void AnalyzeAlloca(const rvsdg::simple_node & node); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index f2aeae8f3..0691e3d4e 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -515,7 +515,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::Region & region) { EliminateTopDownSimpleNode(*simpleNode); } - else if (auto structuralNode = dynamic_cast(node)) + else if (auto structuralNode = dynamic_cast(node)) { EliminateTopDownStructuralNode(*structuralNode); } @@ -528,7 +528,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::Region & 
region) void TopDownMemoryNodeEliminator::EliminateTopDownStructuralNode( - const rvsdg::structural_node & structuralNode) + const rvsdg::StructuralNode & structuralNode) { if (auto gammaNode = dynamic_cast(&structuralNode)) { diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp index aaf3b93f5..65707a0f7 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp @@ -31,7 +31,7 @@ class GammaNode; class node; class Region; class simple_node; -class structural_node; +class StructuralNode; class ThetaNode; } @@ -134,7 +134,7 @@ class TopDownMemoryNodeEliminator final : public MemoryNodeEliminator EliminateTopDownRegion(rvsdg::Region & region); void - EliminateTopDownStructuralNode(const rvsdg::structural_node & structuralNode); + EliminateTopDownStructuralNode(const rvsdg::StructuralNode & structuralNode); void EliminateTopDownLambda(const lambda::node & lambdaNode); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 8edcea23c..5527f69a1 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -274,7 +274,7 @@ static void mark(rvsdg::Region *, cnectx &); static void -mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_gamma(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(rvsdg::is(node->operation())); @@ -300,7 +300,7 @@ mark_gamma(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_theta(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); auto theta = static_cast(node); @@ -324,7 +324,7 @@ mark_theta(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_lambda(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_lambda(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); @@ 
-344,7 +344,7 @@ mark_lambda(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_phi(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_phi(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); @@ -364,21 +364,20 @@ mark_phi(const jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -mark_delta(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark_delta(const rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); } static void -mark(const jlm::rvsdg::structural_node * node, cnectx & ctx) +mark(const rvsdg::StructuralNode * node, cnectx & ctx) { - static std:: - unordered_map - map({ { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, - { std::type_index(typeid(rvsdg::ThetaOperation)), mark_theta }, - { typeid(lambda::operation), mark_lambda }, - { typeid(phi::operation), mark_phi }, - { typeid(delta::operation), mark_delta } }); + static std::unordered_map map( + { { std::type_index(typeid(rvsdg::GammaOperation)), mark_gamma }, + { std::type_index(typeid(rvsdg::ThetaOperation)), mark_theta }, + { typeid(lambda::operation), mark_lambda }, + { typeid(phi::operation), mark_phi }, + { typeid(delta::operation), mark_delta } }); auto & op = node->operation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); @@ -432,7 +431,7 @@ mark(rvsdg::Region * region, cnectx & ctx) if (auto simple = dynamic_cast(node)) mark(simple, ctx); else - mark(static_cast(node), ctx); + mark(static_cast(node), ctx); } } @@ -465,7 +464,7 @@ static void divert(rvsdg::Region *, cnectx &); static void -divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_gamma(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(rvsdg::is(node)); auto gamma = static_cast(node); @@ -483,7 +482,7 @@ divert_gamma(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_theta(rvsdg::StructuralNode * node, cnectx 
& ctx) { JLM_ASSERT(is(node)); auto theta = static_cast(node); @@ -500,7 +499,7 @@ divert_theta(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_lambda(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_lambda(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); @@ -509,7 +508,7 @@ divert_lambda(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_phi(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_phi(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(is(node)); @@ -518,15 +517,15 @@ divert_phi(jlm::rvsdg::structural_node * node, cnectx & ctx) } static void -divert_delta(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert_delta(rvsdg::StructuralNode * node, cnectx & ctx) { JLM_ASSERT(jlm::rvsdg::is(node)); } static void -divert(jlm::rvsdg::structural_node * node, cnectx & ctx) +divert(rvsdg::StructuralNode * node, cnectx & ctx) { - static std::unordered_map map( + static std::unordered_map map( { { std::type_index(typeid(rvsdg::GammaOperation)), divert_gamma }, { std::type_index(typeid(rvsdg::ThetaOperation)), divert_theta }, { typeid(lambda::operation), divert_lambda }, @@ -546,7 +545,7 @@ divert(rvsdg::Region * region, cnectx & ctx) if (auto simple = dynamic_cast(node)) divert_outputs(simple, ctx); else - divert(static_cast(node), ctx); + divert(static_cast(node), ctx); } } diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 36f0a19b6..5e2185c71 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -91,7 +91,7 @@ pullin(rvsdg::GammaNode * gamma, rvsdg::ThetaNode * theta) } static std::vector> -collect_condition_nodes(jlm::rvsdg::structural_node * tnode, jlm::rvsdg::structural_node * gnode) +collect_condition_nodes(rvsdg::StructuralNode * tnode, jlm::rvsdg::StructuralNode * gnode) { JLM_ASSERT(is(tnode)); JLM_ASSERT(rvsdg::is(gnode)); @@ -291,7 +291,7 @@ invert(rvsdg::Region * region) { for (auto & node : 
jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t r = 0; r < structnode->nsubregions(); r++) invert(structnode->subregion(r)); diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index a7166450f..59d2fd8e9 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -286,7 +286,7 @@ pull(rvsdg::Region * region) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { if (auto gamma = dynamic_cast(node)) pull(gamma); diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 1faf44aab..ea814c538 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -399,7 +399,7 @@ push(rvsdg::Region * region) { for (auto node : jlm::rvsdg::topdown_traverser(region)) { - if (auto strnode = dynamic_cast(node)) + if (auto strnode = dynamic_cast(node)) { for (size_t n = 0; n < strnode->nsubregions(); n++) push(strnode->subregion(n)); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 2af878550..caf014e44 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -487,7 +487,7 @@ unroll(rvsdg::Region * region, size_t factor) bool unrolled = false; for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) unrolled = unroll(structnode->subregion(n), factor); diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 18578e24f..0c783346c 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -429,7 +429,7 @@ flattened_binary_op::reduce( node->output(0)->divert_users(output); remove(node); } - else if (auto structnode = dynamic_cast(node)) + else if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) reduce(structnode->subregion(n), reduction); 
diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 049380e9b..9e3c362e2 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -109,7 +109,7 @@ class GammaOperation final : public structural_op class GammaInput; class GammaOutput; -class GammaNode : public structural_node +class GammaNode : public StructuralNode { public: ~GammaNode() noexcept override; @@ -451,7 +451,7 @@ class GammaOutput final : public structural_output /* gamma node method definitions */ inline GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) - : structural_node(GammaOperation(nalternatives), predicate->region(), nalternatives) + : StructuralNode(GammaOperation(nalternatives), predicate->region(), nalternatives) { node::add_input(std::unique_ptr( new GammaInput(this, predicate, ControlType::Create(nalternatives)))); @@ -514,7 +514,7 @@ class GammaResult final : public RegionResult inline GammaInput * GammaNode::predicate() const noexcept { - return util::AssertedCast(structural_node::input(0)); + return util::AssertedCast(StructuralNode::input(0)); } inline GammaInput * diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 2fd3c34ff..da1c21ce1 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -145,7 +145,7 @@ Region::Region(rvsdg::Region * parent, jlm::rvsdg::graph * graph) on_region_create(this); } -Region::Region(jlm::rvsdg::structural_node * node, size_t index) +Region::Region(rvsdg::StructuralNode * node, size_t index) : index_(index), graph_(node->graph()), node_(node) @@ -306,7 +306,7 @@ Region::prune(bool recursive) for (const auto & node : nodes) { - if (auto snode = dynamic_cast(&node)) + if (auto snode = dynamic_cast(&node)) { for (size_t n = 0; n < snode->nsubregions(); n++) snode->subregion(n)->prune(recursive); @@ -319,7 +319,7 @@ Region::normalize(bool recursive) { for (auto node : jlm::rvsdg::topdown_traverser(this)) { - if (auto structnode = dynamic_cast(node)) + if (auto structnode = dynamic_cast(node)) { for (size_t 
n = 0; n < structnode->nsubregions(); n++) structnode->subregion(n)->normalize(recursive); @@ -342,7 +342,7 @@ Region::NumRegions(const rvsdg::Region & region) noexcept size_t numRegions = 1; for (auto & node : region.nodes) { - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { for (size_t n = 0; n < structuralNode->nsubregions(); n++) { @@ -396,7 +396,7 @@ Region::ToTree( indentationString = std::string(indentationDepth, indentationChar); for (auto & node : region.nodes) { - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { auto nodeString = structuralNode->operation().debug_string(); auto annotationString = GetAnnotationString( @@ -478,7 +478,7 @@ nnodes(const jlm::rvsdg::Region * region) noexcept size_t n = region->nnodes(); for (const auto & node : region->nodes) { - if (auto snode = dynamic_cast(&node)) + if (auto snode = dynamic_cast(&node)) { for (size_t r = 0; r < snode->nsubregions(); r++) n += nnodes(snode->subregion(r)); @@ -494,7 +494,7 @@ nstructnodes(const rvsdg::Region * region) noexcept size_t n = 0; for (const auto & node : region->nodes) { - if (auto snode = dynamic_cast(&node)) + if (auto snode = dynamic_cast(&node)) { for (size_t r = 0; r < snode->nsubregions(); r++) n += nstructnodes(snode->subregion(r)); @@ -511,7 +511,7 @@ nsimpnodes(const rvsdg::Region * region) noexcept size_t n = 0; for (const auto & node : region->nodes) { - if (auto snode = dynamic_cast(&node)) + if (auto snode = dynamic_cast(&node)) { for (size_t r = 0; r < snode->nsubregions(); r++) n += nsimpnodes(snode->subregion(r)); @@ -531,7 +531,7 @@ ninputs(const rvsdg::Region * region) noexcept size_t n = region->nresults(); for (const auto & node : region->nodes) { - if (auto snode = dynamic_cast(&node)) + if (auto snode = dynamic_cast(&node)) { for (size_t r = 0; r < snode->nsubregions(); r++) n += ninputs(snode->subregion(r)); diff --git a/jlm/rvsdg/region.hpp 
b/jlm/rvsdg/region.hpp index 5e9af4ea0..da44f532b 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -27,7 +27,7 @@ class node; class simple_node; class simple_op; class structural_input; -class structural_node; +class StructuralNode; class structural_op; class structural_output; class SubstitutionMap; @@ -205,7 +205,7 @@ class RegionResult : public input * \brief Represent acyclic RVSDG subgraphs * * Regions represent acyclic RVSDG subgraphs and are instantiated with an index in \ref - * structural_node%s. Each region has \ref RegionArgument%s and \ref RegionResult%s that represent + * StructuralNode%s. Each region has \ref RegionArgument%s and \ref RegionResult%s that represent * the values at the beginning and end of the acyclic graph, respectively. In addition, each region * keeps track of the following properties: * @@ -258,7 +258,7 @@ class Region Region(rvsdg::Region * parent, jlm::rvsdg::graph * graph); - Region(rvsdg::structural_node * node, size_t index); + Region(rvsdg::StructuralNode * node, size_t index); /** * @return Returns an iterator range for iterating through the arguments of the region. @@ -357,7 +357,7 @@ class Region return graph_; } - inline jlm::rvsdg::structural_node * + inline rvsdg::StructuralNode * node() const noexcept { return node_; @@ -619,7 +619,7 @@ class Region * * @param region The top-level region that is converted * @param annotationMap A map with annotations for instances of \ref Region%s or - * structural_node%s. + * StructuralNode%s. * @return A string containing the ASCII tree of \p region. 
*/ [[nodiscard]] static std::string @@ -678,7 +678,7 @@ class Region size_t index_; jlm::rvsdg::graph * graph_; - jlm::rvsdg::structural_node * node_; + rvsdg::StructuralNode * node_; std::vector results_; std::vector arguments_; region_bottom_node_list BottomNodes_; diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index 99913151f..3b68fb61a 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -21,7 +21,7 @@ structural_input::~structural_input() noexcept } structural_input::structural_input( - jlm::rvsdg::structural_node * node, + rvsdg::StructuralNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : node_input(origin, node, std::move(type)) @@ -38,9 +38,7 @@ structural_output::~structural_output() noexcept on_output_destroy(this); } -structural_output::structural_output( - jlm::rvsdg::structural_node * node, - std::shared_ptr type) +structural_output::structural_output(StructuralNode * node, std::shared_ptr type) : node_output(node, std::move(type)) { on_output_create(this); @@ -48,14 +46,14 @@ structural_output::structural_output( /* structural node */ -structural_node::~structural_node() +StructuralNode::~StructuralNode() noexcept { on_node_destroy(this); subregions_.clear(); } -structural_node::structural_node( +StructuralNode::StructuralNode( const jlm::rvsdg::structural_op & op, rvsdg::Region * region, size_t nsubregions) @@ -71,7 +69,7 @@ structural_node::structural_node( } structural_input * -structural_node::append_input(std::unique_ptr input) +StructuralNode::append_input(std::unique_ptr input) { if (input->node() != this) throw jlm::util::error("Appending input to wrong node."); @@ -86,7 +84,7 @@ structural_node::append_input(std::unique_ptr input) } structural_output * -structural_node::append_output(std::unique_ptr output) +StructuralNode::append_output(std::unique_ptr output) { if (output->node() != this) throw jlm::util::error("Appending output to wrong node."); diff --git 
a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 77c0c8fa3..21496de4c 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -18,13 +18,13 @@ class structural_input; class structural_op; class structural_output; -class structural_node : public node +class StructuralNode : public node { public: - virtual ~structural_node(); + ~StructuralNode() noexcept override; protected: - structural_node( + StructuralNode( /* FIXME: use move semantics instead of copy semantics for op */ const jlm::rvsdg::structural_op & op, rvsdg::Region * region, @@ -71,19 +71,19 @@ typedef jlm::util::intrusive_list type); static structural_input * create( - structural_node * node, + StructuralNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) { @@ -91,10 +91,10 @@ class structural_input : public node_input return node->append_input(std::move(input)); } - structural_node * + StructuralNode * node() const noexcept { - return static_cast(node_input::node()); + return static_cast(node_input::node()); } argument_list arguments; @@ -107,24 +107,24 @@ typedef jlm::util::intrusive_list type); + structural_output(StructuralNode * node, std::shared_ptr type); static structural_output * - create(structural_node * node, std::shared_ptr type) + create(StructuralNode * node, std::shared_ptr type) { auto output = std::make_unique(node, std::move(type)); return node->append_output(std::move(output)); } - structural_node * + StructuralNode * node() const noexcept { - return static_cast(node_output::node()); + return static_cast(node_output::node()); } result_list results; @@ -133,13 +133,13 @@ class structural_output : public node_output /* structural node method definitions */ inline jlm::rvsdg::structural_input * -structural_node::input(size_t index) const noexcept +StructuralNode::input(size_t index) const noexcept { return static_cast(node::input(index)); } inline jlm::rvsdg::structural_output * -structural_node::output(size_t index) const 
noexcept +StructuralNode::output(size_t index) const noexcept { return static_cast(node::output(index)); } @@ -160,7 +160,7 @@ Region::Contains(const rvsdg::Region & region, bool checkSubregions) continue; } - if (auto structuralNode = dynamic_cast(&node)) + if (auto structuralNode = dynamic_cast(&node)) { for (size_t n = 0; n < structuralNode->nsubregions(); n++) { diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index c1667d26e..f940963d8 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -27,7 +27,7 @@ ThetaOperation::copy() const } ThetaNode::ThetaNode(rvsdg::Region & parent) - : structural_node(ThetaOperation(), &parent, 1) + : StructuralNode(ThetaOperation(), &parent, 1) { auto predicate = control_false(subregion()); ThetaPredicateResult::Create(*predicate); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index cea84185e..33348be3f 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -30,7 +30,7 @@ class ThetaOperation final : public structural_op class ThetaInput; class ThetaOutput; -class ThetaNode final : public structural_node +class ThetaNode final : public StructuralNode { public: class loopvar_iterator @@ -100,7 +100,7 @@ class ThetaNode final : public structural_node [[nodiscard]] rvsdg::Region * subregion() const noexcept { - return structural_node::subregion(0); + return StructuralNode::subregion(0); } [[nodiscard]] RegionResult * diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 5cf0dbb94..6bcba5568 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -57,7 +57,7 @@ node_to_string( } s += "\n"; - if (auto snode = dynamic_cast(node)) + if (auto snode = dynamic_cast(node)) { for (size_t n = 0; n < snode->nsubregions(); n++) s += region_to_string(snode->subregion(n), depth + 1, map); @@ -297,7 +297,7 @@ convert_simple_node(const jlm::rvsdg::simple_node * node) } static inline std::string -convert_structural_node(const jlm::rvsdg::structural_node * node) +convert_structural_node(const 
rvsdg::StructuralNode * node) { std::string s; s += node_starttag(id(node), "", type(node)); @@ -327,7 +327,7 @@ convert_node(const jlm::rvsdg::node * node) if (auto n = dynamic_cast(node)) return convert_simple_node(n); - if (auto n = dynamic_cast(node)) + if (auto n = dynamic_cast(node)) return convert_structural_node(n); JLM_ASSERT(0); diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 9a4a4e2e5..3e812a861 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -37,7 +37,7 @@ test_gamma(void) /* test gamma copy */ - auto gamma2 = static_cast(gamma)->copy(graph.root(), { pred, v0, v1, v2 }); + auto gamma2 = static_cast(gamma)->copy(graph.root(), { pred, v0, v1, v2 }); view(graph.root(), stdout); assert(is(gamma2)); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 8e50f3fed..d63ed264c 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -34,7 +34,7 @@ TestThetaCreation() jlm::tests::GraphExport::Create(*theta->output(0), "exp"); auto theta2 = - static_cast(theta)->copy(graph.root(), { imp1, imp2, imp3 }); + static_cast(theta)->copy(graph.root(), { imp1, imp2, imp3 }); jlm::rvsdg::view(graph.root(), stdout); assert(lv1->node() == theta); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 7ff50a3b3..df8a4b0d1 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -160,8 +160,6 @@ structural_op::copy() const return std::unique_ptr(new structural_op(*this)); } -/* structural_node class */ - structural_node::~structural_node() {} diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 6a031f5e3..517dfdbdb 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -209,14 +209,14 @@ class StructuralNodeArgument; class StructuralNodeInput; class StructuralNodeOutput; -class structural_node final : public rvsdg::structural_node +class structural_node final : public 
rvsdg::StructuralNode { public: ~structural_node() override; private: structural_node(rvsdg::Region * parent, size_t nsubregions) - : rvsdg::structural_node(structural_op(), parent, nsubregions) + : rvsdg::StructuralNode(structural_op(), parent, nsubregions) {} public: From 91b98db77453df0110d5382ed811f19626acbf57 Mon Sep 17 00:00:00 2001 From: halvorlinder <56249210+halvorlinder@users.noreply.github.com> Date: Wed, 16 Oct 2024 11:41:52 +0200 Subject: [PATCH 111/170] Theta node conversion to and from MLIR (#655) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR adds conversion of theta nodes from JLM to MLIR and from MLIR to JLM --------- Co-authored-by: Håvard Krogstie Co-authored-by: Magnus Sjalander --- jlm/mlir/backend/JlmToMlirConverter.cpp | 36 ++++ jlm/mlir/backend/JlmToMlirConverter.hpp | 7 + jlm/mlir/frontend/MlirToJlmConverter.cpp | 19 ++- jlm/mlir/frontend/MlirToJlmConverter.hpp | 1 + .../mlir/backend/TestJlmToMlirConverter.cpp | 103 ++++++++++++ .../mlir/frontend/TestMlirToJlmConverter.cpp | 157 ++++++++++++++++++ 6 files changed, 322 insertions(+), 1 deletion(-) diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 5b68714ca..51d59ad26 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -164,6 +164,10 @@ JlmToMlirConverter::ConvertNode( { return ConvertGamma(*gamma, block, inputs); } + else if (auto theta = dynamic_cast(&node)) + { + return ConvertTheta(*theta, block, inputs); + } else { auto message = util::strfmt("Unimplemented structural node: ", node.operation().debug_string()); @@ -464,6 +468,38 @@ JlmToMlirConverter::ConvertGamma( return gamma; } +::mlir::Operation * +JlmToMlirConverter::ConvertTheta( + const rvsdg::ThetaNode & thetaNode, + ::mlir::Block & block, + const ::llvm::SmallVector<::mlir::Value> & inputs) +{ + ::llvm::SmallVector<::mlir::Type> outputTypeRange; + for (size_t i = 0; i < 
thetaNode.noutputs(); ++i) + { + outputTypeRange.push_back(ConvertType(thetaNode.output(i)->type())); + } + + ::llvm::SmallVector<::mlir::NamedAttribute> attributes; + + auto theta = Builder_->create<::mlir::rvsdg::ThetaNode>( + Builder_->getUnknownLoc(), + ::mlir::TypeRange(::llvm::ArrayRef(outputTypeRange)), + ::mlir::ValueRange(::llvm::ArrayRef(inputs)), + attributes); + + block.push_back(theta); + auto & thetaBlock = theta.getRegion().emplaceBlock(); + auto regionResults = ConvertRegion(*thetaNode.subregion(), thetaBlock); + auto results = ::mlir::ValueRange({ std::next(regionResults.begin()), regionResults.end() }); + auto thetaResult = Builder_->create<::mlir::rvsdg::ThetaResult>( + Builder_->getUnknownLoc(), + regionResults[0], + results); + thetaBlock.push_back(thetaResult); + return theta; +} + ::mlir::Type JlmToMlirConverter::ConvertType(const rvsdg::Type & type) { diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index c4c0d6f7a..cc2e13b9a 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -11,6 +11,7 @@ #include #include #include +#include // MLIR RVSDG dialects #include @@ -164,6 +165,12 @@ class JlmToMlirConverter final ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs); + ::mlir::Operation * + ConvertTheta( + const rvsdg::ThetaNode & thetaNode, + ::mlir::Block & block, + const ::llvm::SmallVector<::mlir::Value> & inputs); + /** * Converts an RVSDG type to an MLIR RVSDG type. * \param type The RVSDG type to be converted. 
diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index 7280b3bc3..e911ef3a9 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -352,6 +352,22 @@ MlirToJlmConverter::ConvertOperation( return rvsdgGammaNode; } + else if (auto mlirThetaNode = ::mlir::dyn_cast<::mlir::rvsdg::ThetaNode>(&mlirOperation)) + { + auto rvsdgThetaNode = rvsdg::ThetaNode::create(&rvsdgRegion); + + // Add loop vars to the theta node + for (size_t i = 0; i < inputs.size(); i++) + { + rvsdgThetaNode->add_loopvar(inputs[i]); + } + + auto regionResults = ConvertRegion(mlirThetaNode.getRegion(), *rvsdgThetaNode->subregion()); + + rvsdgThetaNode->set_predicate(regionResults[0]); + + return rvsdgThetaNode; + } else if (auto mlirMatch = ::mlir::dyn_cast<::mlir::rvsdg::Match>(&mlirOperation)) { std::unordered_map mapping; @@ -381,7 +397,8 @@ MlirToJlmConverter::ConvertOperation( else if ( ::mlir::isa<::mlir::rvsdg::LambdaResult>(&mlirOperation) || ::mlir::isa<::mlir::rvsdg::OmegaResult>(&mlirOperation) - || ::mlir::isa<::mlir::rvsdg::GammaResult>(&mlirOperation)) + || ::mlir::isa<::mlir::rvsdg::GammaResult>(&mlirOperation) + || ::mlir::isa<::mlir::rvsdg::ThetaResult>(&mlirOperation)) { // This is a terminating operation that doesn't have a corresponding RVSDG node return nullptr; diff --git a/jlm/mlir/frontend/MlirToJlmConverter.hpp b/jlm/mlir/frontend/MlirToJlmConverter.hpp index c4a9b6fa7..49cd37118 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.hpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.hpp @@ -11,6 +11,7 @@ #include #include #include +#include #include #include diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 93ec90f8d..0c08be6ee 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -9,6 +9,7 @@ #include #include +#include #include static int @@ -599,8 
+600,110 @@ TestGamma() return 0; } +/** \brief TestTheta + * + * This test is similar to previous tests, but uses a theta operation + */ +static int +TestTheta() +{ + using namespace jlm::llvm; + using namespace mlir::rvsdg; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto graph = &rvsdgModule->Rvsdg(); + + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + { + // Create a theta operation + std::cout << "Theta Operation" << std::endl; + auto entryvar1 = jlm::rvsdg::create_bitconstant(graph->root(), 32, 5); + auto entryvar2 = jlm::rvsdg::create_bitconstant(graph->root(), 32, 6); + jlm::rvsdg::ThetaNode * rvsdgThetaNode = jlm::rvsdg::ThetaNode::create(graph->root()); + + auto predicate = jlm::rvsdg::control_constant(rvsdgThetaNode->subregion(), 2, 0); + + rvsdgThetaNode->add_loopvar(entryvar1); + rvsdgThetaNode->add_loopvar(entryvar2); + rvsdgThetaNode->set_predicate(predicate); + + // Convert the RVSDG to MLIR + std::cout << "Convert to MLIR" << std::endl; + jlm::mlir::JlmToMlirConverter mlirgen; + auto omega = mlirgen.ConvertModule(*rvsdgModule); + + // Checking blocks and operations count + std::cout << "Checking blocks and operations count" << std::endl; + auto & omegaRegion = omega.getRegion(); + assert(omegaRegion.getBlocks().size() == 1); + auto & omegaBlock = omegaRegion.front(); + // 1 theta + 1 predicate + 2 constants + assert(omegaBlock.getOperations().size() == 4); + + bool thetaFound = false; + for (auto & operation : omegaBlock.getOperations()) + { + if (mlir::isa(operation)) + { + thetaFound = true; + std::cout << "Checking theta operation" << std::endl; + auto thetaOp = mlir::cast(operation); + // 2 loop vars + assert(thetaOp.getNumOperands() == 2); + assert(thetaOp.getNumResults() == 2); + + auto & thetaBlock = thetaOp.getRegion().front(); + auto thetaResult = thetaBlock.getTerminator(); + + assert(mlir::isa(thetaResult)); + auto thetaResultOp = mlir::cast(thetaResult); + 
+ std::cout << "Checking theta predicate" << std::endl; + + assert(mlir::isa(thetaResultOp.getPredicate().getDefiningOp())); + auto controlConstant = + mlir::cast(thetaResultOp.getPredicate().getDefiningOp()); + + assert(controlConstant.getValue() == 0); + + assert(mlir::isa(controlConstant.getType())); + auto ctrlType = mlir::cast(controlConstant.getType()); + assert(ctrlType.getNumOptions() == 2); + + std::cout << "Checking theta loop vars" << std::endl; + //! getInputs() corresponds to the loop vars + auto loopVars = thetaOp.getInputs(); + assert(loopVars.size() == 2); + assert(mlir::isa(loopVars[0].getDefiningOp())); + assert(mlir::isa(loopVars[1].getDefiningOp())); + auto loopVar1 = mlir::cast(loopVars[0].getDefiningOp()); + auto loopVar2 = mlir::cast(loopVars[1].getDefiningOp()); + assert(loopVar1.value() == 5); + assert(loopVar2.value() == 6); + + // Theta result, constant control predicate + assert(thetaBlock.getOperations().size() == 2); + + std::cout << "Checking loop exitVars" << std::endl; + std::cout << thetaResultOp.getNumOperands() << std::endl; + + std::cout << "Checking theta subregion" << std::endl; + + // Two arguments and predicate + assert(thetaResultOp.getNumOperands() == 3); + } + } + // } + assert(thetaFound); + omega->destroy(); + } + return 0; +} + JLM_UNIT_TEST_REGISTER("jlm/mlir/backend/TestMlirLambdaGen", TestLambda) JLM_UNIT_TEST_REGISTER("jlm/mlir/backend/TestMlirAddOperationGen", TestAddOperation) JLM_UNIT_TEST_REGISTER("jlm/mlir/backend/TestMlirComZeroExtGen", TestComZeroExt) JLM_UNIT_TEST_REGISTER("jlm/mlir/backend/TestMlirMatchGen", TestMatch) JLM_UNIT_TEST_REGISTER("jlm/mlir/backend/TestMlirGammaGen", TestGamma) +JLM_UNIT_TEST_REGISTER("jlm/mlir/backend/TestMlirThetaGen", TestTheta) diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index ec1b8e4a6..174040d57 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ 
b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -856,8 +856,165 @@ TestGammaOp() return 0; } +/** \brief TestThetaOp + * + * This function tests the Theta operation. It creates a lambda block with a Theta operation. + * + */ +static int +TestThetaOp() +{ + { + using namespace mlir::rvsdg; + using namespace mlir::jlm; + + // Setup MLIR Context and load dialects + std::cout << "Creating MLIR context" << std::endl; + auto context = std::make_unique(); + context->getOrLoadDialect(); + context->getOrLoadDialect(); + context->getOrLoadDialect(); + auto Builder_ = std::make_unique(context.get()); + + auto omega = Builder_->create(Builder_->getUnknownLoc()); + auto & omegaRegion = omega.getRegion(); + auto * omegaBlock = new mlir::Block; + omegaRegion.push_back(omegaBlock); + + // Handle function arguments + std::cout << "Creating function arguments" << std::endl; + ::llvm::SmallVector arguments; + arguments.push_back(Builder_->getType()); + arguments.push_back(Builder_->getType()); + ::llvm::ArrayRef argumentsArray(arguments); + + // Handle function results + std::cout << "Creating function results" << std::endl; + ::llvm::SmallVector results; + results.push_back(Builder_->getType()); + results.push_back(Builder_->getType()); + ::llvm::ArrayRef resultsArray(results); + + // LambdaNodes return a LambdaRefType + std::cout << "Creating LambdaRefType" << std::endl; + ::llvm::SmallVector lambdaRef; + auto refType = Builder_->getType(argumentsArray, resultsArray); + lambdaRef.push_back(refType); + + // Add function attributes + std::cout << "Creating function attributes" << std::endl; + ::llvm::SmallVector attributes; + auto attributeName = Builder_->getStringAttr("sym_name"); + auto attributeValue = Builder_->getStringAttr("test"); + auto symbolName = Builder_->getNamedAttr(attributeName, attributeValue); + attributes.push_back(symbolName); + ::llvm::ArrayRef<::mlir::NamedAttribute> attributesRef(attributes); + + // Add inputs to the function + ::llvm::SmallVector 
inputs; + + // Create the lambda node and add it to the region/block it resides in + std::cout << "Creating LambdaNode" << std::endl; + auto lambda = + Builder_->create(Builder_->getUnknownLoc(), lambdaRef, inputs, attributesRef); + omegaBlock->push_back(lambda); + auto & lambdaRegion = lambda.getRegion(); + auto * lambdaBlock = new mlir::Block; + lambdaRegion.push_back(lambdaBlock); + + // Add arguments to the region + std::cout << "Adding arguments to the region" << std::endl; + lambdaBlock->addArgument(Builder_->getType(), Builder_->getUnknownLoc()); + lambdaBlock->addArgument(Builder_->getType(), Builder_->getUnknownLoc()); + + ::llvm::SmallVector<::mlir::NamedAttribute> thetaAttributes; + ::llvm::SmallVector<::mlir::Type> typeRangeOuput; + typeRangeOuput.push_back(Builder_->getType()); + typeRangeOuput.push_back(Builder_->getType()); + ::mlir::rvsdg::ThetaNode theta = Builder_->create<::mlir::rvsdg::ThetaNode>( + Builder_->getUnknownLoc(), + ::mlir::TypeRange(::llvm::ArrayRef(typeRangeOuput)), // Ouputs types + ::mlir::ValueRange(::llvm::ArrayRef<::mlir::Value>( + { lambdaBlock->getArgument(0), lambdaBlock->getArgument(1) })), // Inputs + thetaAttributes); + lambdaBlock->push_back(theta); + + auto & thetaBlock = theta.getRegion().emplaceBlock(); + auto predicate = Builder_->create( + Builder_->getUnknownLoc(), + Builder_->getType<::mlir::rvsdg::RVSDG_CTRLType>(2), + 0); + thetaBlock.push_back(predicate); + + auto thetaResult = Builder_->create<::mlir::rvsdg::ThetaResult>( + Builder_->getUnknownLoc(), + predicate, + ::llvm::SmallVector(theta.getInputs())); + thetaBlock.push_back(thetaResult); + + // Handle the result of the lambda + ::llvm::SmallVector regionResults; + regionResults.push_back(theta->getResult(0)); + regionResults.push_back(theta->getResult(1)); + std::cout << "Creating LambdaResult" << std::endl; + auto lambdaResult = Builder_->create(Builder_->getUnknownLoc(), regionResults); + lambdaBlock->push_back(lambdaResult); + + // Handle the result of 
the omega + std::cout << "Creating OmegaResult" << std::endl; + ::llvm::SmallVector omegaRegionResults; + omegaRegionResults.push_back(lambda); + auto omegaResult = Builder_->create(Builder_->getUnknownLoc(), omegaRegionResults); + omegaBlock->push_back(omegaResult); + + // Convert the MLIR to RVSDG and check the result + std::cout << "Converting MLIR to RVSDG" << std::endl; + std::unique_ptr rootBlock = std::make_unique(); + rootBlock->push_back(omega); + auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); + auto region = rvsdgModule->Rvsdg().root(); + + { + using namespace jlm::rvsdg; + + assert(region->nnodes() == 1); + + // Get the lambda block + auto convertedLambda = + jlm::util::AssertedCast(region->nodes.first()); + assert(is(convertedLambda)); + + auto lambdaRegion = convertedLambda->subregion(); + + // Just the theta node + assert(lambdaRegion->nnodes() == 1); + + jlm::rvsdg::node_output * thetaOutput; + assert( + thetaOutput = dynamic_cast(lambdaRegion->result(0)->origin())); + jlm::rvsdg::node * node = thetaOutput->node(); + assert(is(node->operation())); + auto thetaNode = dynamic_cast(node); + + std::cout << "Checking theta node" << std::endl; + assert(thetaNode->ninputs() == 2); + assert(thetaNode->nloopvars() == 2); + assert(thetaNode->noutputs() == 2); + assert(thetaNode->nsubregions() == 1); + assert(is(thetaNode->predicate()->type())); + auto predicateType = + dynamic_cast(&thetaNode->predicate()->type()); + assert(predicateType->nalternatives() == 2); + std::cout << predicate.getValue() << std::endl; + } + } + + return 0; +} + JLM_UNIT_TEST_REGISTER("jlm/mlir/frontend/TestRvsdgLambdaGen", TestLambda) JLM_UNIT_TEST_REGISTER("jlm/mlir/frontend/TestRvsdgDivOperationGen", TestDivOperation) JLM_UNIT_TEST_REGISTER("jlm/mlir/frontend/TestRvsdgCompZeroExtGen", TestCompZeroExt) JLM_UNIT_TEST_REGISTER("jlm/mlir/frontend/TestMatchGen", TestMatchOp) JLM_UNIT_TEST_REGISTER("jlm/mlir/frontend/TestGammaGen", TestGammaOp) 
+JLM_UNIT_TEST_REGISTER("jlm/mlir/frontend/TestThetaGen", TestThetaOp) From 5a0fa445a500c24cd2068b34c26a395348f9689c Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Fri, 18 Oct 2024 19:04:19 +0200 Subject: [PATCH 112/170] Updated caching for CI CIRCT build (#661) Replaces the 'save-always: true' option, which has been deprecated. This version should have the advantage that even if the CI fails at a later step, the build should have already been saved and not require to be rebuilt when a fix is pushed. --- .github/actions/BuildCirct/action.yml | 30 +++++++++++++++++---------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/.github/actions/BuildCirct/action.yml b/.github/actions/BuildCirct/action.yml index 397d9d244..07e280351 100644 --- a/.github/actions/BuildCirct/action.yml +++ b/.github/actions/BuildCirct/action.yml @@ -4,6 +4,13 @@ description: "Builds CIRCT, which is used for the HLS backend" runs: using: "composite" steps: + - name: "Install LLVM, Clang, MLIR, and Ninja" + uses: ./.github/actions/InstallPackages + with: + install-llvm: true + install-mlir: true + install-ninja: true + - name: "Get the commit used for building CIRCT and use it as the cache key" id: get-circt-hash run: | @@ -11,26 +18,27 @@ runs: shell: bash - name: "Try to fetch CIRCT from the cache" - id: cache-circt - uses: actions/cache@v4 + id: restore-cache-circt + uses: actions/cache/restore@v4 with: - save-always: true path: | ${{ github.workspace }}/build-circt/circt key: ${{ runner.os }}-circt-${{ steps.get-circt-hash.outputs.hash }} - - name: "Install LLVM, Clang, MLIR, and Ninja" - uses: ./.github/actions/InstallPackages - with: - install-llvm: true - install-mlir: true - install-ninja: true - - name: "Build CIRCT if we didn't hit in the cache" - if: steps.cache-circt.outputs.cache-hit != 'true' + if: steps.restore-cache-circt.outputs.cache-hit != 'true' run: | ./scripts/build-circt.sh \ --build-path ${{ github.workspace }}/build-circt \ --install-path ${{ 
github.workspace }}/build-circt/circt \ --llvm-lit-path ~/.local/bin/lit shell: bash + + - name: "Save CIRCT to the cache" + if: steps.restore-cache-circt.outputs.cache-hit != 'true' + id: save-cache-circt + uses: actions/cache/save@v4 + with: + path: | + ${{ github.workspace }}/build-circt/circt + key: ${{ steps.restore-cache-circt.outputs.cache-primary-key }} From 69ae50c35976b82b8c3644e60f7f544e296b7587 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sat, 19 Oct 2024 11:42:47 +0200 Subject: [PATCH 113/170] Adds documentation and unit tests for MemoryConverter (#659) --- jlm/hls/Makefile.sub | 1 + jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 113 +++--- jlm/hls/backend/rvsdg2rhls/mem-conv.hpp | 10 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 2 +- .../rvsdg2rhls/MemoryConverterTests.cpp | 384 ++++++++++++++++++ 5 files changed, 456 insertions(+), 54 deletions(-) create mode 100644 tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index 4bb0e3d22..96ddd01e5 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -81,6 +81,7 @@ libhls_HEADERS = \ libhls_TESTS += \ tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests \ + tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests \ tests/jlm/hls/backend/rvsdg2rhls/TestFork \ tests/jlm/hls/backend/rvsdg2rhls/TestGamma \ tests/jlm/hls/backend/rvsdg2rhls/TestTheta \ diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 4f8caad69..b5491e188 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -177,17 +177,17 @@ get_impport_function_name(jlm::rvsdg::input * input) // trace function ptr to its call void trace_function_calls( - jlm::rvsdg::output * op, + jlm::rvsdg::output * output, std::vector & calls, std::unordered_set & visited) { - if (visited.count(op)) + if (visited.count(output)) { // skip already processed outputs return; } - visited.insert(op); - 
for (auto user : *op) + visited.insert(output); + for (auto user : *output) { if (auto si = dynamic_cast(user)) { @@ -325,9 +325,9 @@ replace_store(jlm::rvsdg::simple_node * orig) void gather_mem_nodes( jlm::rvsdg::Region * region, - std::vector & load_nodes, - std::vector & store_nodes, - std::vector & decouple_nodes, + std::vector & loadNodes, + std::vector & storeNodes, + std::vector & decoupleNodes, std::unordered_set exclude) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) @@ -335,12 +335,7 @@ gather_mem_nodes( if (auto structnode = dynamic_cast(node)) { for (size_t n = 0; n < structnode->nsubregions(); n++) - gather_mem_nodes( - structnode->subregion(n), - load_nodes, - store_nodes, - decouple_nodes, - exclude); + gather_mem_nodes(structnode->subregion(n), loadNodes, storeNodes, decoupleNodes, exclude); } else if (auto simplenode = dynamic_cast(node)) { @@ -350,69 +345,72 @@ gather_mem_nodes( } if (dynamic_cast(&simplenode->operation())) { - store_nodes.push_back(simplenode); + storeNodes.push_back(simplenode); } else if (dynamic_cast(&simplenode->operation())) { - load_nodes.push_back(simplenode); + loadNodes.push_back(simplenode); } else if (dynamic_cast(&simplenode->operation())) { // TODO: verify this is the right type of function call - decouple_nodes.push_back(simplenode); + decoupleNodes.push_back(simplenode); } } } } -// trace each input pointer to loads and stores +/** + * If the output is a pointer, it traces it to all memory operations it reaches. + * Pointers read from memory is not traced, i.e., the output of load operations is not traced. 
+ * @param output The output to trace + * @param loadNodes A vector containing all load nodes that are reached + * @param storeNodes A vector containing all store nodes that are reached + * @param decoupleNodes A vector containing all decoupled load nodes that are reached + * @param visited A set of already visited outputs + */ void -trace_pointer_argument( - jlm::rvsdg::output * op, - std::vector & load_nodes, - std::vector & store_nodes, - std::vector & decouple_nodes, +TracePointer( + jlm::rvsdg::output * output, + std::vector & loadNodes, + std::vector & storeNodes, + std::vector & decoupleNodes, std::unordered_set & visited) { - if (!dynamic_cast(&op->type())) + if (!dynamic_cast(&output->type())) { - // only process pointer outputs + // Only process pointer outputs return; } - if (visited.count(op)) + if (visited.count(output)) { - // skip already processed outputs + // Skip already processed outputs return; } - visited.insert(op); - for (auto user : *op) + visited.insert(output); + for (auto user : *output) { if (auto si = dynamic_cast(user)) { auto simplenode = si->node(); if (dynamic_cast(&simplenode->operation())) { - store_nodes.push_back(simplenode); + storeNodes.push_back(simplenode); } else if (dynamic_cast(&simplenode->operation())) { - load_nodes.push_back(simplenode); + loadNodes.push_back(simplenode); } else if (dynamic_cast(&simplenode->operation())) { // TODO: verify this is the right type of function call - decouple_nodes.push_back(simplenode); + decoupleNodes.push_back(simplenode); } else { for (size_t i = 0; i < simplenode->noutputs(); ++i) { - trace_pointer_argument( - simplenode->output(i), - load_nodes, - store_nodes, - decouple_nodes, - visited); + TracePointer(simplenode->output(i), loadNodes, storeNodes, decoupleNodes, visited); } } } @@ -420,18 +418,18 @@ trace_pointer_argument( { for (auto & arg : sti->arguments) { - trace_pointer_argument(&arg, load_nodes, store_nodes, decouple_nodes, visited); + TracePointer(&arg, loadNodes, 
storeNodes, decoupleNodes, visited); } } else if (auto r = dynamic_cast(user)) { if (auto ber = dynamic_cast(r)) { - trace_pointer_argument(ber->argument(), load_nodes, store_nodes, decouple_nodes, visited); + TracePointer(ber->argument(), loadNodes, storeNodes, decoupleNodes, visited); } else { - trace_pointer_argument(r->output(), load_nodes, store_nodes, decouple_nodes, visited); + TracePointer(r->output(), loadNodes, storeNodes, decoupleNodes, visited); } } else @@ -445,12 +443,19 @@ trace_pointer_argument( * Decoupled loads are user specified and encoded as function calls that need special treatment. * This function traces the output to all nodes and checks if it is the first argument to a call * operation. + * @param output The output to check if it is a function pointer + * @param visited A set of already visited outputs (nodes) + * @return True if the output is a function pointer */ bool IsDecoupledFunctionPointer( jlm::rvsdg::output * output, std::unordered_set & visited) { + if (!output) + { + return false; + } if (!dynamic_cast(&output->type())) { // Only process pointer outputs @@ -464,6 +469,7 @@ IsDecoupledFunctionPointer( visited.insert(output); bool isDecoupled = false; + // Iterate through all users of the output for (auto user : *output) { if (auto simpleInput = dynamic_cast(user)) @@ -517,13 +523,13 @@ IsDecoupledFunctionPointer( } void -jlm::hls::trace_pointer_arguments( - const jlm::llvm::lambda::node * ln, - port_load_store_decouple & port_nodes) +jlm::hls::TracePointerArguments( + const jlm::llvm::lambda::node * lambda, + port_load_store_decouple & portNodes) { - for (size_t i = 0; i < ln->subregion()->narguments(); ++i) + for (size_t i = 0; i < lambda->subregion()->narguments(); ++i) { - auto arg = ln->subregion()->argument(i); + auto arg = lambda->subregion()->argument(i); if (dynamic_cast(&arg->type())) { // Decoupled loads are user specified and encoded as function calls that need special @@ -535,12 +541,12 @@ 
jlm::hls::trace_pointer_arguments( continue; } visited.clear(); - port_nodes.emplace_back(); - trace_pointer_argument( + portNodes.emplace_back(); + TracePointer( arg, - std::get<0>(port_nodes.back()), - std::get<1>(port_nodes.back()), - std::get<2>(port_nodes.back()), + std::get<0>(portNodes.back()), + std::get<1>(portNodes.back()), + std::get<2>(portNodes.back()), visited); } } @@ -582,7 +588,7 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) // response and request ports // port_load_store_decouple portNodes; - trace_pointer_arguments(lambda, portNodes); + TracePointerArguments(lambda, portNodes); auto responseTypePtr = get_mem_res_type(jlm::rvsdg::bittype::Create(64)); auto requestTypePtr = get_mem_req_type(jlm::rvsdg::bittype::Create(64), false); @@ -710,6 +716,11 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) // once. // remove_unused_state(root); + + // Need to get the lambda from the root since remote_unused_state replaces the lambda + JLM_ASSERT(root->nnodes() == 1); + newLambda = jlm::util::AssertedCast(root->Nodes().begin().ptr()); + // Go through in reverse since we are removing things for (int i = newLambda->ncvarguments() - 1; i >= 0; --i) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp index 51f819904..a8798afee 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp @@ -18,15 +18,21 @@ typedef std::vector>> port_load_store_decouple; +/** + * Traces all pointer arguments of a lambda node and finds all memory operations. + * Pointers read from memory is not traced, i.e., the output of load operations is not traced. 
+ * @param lambda The lambda node for which to trace all pointer arguments + * @param portNodes A vector where each element contains all memory operations traced from a pointer + */ void -trace_pointer_arguments(const llvm::lambda::node * ln, port_load_store_decouple & port_nodes); +TracePointerArguments(const llvm::lambda::node * lambda, port_load_store_decouple & portNodes); void MemoryConverter(llvm::RvsdgModule & rm); /** * @param lambda The lambda node for wich the load and store operations are to be connected to - * response (arguemnts) ports + * response (argument) ports * @param argumentIndex The index of the reponse (argument) port to be connected * @param smap The substitution map for the lambda node * @param originalLoadNodes The load nodes to be connected to the reponse port diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 18e761aee..60a51320e 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -287,7 +287,7 @@ mem_sep_argument(rvsdg::Region * region) } auto state_user = *state_arg->begin(); port_load_store_decouple port_nodes; - trace_pointer_arguments(lambda, port_nodes); + TracePointerArguments(lambda, port_nodes); auto entry_states = jlm::llvm::LambdaEntryMemoryStateSplitOperation::Create(*state_arg, 1 + port_nodes.size()); auto state_result = GetMemoryStateResult(*lambda); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp new file mode 100644 index 000000000..e421d6f49 --- /dev/null +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -0,0 +1,384 @@ +/* + * Copyright 2024 Magnus Sjalander + * See COPYING for terms of redistribution. 
+ */ + +#include "test-registry.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +static int +TestTraceArgument() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::llvm::PointerType::Create(), + jlm::llvm::PointerType::Create(), + jlm::rvsdg::bittype::Create(32), + MemoryStateType::Create() }, + { MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Load followed by store + auto loadAddress = lambda->fctargument(0); + auto memoryStateArgument = lambda->fctargument(3); + auto loadOutput = LoadNonVolatileNode::Create( + loadAddress, + { memoryStateArgument }, + jlm::llvm::PointerType::Create(), + 32); + + auto storeAddress = lambda->fctargument(1); + auto storeData = lambda->fctargument(2); + auto storeOutput = StoreNonVolatileNode::Create(storeAddress, storeData, { loadOutput[1] }, 32); + + auto lambdaOutput = lambda->finalize({ storeOutput[0] }); + jlm::llvm::GraphExport::Create(*lambdaOutput, "f"); + + // Act + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + port_load_store_decouple portNodes; + TracePointerArguments(lambda, portNodes); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Assert + assert(portNodes.size() == 2); // 2 pointer arguments + assert(std::get<0>(portNodes[0]).size() == 1); // 1 load for the first pointer + assert(std::get<1>(portNodes[0]).size() == 0); // 0 store for the first pointer + assert(std::get<2>(portNodes[0]).size() == 0); // 0 decouple for the first pointer + assert(std::get<0>(portNodes[1]).size() == 0); // 0 load for the first pointer + 
assert(std::get<1>(portNodes[1]).size() == 1); // 1 store for the second pointer + assert(std::get<2>(portNodes[1]).size() == 0); // 0 load for the first pointer + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryConverterTests-1", TestTraceArgument) + +static int +TestLoad() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }, + { jlm::rvsdg::bittype::Create(32), MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Single load + auto loadAddress = lambda->fctargument(0); + auto memoryStateArgument = lambda->fctargument(1); + auto loadOutput = LoadNonVolatileNode::Create( + loadAddress, + { memoryStateArgument }, + jlm::rvsdg::bittype::Create(32), + 32); + + auto lambdaOutput = lambda->finalize({ loadOutput[0], loadOutput[1] }); + jlm::llvm::GraphExport::Create(*lambdaOutput, "f"); + + // Act + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + MemoryConverter(*rvsdgModule); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Memory Converter replaces the lambda so we start from the root of the graph + auto region = rvsdgModule->Rvsdg().root(); + assert(region->nnodes() == 1); + lambda = jlm::util::AssertedCast(region->nodes.first()); + + // Assert + auto lambdaRegion = lambda->subregion(); + assert(lambdaRegion->nnodes() == 3); + assert(lambdaRegion->narguments() == 3); + assert(lambdaRegion->nresults() == 3); + + // Memory state + jlm::util::AssertedCast(&lambdaRegion->result(1)->origin()->type()); + + // Load Address + auto loadNode = + 
jlm::util::AssertedCast(lambdaRegion->result(0)->origin())->node(); + jlm::util::AssertedCast(&loadNode->operation()); + + // Load Data + loadNode = + jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); + jlm::util::AssertedCast(&loadNode->operation()); + + // Request Node + auto requestNode = + jlm::util::AssertedCast(lambdaRegion->result(2)->origin())->node(); + jlm::util::AssertedCast(&requestNode->operation()); + + // Response Node + auto responseNode = + jlm::util::AssertedCast(loadNode->input(2)->origin())->node(); + jlm::util::AssertedCast(&responseNode->operation()); + + // Response source + auto responseSource = responseNode->input(0)->origin(); + auto regionArgument = jlm::util::AssertedCast(responseSource); + assert(regionArgument->index() == 2); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryConverterTests-2", TestLoad) + +static int +TestLoadStore() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::llvm::PointerType::Create(), + jlm::rvsdg::bittype::Create(32), + MemoryStateType::Create() }, + { MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Load followed by store + auto loadAddress = lambda->fctargument(0); + auto storeData = lambda->fctargument(1); + auto memoryStateArgument = lambda->fctargument(2); + auto loadOutput = LoadNonVolatileNode::Create( + loadAddress, + { memoryStateArgument }, + jlm::llvm::PointerType::Create(), + 32); + auto storeOutput = StoreNonVolatileNode::Create(loadOutput[0], storeData, { loadOutput[1] }, 32); + + auto lambdaOutput = lambda->finalize({ 
storeOutput[0] }); + jlm::llvm::GraphExport::Create(*lambdaOutput, "f"); + + // Act + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + MemoryConverter(*rvsdgModule); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Memory Converter replaces the lambda so we start from the root of the graph + auto region = rvsdgModule->Rvsdg().root(); + assert(region->nnodes() == 1); + lambda = jlm::util::AssertedCast(region->nodes.first()); + + // Assert + auto lambdaRegion = lambda->subregion(); + assert(lambdaRegion->nnodes() == 5); + assert(lambdaRegion->narguments() == 5); + assert(lambdaRegion->nresults() == 3); + + // Memory state + std::cout << lambdaRegion->result(0)->origin()->type().debug_string() << std::endl; + jlm::util::AssertedCast(&lambdaRegion->result(0)->origin()->type()); + + // Store Node + auto storeNode = + jlm::util::AssertedCast(lambdaRegion->result(0)->origin())->node(); + jlm::util::AssertedCast(&storeNode->operation()); + + // Request Node + auto firstRequestNode = + jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); + jlm::util::AssertedCast(&firstRequestNode->operation()); + + // Request Node + auto secondRequestNode = + jlm::util::AssertedCast(lambdaRegion->result(2)->origin())->node(); + jlm::util::AssertedCast(&secondRequestNode->operation()); + + // Load node + auto loadNode = + jlm::util::AssertedCast(storeNode->input(0)->origin())->node(); + jlm::util::AssertedCast(&loadNode->operation()); + + // Response Node + auto responseNode = + jlm::util::AssertedCast(loadNode->input(2)->origin())->node(); + jlm::util::AssertedCast(&responseNode->operation()); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryConverterTests-3", TestLoadStore) + +static int +TestThetaLoad() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + 
nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::rvsdg::bittype::Create(32), + jlm::rvsdg::bittype::Create(32), + jlm::rvsdg::bittype::Create(32), + jlm::llvm::PointerType::Create(), + MemoryStateType::Create() }, + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Theta + auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); + auto thetaRegion = theta->subregion(); + // Predicate + auto idv = theta->add_loopvar(lambda->fctargument(0)); + auto lvs = theta->add_loopvar(lambda->fctargument(1)); + auto lve = theta->add_loopvar(lambda->fctargument(2)); + jlm::rvsdg::bitult_op ult(32); + jlm::rvsdg::bitsgt_op sgt(32); + jlm::rvsdg::bitadd_op add(32); + jlm::rvsdg::bitsub_op sub(32); + auto arm = jlm::rvsdg::simple_node::create_normalized( + thetaRegion, + add, + { idv->argument(), lvs->argument() })[0]; + auto cmp = + jlm::rvsdg::simple_node::create_normalized(thetaRegion, ult, { arm, lve->argument() })[0]; + auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); + idv->result()->divert_to(arm); + theta->set_predicate(match); + + // Load node + auto loadAddress = theta->add_loopvar(lambda->fctargument(3)); + auto memoryStateArgument = theta->add_loopvar(lambda->fctargument(4)); + auto loadOutput = LoadNonVolatileNode::Create( + loadAddress->argument(), + { memoryStateArgument->argument() }, + PointerType::Create(), + 32); + loadAddress->result()->divert_to(loadOutput[0]); + memoryStateArgument->result()->divert_to(loadOutput[1]); + + auto lambdaOutput = lambda->finalize({ theta->output(3), theta->output(4) }); + GraphExport::Create(*lambdaOutput, "f"); + + auto lambdaRegion = lambda->subregion(); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Act + mem_sep_argument(*rvsdgModule); + // Assert + 
jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + auto * const entryMemoryStateSplitInput = *lambdaRegion->argument(4)->begin(); + auto * entryMemoryStateSplitNode = jlm::rvsdg::input::GetNode(*entryMemoryStateSplitInput); + jlm::util::AssertedCast( + &entryMemoryStateSplitNode->operation()); + auto exitMemoryStateMergeNode = + jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); + jlm::util::AssertedCast( + &exitMemoryStateMergeNode->operation()); + + // Act + ConvertThetaNodes(*rvsdgModule); + // Simple assert as ConvertThetaNodes() is tested in separate unit tests + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + // Act + mem_queue(*rvsdgModule); + // Simple assert as mem_queue() is tested in separate unit tests + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + // Act + MemoryConverter(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Memory Converter replaces the lambda so we start from the root of the graph + auto region = rvsdgModule->Rvsdg().root(); + assert(region->nnodes() == 1); + lambda = jlm::util::AssertedCast(region->nodes.first()); + lambdaRegion = lambda->subregion(); + + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + // Request Node + auto requestNode = + jlm::util::AssertedCast(lambdaRegion->result(2)->origin())->node(); + jlm::util::AssertedCast(&requestNode->operation()); + + // HLS_LOOP Node + auto loopOutput = + jlm::util::AssertedCast(requestNode->input(0)->origin()); + auto loopNode = jlm::util::AssertedCast(loopOutput->node()); + jlm::util::AssertedCast(&loopNode->operation()); + // Loop Result + auto & thetaResult = loopOutput->results; + assert(thetaResult.size() == 
1); + // Load Node + auto loadNode = + jlm::util::AssertedCast(thetaResult.first()->origin())->node(); + jlm::util::AssertedCast(&loadNode->operation()); + // Loop Argument + auto thetaArgument = + jlm::util::AssertedCast(loadNode->input(1)->origin()); + auto thetaInput = thetaArgument->input(); + + // Response Node + auto responseNode = + jlm::util::AssertedCast(thetaInput->origin())->node(); + jlm::util::AssertedCast(&responseNode->operation()); + + // Lambda argument + jlm::util::AssertedCast(responseNode->input(0)->origin()); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryConverterTests-4", TestThetaLoad) From cf028fea7ecc0f6bdec7493bc8b70cf1cfc98dff Mon Sep 17 00:00:00 2001 From: halvorlinder <56249210+halvorlinder@users.noreply.github.com> Date: Mon, 18 Nov 2024 09:11:37 +0100 Subject: [PATCH 114/170] Roundtrip the undef node (#660) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR roundtrips the undef node and provides a unit test for it. In addition, it updates the commit hash used for building the mlir-rvsdg dialect to include the fix of the undef operation output type. 
--------- Co-authored-by: Håvard Krogstie Co-authored-by: Magnus Sjalander --- jlm/mlir/Makefile.sub | 1 + jlm/mlir/backend/JlmToMlirConverter.cpp | 6 ++ jlm/mlir/backend/JlmToMlirConverter.hpp | 3 + jlm/mlir/frontend/MlirToJlmConverter.cpp | 8 +++ jlm/mlir/frontend/MlirToJlmConverter.hpp | 1 + scripts/build-mlir.sh | 2 +- tests/jlm/mlir/TestJlmToMlirToJlm.cpp | 72 ++++++++++++++++++++++++ 7 files changed, 92 insertions(+), 1 deletion(-) create mode 100644 tests/jlm/mlir/TestJlmToMlirToJlm.cpp diff --git a/jlm/mlir/Makefile.sub b/jlm/mlir/Makefile.sub index 53886015b..73d8d4ccf 100644 --- a/jlm/mlir/Makefile.sub +++ b/jlm/mlir/Makefile.sub @@ -12,6 +12,7 @@ libmlir_HEADERS = \ libmlir_TESTS += \ tests/jlm/mlir/backend/TestJlmToMlirConverter \ tests/jlm/mlir/frontend/TestMlirToJlmConverter \ + tests/jlm/mlir/TestJlmToMlirToJlm \ libmlir_TEST_LIBS += \ libmlir \ diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 51d59ad26..96a6ab64d 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -337,6 +337,12 @@ JlmToMlirConverter::ConvertSimpleNode( ConvertType(node.output(0)->type()), // Control, ouput type ctlOp->value().alternative()); } + else if (auto undefOp = dynamic_cast(&node.operation())) + { + MlirOp = Builder_->create<::mlir::jlm::Undef>( + Builder_->getUnknownLoc(), + ConvertType(undefOp->GetType())); + } else if (auto matchOp = dynamic_cast(&(node.operation()))) { // ** region Create the MLIR mapping vector ** diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index cc2e13b9a..fcc6b9499 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -18,6 +18,9 @@ #include #include +// MLIR JLM dialects +#include + // MLIR generic dialects #include diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index e911ef3a9..048a4f67e 100644 --- 
a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -308,6 +308,14 @@ MlirToJlmConverter::ConvertOperation( return ConvertCmpIOp(ComOp, inputs, integerType.getWidth()); } + else if (auto UndefOp = ::mlir::dyn_cast<::mlir::jlm::Undef>(&mlirOperation)) + { + auto type = UndefOp.getResult().getType(); + std::shared_ptr jlmType = ConvertType(type); + auto jlmUndefOutput = jlm::llvm::UndefValueOperation::Create(rvsdgRegion, jlmType); + return rvsdg::output::GetNode(*jlmUndefOutput); + } + // * region Structural nodes ** else if (auto MlirCtrlConst = ::mlir::dyn_cast<::mlir::rvsdg::ConstantCtrl>(&mlirOperation)) { diff --git a/jlm/mlir/frontend/MlirToJlmConverter.hpp b/jlm/mlir/frontend/MlirToJlmConverter.hpp index 49cd37118..65755af7a 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.hpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.hpp @@ -14,6 +14,7 @@ #include #include +#include #include #include diff --git a/scripts/build-mlir.sh b/scripts/build-mlir.sh index 7a82b11da..274fc033a 100755 --- a/scripts/build-mlir.sh +++ b/scripts/build-mlir.sh @@ -1,7 +1,7 @@ #!/bin/bash set -eu -GIT_COMMIT=90f30f1112906f2868fb42a6fa1a20fb8a20e03b +GIT_COMMIT=3cdd282061b1f167fe4c3cb79f89b55666a4cff8 # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" diff --git a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp new file mode 100644 index 000000000..4517d1aba --- /dev/null +++ b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp @@ -0,0 +1,72 @@ +/* + * Copyright 2024 Halvor Linder Henriksen + * See COPYING for terms of redistribution. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include + +static int +TestUndef() +{ + using namespace jlm::llvm; + using namespace mlir::rvsdg; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto graph = &rvsdgModule->Rvsdg(); + + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + nf->set_mutable(false); + { + // Create an undef operation + std::cout << "Undef Operation" << std::endl; + UndefValueOperation::Create(*graph->root(), jlm::rvsdg::bittype::Create(32)); + + // Convert the RVSDG to MLIR + std::cout << "Convert to MLIR" << std::endl; + jlm::mlir::JlmToMlirConverter mlirgen; + auto omega = mlirgen.ConvertModule(*rvsdgModule); + + std::cout << "Checking blocks and operations count" << std::endl; + auto & omegaRegion = omega.getRegion(); + assert(omegaRegion.getBlocks().size() == 1); + auto & omegaBlock = omegaRegion.front(); + // 1 undef + omegaResult + assert(omegaBlock.getOperations().size() == 2); + assert(mlir::isa(omegaBlock.front())); + auto mlirUndefOp = mlir::dyn_cast<::mlir::jlm::Undef>(&omegaBlock.front()); + mlirUndefOp.dump(); + + // Convert the MLIR to RVSDG and check the result + std::cout << "Converting MLIR to RVSDG" << std::endl; + std::unique_ptr rootBlock = std::make_unique(); + rootBlock->push_back(omega); + auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); + auto region = rvsdgModule->Rvsdg().root(); + + { + using namespace jlm::llvm; + + assert(region->nnodes() == 1); + + // Get the undef op + auto convertedUndef = + dynamic_cast(®ion->nodes.first()->operation()); + + assert(convertedUndef != nullptr); + + auto outputType = convertedUndef->result(0); + assert(jlm::rvsdg::is(outputType)); + assert(std::dynamic_pointer_cast(outputType)->nbits() == 32); + } + } + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/mlir/TestMlirUndefGen", TestUndef) From f1828f56eedec97864612d53edd779509adaccbd Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Mon, 18 
Nov 2024 18:45:43 +0100 Subject: [PATCH 115/170] Cancel GitHub workflow if another commit is made to the same PR (#665) Adds the `cancel-in-progress: true` option to our GitHub workflows, which triggers if another commit is made to the same PR. I my experience, we only care about the latest commit, which makes it wasteful to finish the old workflow when a new commit is added. The exact code used in the PR is taken from [here](https://turso.tech/blog/simple-trick-to-save-environment-and-money-when-using-github-actions). --- .github/workflows/CheckHeaders.yml | 4 ++++ .github/workflows/ClangFormat.yml | 4 ++++ .github/workflows/Doxygen.yml | 4 ++++ .github/workflows/hls.yml | 4 ++++ .github/workflows/tests.yml | 4 ++++ 5 files changed, 20 insertions(+) diff --git a/.github/workflows/CheckHeaders.yml b/.github/workflows/CheckHeaders.yml index 1b954b286..e0dcf5912 100644 --- a/.github/workflows/CheckHeaders.yml +++ b/.github/workflows/CheckHeaders.yml @@ -4,6 +4,10 @@ on: pull_request: branches: [ master ] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: CheckHeaders: runs-on: ubuntu-22.04 diff --git a/.github/workflows/ClangFormat.yml b/.github/workflows/ClangFormat.yml index e4b068b36..6f72c41e1 100644 --- a/.github/workflows/ClangFormat.yml +++ b/.github/workflows/ClangFormat.yml @@ -4,6 +4,10 @@ on: pull_request: branches: [ master ] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: CheckFormat: runs-on: ubuntu-22.04 diff --git a/.github/workflows/Doxygen.yml b/.github/workflows/Doxygen.yml index 45b6c5e23..fa960ead3 100644 --- a/.github/workflows/Doxygen.yml +++ b/.github/workflows/Doxygen.yml @@ -4,6 +4,10 @@ on: pull_request: branches: [ master ] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: 
GenerateDocumentation: runs-on: ubuntu-22.04 diff --git a/.github/workflows/hls.yml b/.github/workflows/hls.yml index 52c91fc99..72c4a85d7 100644 --- a/.github/workflows/hls.yml +++ b/.github/workflows/hls.yml @@ -4,6 +4,10 @@ on: pull_request: branches: [ master ] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: hls-test-suite: diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index d7938d881..79515ce04 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -4,6 +4,10 @@ on: pull_request: branches: [ master ] +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + jobs: build: runs-on: ubuntu-22.04 From 7945a095398306c89ff9d83b62599131ecfa18ba Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 18 Nov 2024 19:11:53 +0100 Subject: [PATCH 116/170] Privatize top nodes in Region class (#663) --- jlm/hls/opt/cne.cpp | 2 +- jlm/llvm/opt/cne.cpp | 2 +- jlm/llvm/opt/push.cpp | 8 +++--- jlm/rvsdg/node.cpp | 14 +++++++--- jlm/rvsdg/region.cpp | 25 +++++++++++++++++- jlm/rvsdg/region.hpp | 44 +++++++++++++++++++++++++++++--- jlm/rvsdg/simple-normal-form.cpp | 2 +- jlm/rvsdg/traverser.cpp | 2 +- 8 files changed, 82 insertions(+), 17 deletions(-) diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index b660c5522..9949d30d0 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -428,7 +428,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) { if (node->ninputs() == 0) { - for (const auto & other : node->region()->top_nodes) + for (const auto & other : node->region()->TopNodes()) { if (&other != node && node->operation() == other.operation()) { diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 5527f69a1..63f4673f8 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -389,7 +389,7 @@ mark(const jlm::rvsdg::simple_node * node, 
cnectx & ctx) { if (node->ninputs() == 0) { - for (const auto & other : node->region()->top_nodes) + for (const auto & other : node->region()->TopNodes()) { if (&other != node && node->operation() == other.operation()) { diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index ea814c538..3260ba907 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -165,8 +165,8 @@ push(rvsdg::GammaNode * gamma) { auto region = gamma->subregion(r); - /* push out all nullary nodes */ - for (auto & node : region->top_nodes) + // push out all nullary nodes + for (auto & node : region->TopNodes()) { if (!has_side_effects(&node)) copy_from_gamma(&node, r); @@ -233,8 +233,8 @@ push_top(rvsdg::ThetaNode * theta) { auto subregion = theta->subregion(); - /* push out all nullary nodes */ - for (auto & node : subregion->top_nodes) + // push out all nullary nodes + for (auto & node : subregion->TopNodes()) { if (!has_side_effects(&node)) copy_from_theta(&node); diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index ccf7b3e30..70b33135c 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -196,7 +196,8 @@ node::node(std::unique_ptr op, rvsdg::Region * region) { bool wasAdded = region->AddBottomNode(*this); JLM_ASSERT(wasAdded); - region->top_nodes.push_back(this); + wasAdded = region->AddTopNode(*this); + JLM_ASSERT(wasAdded); region->nodes.push_back(this); } @@ -207,7 +208,10 @@ node::~node() JLM_ASSERT(wasRemoved); if (ninputs() == 0) - region()->top_nodes.erase(this); + { + wasRemoved = region()->RemoveTopNode(*this); + JLM_ASSERT(wasRemoved); + } inputs_.clear(); region()->nodes.erase(this); @@ -221,7 +225,8 @@ node::add_input(std::unique_ptr input) if (ninputs() == 0) { JLM_ASSERT(depth() == 0); - region()->top_nodes.erase(this); + const auto wasRemoved = region()->RemoveTopNode(*this); + JLM_ASSERT(wasRemoved); } input->index_ = ninputs(); @@ -262,7 +267,8 @@ node::RemoveInput(size_t index) if (ninputs() == 0) { JLM_ASSERT(depth() == 0); - 
region()->top_nodes.push_back(this); + const auto wasAdded = region()->AddTopNode(*this); + JLM_ASSERT(wasAdded); } } diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index da1c21ce1..1ed5deb48 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -130,7 +130,7 @@ Region::~Region() noexcept prune(false); JLM_ASSERT(nodes.empty()); - JLM_ASSERT(top_nodes.empty()); + JLM_ASSERT(NumTopNodes() == 0); JLM_ASSERT(NumBottomNodes() == 0); while (arguments_.size()) @@ -225,6 +225,21 @@ Region::remove_node(jlm::rvsdg::node * node) delete node; } +bool +Region::AddTopNode(rvsdg::node & node) +{ + if (node.region() != this) + return false; + + if (node.ninputs() != 0) + return false; + + // FIXME: We should check that a node is not already part of the top nodes before adding it. + TopNodes_.push_back(&node); + + return true; +} + bool Region::AddBottomNode(rvsdg::node & node) { @@ -248,6 +263,14 @@ Region::RemoveBottomNode(rvsdg::node & node) return numBottomNodes != NumBottomNodes(); } +bool +Region::RemoveTopNode(rvsdg::node & node) +{ + auto numTopNodes = NumTopNodes(); + TopNodes_.erase(&node); + return numTopNodes != NumTopNodes(); +} + void Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool copy_results) const { diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index da44f532b..0050c40d9 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -302,7 +302,7 @@ class Region [[nodiscard]] TopNodeRange TopNodes() noexcept { - return { top_nodes.begin(), top_nodes.end() }; + return { TopNodes_.begin(), TopNodes_.end() }; } /** @@ -311,7 +311,7 @@ class Region [[nodiscard]] TopNodeConstRange TopNodes() const noexcept { - return { top_nodes.begin(), top_nodes.end() }; + return { TopNodes_.begin(), TopNodes_.end() }; } /** @@ -513,6 +513,15 @@ class Region return nodes.size(); } + /** + * @return The number of top nodes in the region. 
+ */ + [[nodiscard]] size_t + NumTopNodes() const noexcept + { + return TopNodes_.size(); + } + /** * @return The number of bottom nodes in the region. */ @@ -525,6 +534,22 @@ class Region void remove_node(jlm::rvsdg::node * node); + /** + * \brief Adds \p node to the top nodes of the region. + * + * The node \p node is only added to the top nodes of this region, iff: + * 1. The node \p node belongs to the same region instance. + * 2. The node \p node has no inputs. + * + * @param node The node that is added. + * @return True, if \p node was added, otherwise false. + * + * @note This method is automatically invoked when a node is created. There is + * no need to invoke it manually. + */ + bool + AddTopNode(rvsdg::node & node); + /** * \brief Adds \p node to the bottom nodes of the region. * @@ -541,6 +566,18 @@ class Region bool AddBottomNode(rvsdg::node & node); + /** + * Removes \p node from the top nodes in the region. + * + * @param node The node that is removed. + * @return True, if \p node was a top node and removed, otherwise false. + * + * @note This method is automatically invoked when inputs are added to a node. There is no need to + * invoke it manually. + */ + bool + RemoveTopNode(rvsdg::node & node); + /** * Removes \p node from the bottom nodes in the region. 
* @@ -650,8 +687,6 @@ class Region region_nodes_list nodes; - region_top_node_list top_nodes; - private: static void ToTree( @@ -682,6 +717,7 @@ class Region std::vector results_; std::vector arguments_; region_bottom_node_list BottomNodes_; + region_top_node_list TopNodes_; }; static inline void diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 90b8e51a5..3386b030e 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -31,7 +31,7 @@ node_cse( } else { - for (auto & node : region->top_nodes) + for (auto & node : region->TopNodes()) { if (cse_test(&node)) return &node; diff --git a/jlm/rvsdg/traverser.cpp b/jlm/rvsdg/traverser.cpp index 25d30ef02..e763f470e 100644 --- a/jlm/rvsdg/traverser.cpp +++ b/jlm/rvsdg/traverser.cpp @@ -22,7 +22,7 @@ topdown_traverser::topdown_traverser(rvsdg::Region * region) : region_(region), tracker_(region->graph()) { - for (auto & node : region->top_nodes) + for (auto & node : region->TopNodes()) tracker_.set_nodestate(&node, traversal_nodestate::frontier); for (size_t n = 0; n < region->narguments(); n++) From 49d8193d50abc8a6601fd32bd6c34355696dd32d Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Tue, 19 Nov 2024 08:22:51 +0100 Subject: [PATCH 117/170] [AndersenAgnostic] add flags for soundly handling pointer smuggling through loads and stores of scalar values (#664) Adds flags for soundly handling any potential pointer smuggling through loading or storing pointers as scalars Also includes some small changes: - Add counts of the different types of PointerObject to the statistics - Add `#include ` to a file because I needed that to compile it. - Fix bug where OnlineCD would never be combined with DP or PIP. - Add environment variable option for using a specific config. 
--------- Co-authored-by: Nico Reissmann --- jlm/llvm/opt/RvsdgTreePrinter.cpp | 1 + jlm/llvm/opt/alias-analyses/Andersen.cpp | 78 ++++++++--- jlm/llvm/opt/alias-analyses/Andersen.hpp | 7 + .../opt/alias-analyses/PointerObjectSet.cpp | 100 ++++++++++++-- .../opt/alias-analyses/PointerObjectSet.hpp | 47 ++++++- .../alias-analyses/TestPointerObjectSet.cpp | 124 +++++++++++++++++- 6 files changed, 322 insertions(+), 35 deletions(-) diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 87788f01c..68f3e4949 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -8,6 +8,7 @@ #include #include +#include #include namespace jlm::llvm diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 83a8179ba..847d880b2 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -104,7 +104,7 @@ Andersen::Configuration::GetAllConfigurations() PickHybridCycleDetection(config); config.EnableOnlineCycleDetection(true); // OnlineCD can not be combined with HybridCD or LazyCD - configs.push_back(config); + PickDifferencePropagation(config); }; auto PickWorklistPolicy = [&](Configuration config) { @@ -160,13 +160,19 @@ class Andersen::Statistics final : public util::Statistics // A PointerObject of Register kind can represent multiple outputs in RVSDG. Sum them up. 
static constexpr const char * NumRegistersMappedToPointerObject_ = "#RegistersMappedToPointerObject"; + static constexpr const char * NumAllocaPointerObjects = "#AllocaPointerObjects"; + static constexpr const char * NumMallocPointerObjects = "#MallocPointerObjects"; + static constexpr const char * NumGlobalPointerObjects = "#GlobalPointerObjects"; + static constexpr const char * NumFunctionPointerObjects = "#FunctionPointerObjects"; + static constexpr const char * NumImportPointerObjects = "#ImportPointerObjects"; static constexpr const char * NumBaseConstraints_ = "#BaseConstraints"; static constexpr const char * NumSupersetConstraints_ = "#SupersetConstraints"; static constexpr const char * NumStoreConstraints_ = "#StoreConstraints"; static constexpr const char * NumLoadConstraints_ = "#LoadConstraints"; static constexpr const char * NumFunctionCallConstraints_ = "#FunctionCallConstraints"; - static constexpr const char * NumFlagConstraints_ = "#FlagConstraints"; + static constexpr const char * NumScalarFlagConstraints_ = "#ScalarFlagConstraints"; + static constexpr const char * NumOtherFlagConstraints_ = "#OtherFlagConstraints"; static constexpr const char * Configuration_ = "Configuration"; @@ -285,12 +291,30 @@ class Andersen::Statistics final : public util::Statistics { GetTimer(SetAndConstraintBuildingTimer_).stop(); + // Measure the number of pointer objects of different kinds AddMeasurement(NumPointerObjects_, set.NumPointerObjects()); AddMeasurement(NumMemoryPointerObjects_, set.NumMemoryPointerObjects()); AddMeasurement(NumMemoryPointerObjectsCanPoint_, set.NumMemoryPointerObjectsCanPoint()); AddMeasurement(NumRegisterPointerObjects_, set.NumRegisterPointerObjects()); AddMeasurement(NumRegistersMappedToPointerObject_, set.GetRegisterMap().size()); + AddMeasurement( + NumAllocaPointerObjects, + set.NumPointerObjectsOfKind(PointerObjectKind::AllocaMemoryObject)); + AddMeasurement( + NumMallocPointerObjects, + 
set.NumPointerObjectsOfKind(PointerObjectKind::MallocMemoryObject)); + AddMeasurement( + NumGlobalPointerObjects, + set.NumPointerObjectsOfKind(PointerObjectKind::GlobalMemoryObject)); + AddMeasurement( + NumFunctionPointerObjects, + set.NumPointerObjectsOfKind(PointerObjectKind::FunctionMemoryObject)); + AddMeasurement( + NumImportPointerObjects, + set.NumPointerObjectsOfKind(PointerObjectKind::ImportMemoryObject)); + + // Count the number of constraints of different kinds size_t numSupersetConstraints = 0; size_t numStoreConstraints = 0; size_t numLoadConstraints = 0; @@ -307,7 +331,9 @@ class Andersen::Statistics final : public util::Statistics AddMeasurement(NumStoreConstraints_, numStoreConstraints); AddMeasurement(NumLoadConstraints_, numLoadConstraints); AddMeasurement(NumFunctionCallConstraints_, numFunctionCallConstraints); - AddMeasurement(NumFlagConstraints_, constraints.NumFlagConstraints()); + const auto [scalarFlags, otherFlags] = constraints.NumFlagConstraints(); + AddMeasurement(NumScalarFlagConstraints_, scalarFlags); + AddMeasurement(NumOtherFlagConstraints_, otherFlags); } void @@ -668,15 +694,15 @@ Andersen::AnalyzeLoad(const LoadNode & loadNode) const auto addressRegisterPO = Set_->GetRegisterPointerObject(addressRegister); - if (!IsOrContainsPointerType(outputRegister.type())) + if (IsOrContainsPointerType(outputRegister.type())) { - // TODO: When reading address as an integer, some of address' target might still pointers, - // which should now be considered as having escaped - return; + const auto outputRegisterPO = Set_->CreateRegisterPointerObject(outputRegister); + Constraints_->AddConstraint(LoadConstraint(outputRegisterPO, addressRegisterPO)); + } + else + { + Set_->MarkAsLoadingAsScalar(addressRegisterPO); } - - const auto outputRegisterPO = Set_->CreateRegisterPointerObject(outputRegister); - Constraints_->AddConstraint(LoadConstraint(outputRegisterPO, addressRegisterPO)); } void @@ -685,18 +711,18 @@ Andersen::AnalyzeStore(const 
StoreNode & storeNode) const auto & addressRegister = *storeNode.GetAddressInput().origin(); const auto & valueRegister = *storeNode.GetStoredValueInput().origin(); + const auto addressRegisterPO = Set_->GetRegisterPointerObject(addressRegister); + // If the written value is not a pointer, be conservative and mark the address - if (!IsOrContainsPointerType(valueRegister.type())) + if (IsOrContainsPointerType(valueRegister.type())) { - // TODO: We are writing an integer to *address, - // which really should mark all of address' targets as pointing to external - // in case they are ever read as pointers. - return; + const auto valueRegisterPO = Set_->GetRegisterPointerObject(valueRegister); + Constraints_->AddConstraint(StoreConstraint(addressRegisterPO, valueRegisterPO)); + } + else + { + Set_->MarkAsStoringAsScalar(addressRegisterPO); } - - const auto addressRegisterPO = Set_->GetRegisterPointerObject(addressRegister); - const auto valueRegisterPO = Set_->GetRegisterPointerObject(valueRegister); - Constraints_->AddConstraint(StoreConstraint(addressRegisterPO, valueRegisterPO)); } void @@ -1291,6 +1317,9 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis size_t testAllConfigsIterations = 0; if (auto testAllConfigsString = std::getenv(ENV_TEST_ALL_CONFIGS)) testAllConfigsIterations = std::stoi(testAllConfigsString); + std::optional useExactConfig; + if (auto useExactConfigString = std::getenv(ENV_USE_EXACT_CONFIG)) + useExactConfig = std::stoi(useExactConfigString); const bool doubleCheck = std::getenv(ENV_DOUBLE_CHECK); const bool dumpGraphs = std::getenv(ENV_DUMP_SUBSET_GRAPH); @@ -1307,13 +1336,20 @@ Andersen::Analyze(const RvsdgModule & module, util::StatisticsCollector & statis if (dumpGraphs) Constraints_->DrawSubsetGraph(writer); - SolveConstraints(*Constraints_, Config_, *statistics); + auto config = Config_; + if (useExactConfig.has_value()) + { + auto allConfigs = Configuration::GetAllConfigurations(); + config = 
allConfigs.at(*useExactConfig); + } + + SolveConstraints(*Constraints_, config, *statistics); statistics->AddStatisticsFromSolution(*Set_); if (dumpGraphs) { auto & graph = Constraints_->DrawSubsetGraph(writer); - graph.AppendToLabel("After Solving with " + Config_.ToString()); + graph.AppendToLabel("After Solving with " + config.ToString()); writer.OutputAllGraphs(std::cout, util::GraphOutputFormat::Dot); } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index bc59f805e..5cb70ea6c 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -34,6 +34,13 @@ class Andersen final : public AliasAnalysis */ static inline const char * const ENV_TEST_ALL_CONFIGS = "JLM_ANDERSEN_TEST_ALL_CONFIGS"; + /** + * Alternative to testing all configs, this environment variable specifies exactly which config to + * use. It must be an index into the Configuration::GetAllConfigurations() vector. + * Should likely not be combined with ENV_TEST_ALL_CONFIGS or ENV_DOUBLE_CHECK + */ + static inline const char * const ENV_USE_EXACT_CONFIG = "JLM_ANDERSEN_USE_EXACT_CONFIG"; + /** * Environment variable that will trigger double checking of the analysis. * If ENV_TEST_ALL_CONFIGS is set, the output is double checked against them all. 
diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index ab73d4f48..e07a2f749 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -301,6 +301,38 @@ PointerObjectSet::CanTrackPointeesImplicitly(PointerObjectIndex index) const noe return PointerObjects_[root].CanTrackPointeesImplicitly(); } +bool +PointerObjectSet::MarkAsStoringAsScalar(PointerObjectIndex index) +{ + auto root = GetUnificationRoot(index); + if (PointerObjects_[root].StoredAsScalar) + return false; + PointerObjects_[root].StoredAsScalar = true; + return true; +} + +[[nodiscard]] bool +PointerObjectSet::IsStoredAsScalar(PointerObjectIndex index) const noexcept +{ + return PointerObjects_[GetUnificationRoot(index)].StoredAsScalar; +} + +bool +PointerObjectSet::MarkAsLoadingAsScalar(PointerObjectIndex index) +{ + auto root = GetUnificationRoot(index); + if (PointerObjects_[root].LoadedAsScalar) + return false; + PointerObjects_[root].LoadedAsScalar = true; + return true; +} + +[[nodiscard]] bool +PointerObjectSet::IsLoadedAsScalar(PointerObjectIndex index) const noexcept +{ + return PointerObjects_[GetUnificationRoot(index)].LoadedAsScalar; +} + PointerObjectIndex PointerObjectSet::GetUnificationRoot(PointerObjectIndex index) const noexcept { @@ -351,6 +383,10 @@ PointerObjectSet::UnifyPointerObjects(PointerObjectIndex object1, PointerObjectI MarkAsPointingToExternal(newRoot); if (HasPointeesEscaping(oldRoot)) MarkAsPointeesEscaping(newRoot); + if (IsStoredAsScalar(oldRoot)) + MarkAsStoringAsScalar(newRoot); + if (IsLoadedAsScalar(oldRoot)) + MarkAsLoadingAsScalar(newRoot); // Perform the actual unification PointerObjectParents_[oldRoot] = newRoot; @@ -753,17 +789,38 @@ FunctionCallConstraint::ApplyDirectly(PointerObjectSet & set) bool EscapeFlagConstraint::PropagateEscapedFlagsDirectly(PointerObjectSet & set) { - std::queue pointeeEscapers; + bool modified = false; + + // 
First handle all unification roots marked as storing or loading scalars + for (PointerObjectIndex idx = 0; idx < set.NumPointerObjects(); idx++) + { + if (!set.IsUnificationRoot(idx)) + continue; + + if (set.IsStoredAsScalar(idx)) + { + for (auto pointee : set.GetPointsToSet(idx).Items()) + { + modified |= set.MarkAsPointingToExternal(pointee); + } + } + if (set.IsLoadedAsScalar(idx)) + { + for (auto pointee : set.GetPointsToSet(idx).Items()) + { + modified |= set.MarkAsPointeesEscaping(pointee); + } + } + } - // First add all unification roots marked as PointeesEscaping + std::queue pointeeEscapers; + // Add all unification roots marked as PointeesEscaping to the queue for (PointerObjectIndex idx = 0; idx < set.NumPointerObjects(); idx++) { if (set.IsUnificationRoot(idx) && set.HasPointeesEscaping(idx)) pointeeEscapers.push(idx); } - bool modified = false; - // For all pointee escapers, check if they point to any PointerObjects not marked as escaped while (!pointeeEscapers.empty()) { @@ -927,24 +984,29 @@ PointerObjectConstraintSet::NumBaseConstraints() const noexcept return numBaseConstraints; } -size_t +std::pair PointerObjectConstraintSet::NumFlagConstraints() const noexcept { - size_t numFlagConstraints = 0; + size_t numScalarFlagConstraints = 0; + size_t numOtherFlagConstraints = 0; for (PointerObjectIndex i = 0; i < Set_.NumPointerObjects(); i++) { if (Set_.HasEscaped(i)) - numFlagConstraints++; + numOtherFlagConstraints++; if (!Set_.IsUnificationRoot(i)) continue; if (Set_.IsPointingToExternal(i)) - numFlagConstraints++; + numOtherFlagConstraints++; if (Set_.HasPointeesEscaping(i)) - numFlagConstraints++; + numOtherFlagConstraints++; + if (Set_.IsStoredAsScalar(i)) + numScalarFlagConstraints++; + if (Set_.IsLoadedAsScalar(i)) + numScalarFlagConstraints++; } - return numFlagConstraints; + return { numScalarFlagConstraints, numOtherFlagConstraints }; } /** @@ -1946,6 +2008,15 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) 
MarkAsPointeesEscaping(value); } + // If node has the stored as scalar constraint, but does not make its pointees escape outright + if (Set_.IsStoredAsScalar(node) && !Set_.HasPointeesEscaping(node)) + { + for (const auto pointee : newPointees.Items()) + { + MarkAsPointsToExternal(pointee); + } + } + // Loads on the form value = *n. for (const auto value : loadConstraints[node].Items()) { @@ -1958,6 +2029,15 @@ PointerObjectConstraintSet::RunWorklistSolver(WorklistStatistics & statistics) MarkAsPointsToExternal(value); } + // If node has the loaded as scalar constraint, but does not make its pointees escape outright + if (Set_.IsLoadedAsScalar(node) && !Set_.HasPointeesEscaping(node)) + { + for (const auto pointee : newPointees.Items()) + { + MarkAsPointeesEscaping(pointee); + } + } + // Function calls on the form (*n)() for (const auto callNode : callConstraints[node].Items()) { diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index 09f7cabf1..0a52599b6 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -78,12 +78,22 @@ class PointerObjectSet final // This flag is implied by HasEscaped uint8_t PointsToExternal : 1; + // If set, any pointee of this object should point to external. + // The unification root is the source of truth for this flag! + uint8_t StoredAsScalar : 1; + + // If set, any pointee of this object should mark its pointees as escaping. + // The unification root is the source of truth for this flag! 
+ uint8_t LoadedAsScalar : 1; + explicit PointerObject(PointerObjectKind kind, bool canPoint) : Kind(kind), CanPointFlag(canPoint), HasEscaped(0), PointeesEscaping(0), - PointsToExternal(0) + PointsToExternal(0), + StoredAsScalar(0), + LoadedAsScalar(0) { JLM_ASSERT(kind != PointerObjectKind::COUNT); @@ -386,6 +396,34 @@ class PointerObjectSet final [[nodiscard]] bool CanTrackPointeesImplicitly(PointerObjectIndex index) const noexcept; + /** + * Marks the PointerObject with the given \p index as holding the target of a scalar store. + * @return true if the flag was changed by this operation, false otherwise + */ + bool + MarkAsStoringAsScalar(PointerObjectIndex index); + + /** + * @return true if the PointerObject with the given \p index is the target of a scalar store, + * false otherwise. If it is, any pointee of \p index will be marked as pointing to external. + */ + [[nodiscard]] bool + IsStoredAsScalar(PointerObjectIndex index) const noexcept; + + /** + * Marks the PointerObject with the given \p index as holding the target of a scalar load. + * @return true if the flag was changed by this operation, false otherwise + */ + bool + MarkAsLoadingAsScalar(PointerObjectIndex index); + + /** + * @return true if the PointerObject with the given \p index is the target of a scalar load, false + * otherwise. If it is, any pointee of \p index will be marked as making its pointees escape. + */ + [[nodiscard]] bool + IsLoadedAsScalar(PointerObjectIndex index) const noexcept; + /** * @return the root in the unification the PointerObject with the given \p index belongs to. * PointerObjects that have not been unified will always be their own root. @@ -983,9 +1021,12 @@ class PointerObjectConstraintSet final NumBaseConstraints() const noexcept; /** - * @return the number of flag constraints, including memory objects that are not pointees. + * Gets the number of flag constraints, among all PointerObjects.
+ * Flags that are unified are only counted once (on the unification root). + * The count is divided into two: flags for loads/stores of scalars, and the other flags + * @return a pair (num flags on scalar operations, num other flags) */ - [[nodiscard]] size_t + [[nodiscard]] std::pair NumFlagConstraints() const noexcept; /** diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index aa2be6f9e..79319e385 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -494,13 +494,133 @@ TestEscapedFunctionConstraint() // Use both EscapedFunctionConstraint and EscapeFlagConstraint to propagate flags result = EscapedFunctionConstraint::PropagateEscapedFunctionsDirectly(set); - result &= EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + assert(result); + // Now the return value is marked as all pointees escaping, so make that happen + result = EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); // Now the local function has been marked as escaped as well, since it is the return value assert(result); assert(set.HasEscaped(localFunctionPO)); } +static void +TestStoredAsScalarFlag() +{ + using namespace jlm::llvm::aa; + + jlm::tests::NAllocaNodesTest rvsdg(3); + rvsdg.InitializeTest(); + + PointerObjectSet set; + const auto p0 = set.CreateDummyRegisterPointerObject(); + const auto p1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); + const auto p11 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); + const auto p2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); + + set.AddToPointsToSet(p0, p1); + set.AddToPointsToSet(p1, p11); + set.AddToPointsToSet(p0, p2); + + bool result = EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + assert(!result); + + set.MarkAsStoringAsScalar(p0); + result = EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + 
assert(result); + + // p0 should only have the single stored as scalar flag + assert(!set.HasEscaped(p0)); + assert(!set.HasPointeesEscaping(p0)); + assert(!set.IsPointingToExternal(p0)); + assert(!set.IsLoadedAsScalar(p0)); + assert(set.IsStoredAsScalar(p0)); + + // p1 and p2 should both point to external, but not any other flags + assert(!set.HasEscaped(p1)); + assert(!set.HasPointeesEscaping(p1)); + assert(set.IsPointingToExternal(p1)); + assert(!set.IsLoadedAsScalar(p1)); + assert(!set.IsStoredAsScalar(p1)); + + assert(!set.HasEscaped(p2)); + assert(!set.HasPointeesEscaping(p2)); + assert(set.IsPointingToExternal(p2)); + assert(!set.IsLoadedAsScalar(p2)); + assert(!set.IsStoredAsScalar(p2)); + + // p11 should have no flags + assert(!set.HasEscaped(p11)); + assert(!set.HasPointeesEscaping(p11)); + assert(!set.IsPointingToExternal(p11)); + assert(!set.IsLoadedAsScalar(p11)); + assert(!set.IsStoredAsScalar(p11)); + + // Applying again does nothing + result = EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + assert(!result); +} + +static void +TestLoadedAsScalarFlag() +{ + using namespace jlm::llvm::aa; + + jlm::tests::NAllocaNodesTest rvsdg(5); + rvsdg.InitializeTest(); + + PointerObjectSet set; + const auto p0 = set.CreateDummyRegisterPointerObject(); + const auto p1 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(0), true); + const auto p11 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(1), true); + const auto p12 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(2), true); + const auto p2 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(3), true); + const auto p21 = set.CreateAllocaMemoryObject(rvsdg.GetAllocaNode(4), true); + + set.AddToPointsToSet(p0, p1); + set.AddToPointsToSet(p1, p11); + set.AddToPointsToSet(p1, p12); + set.AddToPointsToSet(p0, p2); + set.AddToPointsToSet(p2, p21); + + bool result = EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + assert(!result); + + set.MarkAsLoadingAsScalar(p0); + result = 
EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + assert(result); + + // p0 should only have the single loaded as scalar flag + assert(!set.HasEscaped(p0)); + assert(!set.HasPointeesEscaping(p0)); + assert(!set.IsPointingToExternal(p0)); + assert(!set.IsStoredAsScalar(p0)); + assert(set.IsLoadedAsScalar(p0)); + + // p1 should only have the pointees escape flag + assert(!set.HasEscaped(p1)); + assert(set.HasPointeesEscaping(p1)); + assert(!set.IsPointingToExternal(p1)); + assert(!set.IsStoredAsScalar(p1)); + assert(!set.IsLoadedAsScalar(p1)); + + // p11, p12, p21 should have escaped, but not be flagged using the store or load flags + assert(set.HasEscaped(p11)); + assert(!set.IsLoadedAsScalar(p11)); + assert(!set.IsStoredAsScalar(p11)); + + assert(set.HasEscaped(p12)); + assert(!set.IsLoadedAsScalar(p12)); + assert(!set.IsStoredAsScalar(p12)); + + assert(set.HasEscaped(p21)); + assert(!set.IsLoadedAsScalar(p21)); + assert(!set.IsStoredAsScalar(p21)); + + // Applying again does nothing + result = EscapeFlagConstraint::PropagateEscapedFlagsDirectly(set); + assert(!result); +} + static void TestFunctionCallConstraint() { @@ -878,6 +998,8 @@ TestPointerObjectSet() TestStoreConstraintDirectly(); TestLoadConstraintDirectly(); TestEscapedFunctionConstraint(); + TestStoredAsScalarFlag(); + TestLoadedAsScalarFlag(); TestFunctionCallConstraint(); TestAddPointsToExternalConstraint(); TestAddRegisterContentEscapedConstraint(); From 40d718bae9477b2812a40506e15da96a6fe7f813 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 20 Nov 2024 18:32:33 +0100 Subject: [PATCH 118/170] Privatize nodes attribute in Region class (#666) --- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 4 +- .../rvsdg2rhls/DeadNodeElimination.cpp | 2 +- .../backend/rvsdg2rhls/GammaConversion.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/check-rhls.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 2 +- 
jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp | 4 +- jlm/llvm/ir/operators/Phi.cpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- jlm/llvm/opt/RvsdgTreePrinter.cpp | 4 +- .../RegionAwareMemoryNodeProvider.cpp | 4 +- .../TopDownMemoryNodeEliminator.cpp | 4 +- jlm/llvm/opt/inversion.cpp | 2 +- jlm/rvsdg/node.cpp | 6 ++- jlm/rvsdg/region.cpp | 37 ++++++++++++++----- jlm/rvsdg/region.hpp | 35 +++++++++++++++--- jlm/rvsdg/structural-node.hpp | 2 +- jlm/rvsdg/view.cpp | 4 +- .../rvsdg2rhls/MemoryConverterTests.cpp | 6 +-- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 6 +-- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 2 +- .../llvm/ThreeAddressCodeConversionTests.cpp | 4 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 6 +-- .../alias-analyses/TestMemoryStateEncoder.cpp | 4 +- .../opt/alias-analyses/TestPointsToGraph.cpp | 2 +- tests/jlm/llvm/opt/test-push.cpp | 4 +- tests/jlm/llvm/opt/test-unroll.cpp | 2 +- tests/jlm/mlir/TestJlmToMlirToJlm.cpp | 2 +- .../mlir/frontend/TestMlirToJlmConverter.cpp | 14 +++---- tests/jlm/rvsdg/RegionTests.cpp | 4 +- tests/jlm/rvsdg/test-graph.cpp | 4 +- 35 files changed, 119 insertions(+), 73 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index aba7e75f7..21434994a 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -139,7 +139,7 @@ BaseHLS::JlmSize(const jlm::rvsdg::Type * type) void BaseHLS::create_node_names(rvsdg::Region * r) { - for (auto & node : r->nodes) + for (auto & node : r->Nodes()) { if (dynamic_cast(&node)) { @@ -161,7 +161,7 @@ const jlm::llvm::lambda::node * BaseHLS::get_hls_lambda(llvm::RvsdgModule & rm) { auto region = rm.Rvsdg().root(); - auto ln = dynamic_cast(region->nodes.begin().ptr()); + auto ln = dynamic_cast(region->Nodes().begin().ptr()); if (region->nnodes() 
== 1 && ln) { return ln; diff --git a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp index 560b84e7f..e754aa0dc 100644 --- a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp +++ b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp @@ -120,7 +120,7 @@ EliminateDeadNodes(llvm::RvsdgModule & rvsdgModule) throw util::error("Root should have only one node now"); } - auto lambdaNode = dynamic_cast(rootRegion.nodes.begin().ptr()); + auto lambdaNode = dynamic_cast(rootRegion.Nodes().begin().ptr()); if (!lambdaNode) { throw util::error("Node needs to be a lambda"); diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 6df9bcfa4..6f145ff4f 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -105,7 +105,7 @@ CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) for (size_t i = 0; i < gammaNode.nsubregions(); ++i) { - for (auto & node : gammaNode.subregion(i)->nodes) + for (auto & node : gammaNode.subregion(i)->Nodes()) { if (rvsdg::is(&node) || rvsdg::is(&node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp index a0d2d4992..48edb9ded 100644 --- a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp @@ -47,11 +47,11 @@ check_rhls(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); auto root = graph.root(); - if (root->nodes.size() != 1) + if (root->nnodes() != 1) { throw jlm::util::error("Root should have only one node now"); } - auto ln = dynamic_cast(root->nodes.begin().ptr()); + auto ln = dynamic_cast(root->Nodes().begin().ptr()); if (!ln) { throw jlm::util::error("Node needs to be a lambda"); diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 8d072be98..d647f5a1a 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ 
b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -466,7 +466,7 @@ process_loopnode(loop_node * loopNode) void dae_conv(rvsdg::Region * region) { - auto lambda = dynamic_cast(region->nodes.begin().ptr()); + auto lambda = dynamic_cast(region->Nodes().begin().ptr()); bool changed; do { diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 020c6071a..5f219b7c1 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -64,7 +64,7 @@ instrument_ref(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); auto root = graph.root(); - auto lambda = dynamic_cast(root->nodes.begin().ptr()); + auto lambda = dynamic_cast(root->Nodes().begin().ptr()); auto newLambda = change_function_name(lambda, "instrumented_ref"); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index b5491e188..7ea04c483 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -565,7 +565,7 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) // auto root = rm.Rvsdg().root(); - auto lambda = dynamic_cast(root->nodes.begin().ptr()); + auto lambda = dynamic_cast(root->Nodes().begin().ptr()); // // Converting loads and stores to explicitly use memory ports diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index 2e64bf0f5..a8a087e36 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -522,7 +522,7 @@ process_loops(jlm::rvsdg::output * state_edge) void jlm::hls::mem_queue(jlm::rvsdg::Region * region) { - auto lambda = dynamic_cast(region->nodes.first()); + auto lambda = dynamic_cast(region->Nodes().begin().ptr()); auto state_arg = GetMemoryStateArgument(*lambda); if (!state_arg) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 60a51320e..0ef624bc8 100644 --- 
a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -133,7 +133,7 @@ route_through(rvsdg::Region * target, jlm::rvsdg::output * response) void mem_sep_independent(rvsdg::Region * region) { - auto lambda = dynamic_cast(region->nodes.begin().ptr()); + auto lambda = dynamic_cast(region->Nodes().begin().ptr()); auto lambda_region = lambda->subregion(); auto state_arg = GetMemoryStateArgument(*lambda); if (!state_arg) @@ -277,7 +277,7 @@ trace_edge( void mem_sep_argument(rvsdg::Region * region) { - auto lambda = dynamic_cast(region->nodes.begin().ptr()); + auto lambda = dynamic_cast(region->Nodes().begin().ptr()); auto lambda_region = lambda->subregion(); auto state_arg = GetMemoryStateArgument(*lambda); if (!state_arg) diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp index 06b42857c..21d942d93 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp @@ -326,11 +326,11 @@ dne(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); auto root = graph.root(); - if (root->nodes.size() != 1) + if (root->nnodes() != 1) { throw util::error("Root should have only one node now"); } - auto ln = dynamic_cast(root->nodes.begin().ptr()); + auto ln = dynamic_cast(root->Nodes().begin().ptr()); if (!ln) { throw util::error("Node needs to be a lambda"); diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 789b034d9..316fa1c34 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -100,7 +100,7 @@ node::ExtractLambdaNodes(const phi::node & phiNode) std::function &)> extractLambdaNodes = [&](auto & phiNode, auto & lambdaNodes) { - for (auto & node : phiNode.subregion()->nodes) + for (auto & node : phiNode.subregion()->Nodes()) { if (auto lambdaNode = dynamic_cast(&node)) { diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 40fb6cb82..7a7921bd1 100644 --- 
a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -314,7 +314,7 @@ DeadNodeElimination::SweepRegion(rvsdg::Region & region) const region.prune(false); std::vector> nodesTopDown(region.nnodes()); - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { nodesTopDown[node.depth()].push_back(&node); } diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index c1f790061..0ec6385f0 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -101,7 +101,7 @@ InvariantValueRedirection::RedirectInRegion(rvsdg::Region & region) // We do not need a traverser here and can just iterate through all the nodes of a region as // it is irrelevant in which order we handle the nodes. - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (auto gammaNode = dynamic_cast(&node)) { diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 68f3e4949..4f1729345 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -95,7 +95,7 @@ RvsdgTreePrinter::AnnotateNumRvsdgNodes( std::function annotateRegion = [&](const rvsdg::Region & region) { - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (auto structuralNode = dynamic_cast(&node)) { @@ -141,7 +141,7 @@ RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( std::count_if(resultRange.begin(), resultRange.end(), IsMemoryStateInput); annotationMap.AddAnnotation(®ion, { resultLabel, numMemoryStateResults }); - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (auto structuralNode = dynamic_cast(&node)) { diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index e52b940d0..dab9ea611 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ 
b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -675,7 +675,7 @@ RegionAwareMemoryNodeProvider::AnnotateRegion(rvsdg::Region & region) Provisioning_->AddRegionSummary(RegionSummary::Create(region)); } - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (auto structuralNode = dynamic_cast(&node)) { @@ -1044,7 +1044,7 @@ RegionAwareMemoryNodeProvider::ToRegionTree( subtree += util::strfmt(indent(depth), "MemoryNodes: ", toString(memoryNodes), "\n"); } - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (auto structuralNode = dynamic_cast(&node)) { diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 0691e3d4e..96bcd42ce 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -618,7 +618,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownPhi(const phi::node & phiNode) { std::vector lambdaNodes; util::HashSet liveNodes; - for (auto & node : phiSubregion.nodes) + for (auto & node : phiSubregion.Nodes()) { if (auto lambdaNode = dynamic_cast(&node)) { @@ -924,7 +924,7 @@ TopDownMemoryNodeEliminator::CheckInvariants( std::vector & regions, std::vector & callNodes) { - for (auto & node : rootRegion.nodes) + for (auto & node : rootRegion.Nodes()) { if (auto lambdaNode = dynamic_cast(&node)) { diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 5e2185c71..b582f4dc5 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -98,7 +98,7 @@ collect_condition_nodes(rvsdg::StructuralNode * tnode, jlm::rvsdg::StructuralNod JLM_ASSERT(gnode->region()->node() == tnode); std::vector> nodes; - for (auto & node : tnode->subregion(0)->nodes) + for (auto & node : tnode->subregion(0)->Nodes()) { if (&node == gnode) continue; diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 
70b33135c..d922dcb47 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -198,7 +198,8 @@ node::node(std::unique_ptr op, rvsdg::Region * region) JLM_ASSERT(wasAdded); wasAdded = region->AddTopNode(*this); JLM_ASSERT(wasAdded); - region->nodes.push_back(this); + wasAdded = region->AddNode(*this); + JLM_ASSERT(wasAdded); } node::~node() @@ -214,7 +215,8 @@ node::~node() } inputs_.clear(); - region()->nodes.erase(this); + wasRemoved = region()->RemoveNode(*this); + JLM_ASSERT(wasRemoved); } node_input * diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 1ed5deb48..dd294fd09 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -129,7 +129,7 @@ Region::~Region() noexcept RemoveResult(results_.size() - 1); prune(false); - JLM_ASSERT(nodes.empty()); + JLM_ASSERT(nnodes() == 0); JLM_ASSERT(NumTopNodes() == 0); JLM_ASSERT(NumBottomNodes() == 0); @@ -255,6 +255,17 @@ Region::AddBottomNode(rvsdg::node & node) return true; } +bool +Region::AddNode(rvsdg::node & node) +{ + if (node.region() != this) + return false; + + Nodes_.push_back(&node); + + return true; +} + bool Region::RemoveBottomNode(rvsdg::node & node) { @@ -271,6 +282,14 @@ Region::RemoveTopNode(rvsdg::node & node) return numTopNodes != NumTopNodes(); } +bool +Region::RemoveNode(rvsdg::node & node) +{ + auto numNodes = nnodes(); + Nodes_.erase(&node); + return numNodes != nnodes(); +} + void Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool copy_results) const { @@ -278,7 +297,7 @@ Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool // order nodes top-down std::vector> context(nnodes()); - for (const auto & node : nodes) + for (const auto & node : Nodes()) { JLM_ASSERT(node.depth() < context.size()); context[node.depth()].push_back(&node); @@ -327,7 +346,7 @@ Region::prune(bool recursive) if (!recursive) return; - for (const auto & node : nodes) + for (const auto & node : Nodes()) { if (auto snode = dynamic_cast(&node)) { @@ 
-363,7 +382,7 @@ size_t Region::NumRegions(const rvsdg::Region & region) noexcept { size_t numRegions = 1; - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (auto structuralNode = dynamic_cast(&node)) { @@ -417,7 +436,7 @@ Region::ToTree( // Convert the region's structural nodes with their subregions to a string indentationDepth++; indentationString = std::string(indentationDepth, indentationChar); - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (auto structuralNode = dynamic_cast(&node)) { @@ -499,7 +518,7 @@ size_t nnodes(const jlm::rvsdg::Region * region) noexcept { size_t n = region->nnodes(); - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (auto snode = dynamic_cast(&node)) { @@ -515,7 +534,7 @@ size_t nstructnodes(const rvsdg::Region * region) noexcept { size_t n = 0; - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (auto snode = dynamic_cast(&node)) { @@ -532,7 +551,7 @@ size_t nsimpnodes(const rvsdg::Region * region) noexcept { size_t n = 0; - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (auto snode = dynamic_cast(&node)) { @@ -552,7 +571,7 @@ size_t ninputs(const rvsdg::Region * region) noexcept { size_t n = region->nresults(); - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (auto snode = dynamic_cast(&node)) { diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 0050c40d9..60fa58703 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -320,7 +320,7 @@ class Region [[nodiscard]] NodeRange Nodes() noexcept { - return { nodes.begin(), nodes.end() }; + return { Nodes_.begin(), Nodes_.end() }; } /** @@ -329,7 +329,7 @@ class Region [[nodiscard]] NodeConstRange Nodes() const noexcept { - return { nodes.begin(), nodes.end() }; + return { Nodes_.begin(), Nodes_.end() }; } /** @@ -510,7 +510,7 @@ class Region inline 
size_t nnodes() const noexcept { - return nodes.size(); + return Nodes_.size(); } /** @@ -566,6 +566,20 @@ class Region bool AddBottomNode(rvsdg::node & node); + /** + * \brief Adds \p node to the region. + * + * The node \p node is only added to this region, iff \p node belongs to the same region instance. + * + * @param node The node that is added. + * @return True, if \p node was added, otherwise false. + * + * @note This method is automatically invoked when a node is created. There is no need to invoke + * it manually. + */ + bool + AddNode(rvsdg::node & node); + /** * Removes \p node from the top nodes in the region. * @@ -590,6 +604,18 @@ class Region bool RemoveBottomNode(rvsdg::node & node); + /** + * Remove \p node from the region. + * + * @param node The node that is removed. + * @return True, if \p node was removed, otherwise false. + * + * @note This method is automatically invoked when a node is deleted. There is no need to invoke + * it manually. + */ + bool + RemoveNode(rvsdg::node & node); + /** \brief Copy a region with substitutions \param target Target region to create nodes in @@ -685,8 +711,6 @@ class Region [[nodiscard]] static std::string ToTree(const rvsdg::Region & region) noexcept; - region_nodes_list nodes; - private: static void ToTree( @@ -718,6 +742,7 @@ class Region std::vector arguments_; region_bottom_node_list BottomNodes_; region_top_node_list TopNodes_; + region_nodes_list Nodes_; }; static inline void diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 21496de4c..93164b18c 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -148,7 +148,7 @@ template bool Region::Contains(const rvsdg::Region & region, bool checkSubregions) { - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (is(&node)) { diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 6bcba5568..3a129e8e8 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -95,7 +95,7 @@ 
region_body( std::unordered_map & map) { std::vector> context; - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (node.depth() >= context.size()) context.resize(node.depth() + 1); @@ -343,7 +343,7 @@ convert_region(const rvsdg::Region * region) for (size_t n = 0; n < region->narguments(); n++) s += argument_tag(id(region->argument(n))); - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) s += convert_node(&node); for (size_t n = 0; n < region->nresults(); n++) diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index e421d6f49..1e9fd4473 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -117,7 +117,7 @@ TestLoad() // Memory Converter replaces the lambda so we start from the root of the graph auto region = rvsdgModule->Rvsdg().root(); assert(region->nnodes() == 1); - lambda = jlm::util::AssertedCast(region->nodes.first()); + lambda = jlm::util::AssertedCast(region->Nodes().begin().ptr()); // Assert auto lambdaRegion = lambda->subregion(); @@ -203,7 +203,7 @@ TestLoadStore() // Memory Converter replaces the lambda so we start from the root of the graph auto region = rvsdgModule->Rvsdg().root(); assert(region->nnodes() == 1); - lambda = jlm::util::AssertedCast(region->nodes.first()); + lambda = jlm::util::AssertedCast(region->Nodes().begin().ptr()); // Assert auto lambdaRegion = lambda->subregion(); @@ -343,7 +343,7 @@ TestThetaLoad() // Memory Converter replaces the lambda so we start from the root of the graph auto region = rvsdgModule->Rvsdg().root(); assert(region->nnodes() == 1); - lambda = jlm::util::AssertedCast(region->nodes.first()); + lambda = jlm::util::AssertedCast(region->Nodes().begin().ptr()); lambdaRegion = lambda->subregion(); assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); diff --git 
a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 0fc973e3f..2b1997010 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -58,12 +58,12 @@ TestFork() { auto omegaRegion = rm.Rvsdg().root(); assert(omegaRegion->nnodes() == 1); - auto lambda = util::AssertedCast(omegaRegion->nodes.first()); + auto lambda = util::AssertedCast(omegaRegion->Nodes().begin().ptr()); assert(is(lambda)); auto lambdaRegion = lambda->subregion(); assert(lambdaRegion->nnodes() == 1); - auto loop = util::AssertedCast(lambdaRegion->nodes.first()); + auto loop = util::AssertedCast(lambdaRegion->Nodes().begin().ptr()); assert(is(loop)); // Traverse the rvsgd graph upwards to check connections @@ -124,7 +124,7 @@ TestConstantFork() { auto omegaRegion = rm.Rvsdg().root(); assert(omegaRegion->nnodes() == 1); - auto lambda = util::AssertedCast(omegaRegion->nodes.first()); + auto lambda = util::AssertedCast(omegaRegion->Nodes().begin().ptr()); assert(is(lambda)); auto lambdaRegion = lambda->subregion(); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 0555b039e..af5602049 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -159,7 +159,7 @@ TestLambda() // Assert assert(rvsdg.root()->nnodes() == 1); - auto & newLambdaNode = dynamic_cast(*rvsdg.root()->nodes.begin()); + auto & newLambdaNode = dynamic_cast(*rvsdg.root()->Nodes().begin()); assert(newLambdaNode.ninputs() == 2); assert(newLambdaNode.subregion()->narguments() == 3); assert(newLambdaNode.subregion()->nresults() == 2); diff --git a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp index f0bd91788..03a48d629 100644 --- 
a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp @@ -99,7 +99,7 @@ LoadVolatileConversion() auto lambdaOutput = rvsdgModule->Rvsdg().root()->result(0)->origin(); auto lambda = dynamic_cast(jlm::rvsdg::output::GetNode(*lambdaOutput)); - auto loadVolatileNode = lambda->subregion()->nodes.first(); + auto loadVolatileNode = lambda->subregion()->Nodes().begin().ptr(); assert(dynamic_cast(loadVolatileNode)); return 0; @@ -128,7 +128,7 @@ StoreVolatileConversion() auto lambdaOutput = rvsdgModule->Rvsdg().root()->result(0)->origin(); auto lambda = dynamic_cast(jlm::rvsdg::output::GetNode(*lambdaOutput)); - auto storeVolatileNode = lambda->subregion()->nodes.first(); + auto storeVolatileNode = lambda->subregion()->Nodes().begin().ptr(); assert(dynamic_cast(storeVolatileNode)); return 0; diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 52d98bbb4..bd7d92f8f 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -78,7 +78,7 @@ TestGamma() // jlm::rvsdg::view(graph.root(), stdout); assert(gamma->noutputs() == 2); - assert(gamma->subregion(1)->nodes.empty()); + assert(gamma->subregion(1)->nnodes() == 0); assert(gamma->subregion(1)->narguments() == 2); assert(gamma->ninputs() == 3); assert(graph.root()->narguments() == 2); @@ -153,7 +153,7 @@ TestTheta() // jlm::rvsdg::view(graph.root(), stdout); assert(theta->noutputs() == 3); - assert(theta->subregion()->nodes.size() == 1); + assert(theta->subregion()->nnodes() == 1); assert(graph.root()->narguments() == 2); } @@ -270,7 +270,7 @@ TestLambda() RunDeadNodeElimination(rm); // jlm::rvsdg::view(graph.root(), stdout); - assert(lambda->subregion()->nodes.empty()); + assert(lambda->subregion()->nnodes() == 0); assert(graph.root()->narguments() == 1); } diff --git 
a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 9324b6ca0..62edbbeb7 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -1253,7 +1253,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(is(*callXExitSplit, 1, 13)); jlm::rvsdg::node * undefNode = nullptr; - for (auto & node : test.GetLambdaTest().subregion()->nodes) + for (auto & node : test.GetLambdaTest().subregion()->Nodes()) { if (is(&node)) { @@ -1288,7 +1288,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(is(*callXExitSplit, 1, 13)); jlm::rvsdg::node * undefNode = nullptr; - for (auto & node : test.GetLambdaTest2().subregion()->nodes) + for (auto & node : test.GetLambdaTest2().subregion()->Nodes()) { if (is(&node)) { diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp index 9418aca9f..d72239fe5 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp @@ -49,7 +49,7 @@ class TestAnalysis final : public jlm::llvm::aa::AliasAnalysis { using namespace jlm::llvm; - for (auto & node : region.nodes) + for (auto & node : region.Nodes()) { if (jlm::rvsdg::is(&node)) { diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 97f319471..b85652c05 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -51,7 +51,7 @@ test_gamma() pushout.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(graph.root()->nodes.size() == 3); + assert(graph.root()->nnodes() == 3); } static inline void @@ -99,7 +99,7 @@ test_theta() pushout.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(graph.root()->nodes.size() 
== 3); + assert(graph.root()->nnodes() == 3); } static inline void diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index d0f0ca652..b9561bd46 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -24,7 +24,7 @@ static size_t nthetas(jlm::rvsdg::Region * region) { size_t n = 0; - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (jlm::rvsdg::is(&node)) n++; diff --git a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp index 4517d1aba..02495ac5c 100644 --- a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp +++ b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp @@ -57,7 +57,7 @@ TestUndef() // Get the undef op auto convertedUndef = - dynamic_cast(®ion->nodes.first()->operation()); + dynamic_cast(®ion->Nodes().begin()->operation()); assert(convertedUndef != nullptr); diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 174040d57..8cb826ca7 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -115,11 +115,11 @@ TestLambda() assert(region->nnodes() == 1); auto convertedLambda = - jlm::util::AssertedCast(region->nodes.first()); + jlm::util::AssertedCast(region->Nodes().begin().ptr()); assert(is(convertedLambda)); assert(convertedLambda->subregion()->nnodes() == 1); - assert(is(convertedLambda->subregion()->nodes.first())); + assert(is(convertedLambda->subregion()->Nodes().begin().ptr())); } } return 0; @@ -268,7 +268,7 @@ TestDivOperation() // Get the lambda block auto convertedLambda = - jlm::util::AssertedCast(region->nodes.first()); + jlm::util::AssertedCast(region->Nodes().begin().ptr()); assert(is(convertedLambda)); // 2 Constants + 1 DivUIOp @@ -446,7 +446,7 @@ TestCompZeroExt() // Get the lambda block auto convertedLambda = - jlm::util::AssertedCast(region->nodes.first()); + 
jlm::util::AssertedCast(region->Nodes().begin().ptr()); assert(is(convertedLambda)); // 2 Constants + AddOp + CompOp + ZeroExtOp @@ -659,7 +659,7 @@ TestMatchOp() // Get the lambda block auto convertedLambda = - jlm::util::AssertedCast(region->nodes.first()); + jlm::util::AssertedCast(region->Nodes().begin().ptr()); assert(is(convertedLambda)); auto lambdaRegion = convertedLambda->subregion(); @@ -832,7 +832,7 @@ TestGammaOp() // Get the lambda block auto convertedLambda = - jlm::util::AssertedCast(region->nodes.first()); + jlm::util::AssertedCast(region->Nodes().begin().ptr()); assert(is(convertedLambda)); auto lambdaRegion = convertedLambda->subregion(); @@ -981,7 +981,7 @@ TestThetaOp() // Get the lambda block auto convertedLambda = - jlm::util::AssertedCast(region->nodes.first()); + jlm::util::AssertedCast(region->Nodes().begin().ptr()); assert(is(convertedLambda)); auto lambdaRegion = convertedLambda->subregion(); diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 4677f8417..4a6b80fba 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -357,7 +357,7 @@ ToTree_EmptyRvsdgWithAnnotations() graph rvsdg; AnnotationMap annotationMap; - annotationMap.AddAnnotation(rvsdg.root(), Annotation("NumNodes", rvsdg.root()->nodes.size())); + annotationMap.AddAnnotation(rvsdg.root(), Annotation("NumNodes", rvsdg.root()->nnodes())); // Act auto tree = Region::ToTree(*rvsdg.root(), annotationMap); @@ -418,7 +418,7 @@ ToTree_RvsdgWithStructuralNodesAndAnnotations() auto subregion2 = structuralNode2->subregion(2); AnnotationMap annotationMap; - annotationMap.AddAnnotation(subregion2, Annotation("NumNodes", subregion2->nodes.size())); + annotationMap.AddAnnotation(subregion2, Annotation("NumNodes", subregion2->nnodes())); annotationMap.AddAnnotation(subregion2, Annotation("NumArguments", subregion2->narguments())); // Act diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 
ab35f8ad4..9fb5dab5d 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -16,7 +16,7 @@ static bool region_contains_node(const jlm::rvsdg::Region * region, const jlm::rvsdg::node * n) { - for (const auto & node : region->nodes) + for (const auto & node : region->Nodes()) { if (&node == n) return true; @@ -160,7 +160,7 @@ Copy() assert(is(copiedArgument)); assert(newGraph->root()->nnodes() == 1); - auto copiedNode = newGraph->root()->nodes.first(); + auto copiedNode = newGraph->root()->Nodes().begin().ptr(); assert(copiedNode->ninputs() == 1 && copiedNode->noutputs() == 1); assert(copiedNode->input(0)->origin() == copiedArgument); From d5d4f699e339136e93bc1e229074a65007c93c8c Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 25 Nov 2024 09:00:55 +0100 Subject: [PATCH 119/170] Rename graph class to Graph (#667) --- jlm/hls/opt/cne.cpp | 4 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 6 +- .../InterProceduralGraphConversion.cpp | 4 +- jlm/llvm/ir/RvsdgModule.hpp | 4 +- jlm/llvm/ir/operators/Load.cpp | 4 +- jlm/llvm/ir/operators/Load.hpp | 4 +- jlm/llvm/ir/operators/Store.cpp | 4 +- jlm/llvm/ir/operators/Store.hpp | 4 +- jlm/llvm/opt/DeadNodeElimination.cpp | 6 +- jlm/llvm/opt/DeadNodeElimination.hpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- jlm/llvm/opt/InvariantValueRedirection.hpp | 2 +- jlm/llvm/opt/OptimizationSequence.cpp | 4 +- jlm/llvm/opt/RvsdgTreePrinter.cpp | 6 +- jlm/llvm/opt/RvsdgTreePrinter.hpp | 8 +-- jlm/llvm/opt/alias-analyses/Andersen.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.hpp | 2 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 2 +- .../RegionAwareMemoryNodeProvider.cpp | 4 +- .../RegionAwareMemoryNodeProvider.hpp | 2 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 8 +-- jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 6 +- .../TopDownMemoryNodeEliminator.cpp | 4 +- jlm/llvm/opt/cne.cpp | 4 +- jlm/llvm/opt/inlining.cpp | 6 +- jlm/llvm/opt/inversion.cpp | 4 +- jlm/llvm/opt/pull.cpp | 4 +- 
jlm/llvm/opt/push.cpp | 4 +- jlm/llvm/opt/reduction.cpp | 16 ++--- jlm/llvm/opt/unroll.cpp | 4 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 2 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 2 +- jlm/rvsdg/RvsdgModule.hpp | 6 +- jlm/rvsdg/binary.cpp | 8 +-- jlm/rvsdg/binary.hpp | 10 +-- jlm/rvsdg/bitstring/concat.cpp | 4 +- jlm/rvsdg/gamma.cpp | 4 +- jlm/rvsdg/gamma.hpp | 4 +- jlm/rvsdg/graph.cpp | 19 +++--- jlm/rvsdg/graph.hpp | 12 ++-- jlm/rvsdg/node-normal-form.cpp | 6 +- jlm/rvsdg/node-normal-form.hpp | 12 ++-- jlm/rvsdg/node.cpp | 2 +- jlm/rvsdg/node.hpp | 6 +- jlm/rvsdg/nullary.cpp | 4 +- jlm/rvsdg/operation.cpp | 6 +- jlm/rvsdg/operation.hpp | 8 +-- jlm/rvsdg/region.cpp | 2 +- jlm/rvsdg/region.hpp | 6 +- jlm/rvsdg/simple-normal-form.cpp | 4 +- jlm/rvsdg/simple-normal-form.hpp | 2 +- jlm/rvsdg/statemux.cpp | 4 +- jlm/rvsdg/statemux.hpp | 4 +- jlm/rvsdg/structural-normal-form.cpp | 4 +- jlm/rvsdg/structural-normal-form.hpp | 2 +- jlm/rvsdg/tracker.cpp | 6 +- jlm/rvsdg/tracker.hpp | 10 +-- jlm/rvsdg/traverser.hpp | 6 +- jlm/rvsdg/unary.cpp | 4 +- jlm/rvsdg/unary.hpp | 4 +- jlm/rvsdg/view.hpp | 2 +- tests/TestRvsdgs.hpp | 2 +- tests/jlm/llvm/ir/operators/LoadTests.cpp | 14 ++--- tests/jlm/llvm/ir/operators/StoreTests.cpp | 12 ++-- tests/jlm/llvm/ir/operators/TestCall.cpp | 4 +- tests/jlm/llvm/ir/operators/TestFree.cpp | 2 +- tests/jlm/llvm/ir/operators/TestLambda.cpp | 2 +- tests/jlm/llvm/ir/operators/TestPhi.cpp | 2 +- tests/jlm/llvm/ir/operators/test-sext.cpp | 6 +- tests/jlm/llvm/opt/TestLoadMuxReduction.cpp | 6 +- tests/jlm/llvm/opt/TestLoadStoreReduction.cpp | 2 +- .../opt/alias-analyses/TestPointsToGraph.cpp | 2 +- tests/jlm/llvm/opt/test-pull.cpp | 2 +- tests/jlm/llvm/opt/test-push.cpp | 2 +- tests/jlm/llvm/opt/test-unroll.cpp | 12 ++-- tests/jlm/rvsdg/ArgumentTests.cpp | 4 +- tests/jlm/rvsdg/RegionTests.cpp | 26 ++++---- tests/jlm/rvsdg/ResultTests.cpp | 4 +- tests/jlm/rvsdg/TestStructuralNode.cpp | 2 +- tests/jlm/rvsdg/bitstring/bitstring.cpp | 62 
+++++++++---------- tests/jlm/rvsdg/test-binary.cpp | 4 +- tests/jlm/rvsdg/test-bottomup.cpp | 6 +- tests/jlm/rvsdg/test-cse.cpp | 2 +- tests/jlm/rvsdg/test-gamma.cpp | 16 ++--- tests/jlm/rvsdg/test-graph.cpp | 10 +-- tests/jlm/rvsdg/test-nodes.cpp | 8 +-- tests/jlm/rvsdg/test-statemux.cpp | 4 +- tests/jlm/rvsdg/test-theta.cpp | 10 +-- tests/jlm/rvsdg/test-topdown.cpp | 12 ++-- tests/jlm/rvsdg/test-typemismatch.cpp | 2 +- tests/test-operation.hpp | 4 +- 91 files changed, 278 insertions(+), 281 deletions(-) diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 9949d30d0..7f84b0bea 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -31,7 +31,7 @@ class cnestat final : public util::Statistics {} void - start_mark_stat(const jlm::rvsdg::graph & graph) noexcept + start_mark_stat(const Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); @@ -51,7 +51,7 @@ class cnestat final : public util::Statistics } void - end_divert_stat(const jlm::rvsdg::graph & graph) noexcept + end_divert_stat(const Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index b5541ea25..064e1f31a 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -31,7 +31,7 @@ class rvsdg_destruction_stat final : public util::Statistics {} void - start(const rvsdg::graph & graph) noexcept + start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); AddTimer(Label::Timer).start(); @@ -538,14 +538,14 @@ convert_node(const rvsdg::node & node, context & ctx) } static void -convert_nodes(const rvsdg::graph & graph, context & ctx) +convert_nodes(const rvsdg::Graph & 
graph, context & ctx) { for (const auto & node : rvsdg::topdown_traverser(graph.root())) convert_node(*node, ctx); } static void -convert_imports(const rvsdg::graph & graph, ipgraph_module & im, context & ctx) +convert_imports(const rvsdg::Graph & graph, ipgraph_module & im, context & ctx) { auto & ipg = im.ipgraph(); diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 566ab3c59..758df47de 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -303,7 +303,7 @@ class InterProceduralGraphToRvsdgStatistics final : public util::Statistics } void - End(const rvsdg::graph & graph) noexcept + End(const rvsdg::Graph & graph) noexcept { AddTimer(Label::Timer).stop(); AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); @@ -1095,7 +1095,7 @@ ConvertInterProceduralGraphNode( static void ConvertStronglyConnectedComponent( const std::unordered_set & stronglyConnectedComponent, - rvsdg::graph & graph, + rvsdg::Graph & graph, RegionalizedVariableMap & regionalizedVariableMap, InterProceduralGraphToRvsdgStatisticsCollector & statisticsCollector) { diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 86ac07ffc..1dc002039 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -22,7 +22,7 @@ class GraphImport final : public rvsdg::GraphImport { private: GraphImport( - rvsdg::graph & graph, + rvsdg::Graph & graph, std::shared_ptr valueType, std::string name, llvm::linkage linkage) @@ -49,7 +49,7 @@ class GraphImport final : public rvsdg::GraphImport static GraphImport & Create( - rvsdg::graph & graph, + rvsdg::Graph & graph, std::shared_ptr valueType, std::string name, llvm::linkage linkage) diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 471e1a3de..145740a7f 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ 
-564,7 +564,7 @@ load_normal_form::~load_normal_form() load_normal_form::load_normal_form( const std::type_info & opclass, rvsdg::node_normal_form * parent, - rvsdg::graph * graph) noexcept + rvsdg::Graph * graph) noexcept : simple_normal_form(opclass, parent, graph), enable_load_mux_(false), enable_load_store_(false), @@ -671,7 +671,7 @@ static jlm::rvsdg::node_normal_form * create_load_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::llvm::load_normal_form(opclass, parent, graph); } diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index db4beb029..5c84611ee 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -25,7 +25,7 @@ class load_normal_form final : public rvsdg::simple_normal_form load_normal_form( const std::type_info & opclass, rvsdg::node_normal_form * parent, - rvsdg::graph * graph) noexcept; + rvsdg::Graph * graph) noexcept; virtual bool normalize_node(rvsdg::node * node) const override; @@ -462,7 +462,7 @@ class LoadNonVolatileOperation final : public LoadOperation NumMemoryStates() const noexcept override; static load_normal_form * - GetNormalForm(rvsdg::graph * graph) noexcept + GetNormalForm(rvsdg::Graph * graph) noexcept { return jlm::util::AssertedCast( graph->node_normal_form(typeid(LoadNonVolatileOperation))); diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 6d7acdd4e..2b394175e 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -309,7 +309,7 @@ store_normal_form::~store_normal_form() store_normal_form::store_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept + rvsdg::Graph * graph) noexcept : simple_normal_form(opclass, parent, graph), enable_store_mux_(false), enable_store_store_(false), @@ -467,7 +467,7 @@ static 
jlm::rvsdg::node_normal_form * create_store_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::llvm::store_normal_form(opclass, parent, graph); } diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 89903aaca..70bcab3e2 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -25,7 +25,7 @@ class store_normal_form final : public jlm::rvsdg::simple_normal_form store_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept; + rvsdg::Graph * graph) noexcept; virtual bool normalize_node(jlm::rvsdg::node * node) const override; @@ -166,7 +166,7 @@ class StoreNonVolatileOperation final : public StoreOperation NumMemoryStates() const noexcept override; static store_normal_form * - GetNormalForm(rvsdg::graph * graph) noexcept + GetNormalForm(rvsdg::Graph * graph) noexcept { return util::AssertedCast( graph->node_normal_form(typeid(StoreNonVolatileOperation))); diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 7a7921bd1..f369805a6 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -103,7 +103,7 @@ class DeadNodeElimination::Statistics final : public util::Statistics {} void - StartMarkStatistics(const jlm::rvsdg::graph & graph) noexcept + StartMarkStatistics(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); @@ -123,7 +123,7 @@ class DeadNodeElimination::Statistics final : public util::Statistics } void - StopSweepStatistics(const jlm::rvsdg::graph & graph) noexcept + StopSweepStatistics(const rvsdg::Graph & graph) noexcept { GetTimer(SweepTimerLabel_).stop(); AddMeasurement(Label::NumRvsdgNodesAfter, 
rvsdg::nnodes(graph.root())); @@ -294,7 +294,7 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) } void -DeadNodeElimination::SweepRvsdg(jlm::rvsdg::graph & rvsdg) const +DeadNodeElimination::SweepRvsdg(rvsdg::Graph & rvsdg) const { SweepRegion(*rvsdg.root()); diff --git a/jlm/llvm/opt/DeadNodeElimination.hpp b/jlm/llvm/opt/DeadNodeElimination.hpp index d64fe280e..4c21b907b 100644 --- a/jlm/llvm/opt/DeadNodeElimination.hpp +++ b/jlm/llvm/opt/DeadNodeElimination.hpp @@ -85,7 +85,7 @@ class DeadNodeElimination final : public optimization MarkOutput(const jlm::rvsdg::output & output); void - SweepRvsdg(jlm::rvsdg::graph & rvsdg) const; + SweepRvsdg(rvsdg::Graph & rvsdg) const; void SweepRegion(rvsdg::Region & region) const; diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 0ec6385f0..ccc3bf5a0 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -60,7 +60,7 @@ InvariantValueRedirection::run( } void -InvariantValueRedirection::RedirectInRootRegion(rvsdg::graph & rvsdg) +InvariantValueRedirection::RedirectInRootRegion(rvsdg::Graph & rvsdg) { // We require a topdown traversal in the root region to ensure that a lambda node is visited // before its call nodes. 
This ensures that all invariant values are redirected in the lambda diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index 3830775cc..79f0b3dc5 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -58,7 +58,7 @@ class InvariantValueRedirection final : public optimization private: static void - RedirectInRootRegion(rvsdg::graph & rvsdg); + RedirectInRootRegion(rvsdg::Graph & rvsdg); static void RedirectInRegion(rvsdg::Region & region); diff --git a/jlm/llvm/opt/OptimizationSequence.cpp b/jlm/llvm/opt/OptimizationSequence.cpp index fb0286dbd..3179a0ef3 100644 --- a/jlm/llvm/opt/OptimizationSequence.cpp +++ b/jlm/llvm/opt/OptimizationSequence.cpp @@ -21,14 +21,14 @@ class OptimizationSequence::Statistics final : public util::Statistics {} void - StartMeasuring(const jlm::rvsdg::graph & graph) noexcept + StartMeasuring(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddTimer(Label::Timer).start(); } void - EndMeasuring(const jlm::rvsdg::graph & graph) noexcept + EndMeasuring(const rvsdg::Graph & graph) noexcept { GetTimer(Label::Timer).stop(); AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 4f1729345..2a5bfa118 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -65,7 +65,7 @@ RvsdgTreePrinter::run(RvsdgModule & rvsdgModule) } util::AnnotationMap -RvsdgTreePrinter::ComputeAnnotationMap(const rvsdg::graph & rvsdg) const +RvsdgTreePrinter::ComputeAnnotationMap(const rvsdg::Graph & rvsdg) const { util::AnnotationMap annotationMap; for (auto annotation : Configuration_.RequiredAnnotations().Items()) @@ -88,7 +88,7 @@ RvsdgTreePrinter::ComputeAnnotationMap(const rvsdg::graph & rvsdg) const void RvsdgTreePrinter::AnnotateNumRvsdgNodes( - const rvsdg::graph & 
rvsdg, + const rvsdg::Graph & rvsdg, util::AnnotationMap & annotationMap) { static std::string_view label("NumRvsdgNodes"); @@ -121,7 +121,7 @@ RvsdgTreePrinter::AnnotateNumRvsdgNodes( void RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( - const rvsdg::graph & rvsdg, + const rvsdg::Graph & rvsdg, util::AnnotationMap & annotationMap) { std::string_view argumentLabel("NumMemoryStateTypeArguments"); diff --git a/jlm/llvm/opt/RvsdgTreePrinter.hpp b/jlm/llvm/opt/RvsdgTreePrinter.hpp index a0a1c622c..0f40aa8dd 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.hpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.hpp @@ -13,7 +13,7 @@ namespace jlm::rvsdg { -class graph; +class Graph; class input; class output; } @@ -131,7 +131,7 @@ class RvsdgTreePrinter final : public optimization * @return An instance of \ref AnnotationMap. */ [[nodiscard]] util::AnnotationMap - ComputeAnnotationMap(const rvsdg::graph & rvsdg) const; + ComputeAnnotationMap(const rvsdg::Graph & rvsdg) const; /** * Adds an annotation to \p annotationMap that indicates the number of RVSDG nodes for regions @@ -143,7 +143,7 @@ class RvsdgTreePrinter final : public optimization * @see NumRvsdgNodes */ static void - AnnotateNumRvsdgNodes(const rvsdg::graph & rvsdg, util::AnnotationMap & annotationMap); + AnnotateNumRvsdgNodes(const rvsdg::Graph & rvsdg, util::AnnotationMap & annotationMap); /** * Adds an annotation to \p annotationMap that indicates the number of inputs/outputs of type @@ -156,7 +156,7 @@ class RvsdgTreePrinter final : public optimization */ static void AnnotateNumMemoryStateInputsOutputs( - const rvsdg::graph & rvsdg, + const rvsdg::Graph & rvsdg, util::AnnotationMap & annotationMap); void diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 847d880b2..784c05098 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -272,7 +272,7 @@ class Andersen::Statistics final : public util::Statistics {} void - 
StartAndersenStatistics(const rvsdg::graph & graph) noexcept + StartAndersenStatistics(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); AddTimer(AnalysisTimer_).start(); @@ -1199,7 +1199,7 @@ Andersen::AnalyzeRegion(rvsdg::Region & region) } void -Andersen::AnalyzeRvsdg(const rvsdg::graph & graph) +Andersen::AnalyzeRvsdg(const rvsdg::Graph & graph) { auto & rootRegion = *graph.root(); diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index 5cb70ea6c..f35a5b078 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -420,7 +420,7 @@ class Andersen final : public AliasAnalysis AnalyzeTheta(const rvsdg::ThetaNode & node); void - AnalyzeRvsdg(const rvsdg::graph & graph); + AnalyzeRvsdg(const rvsdg::Graph & graph); /** * Traverses the given module, and initializes the members Set_ and Constraints_ with diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 60b8f16a0..18ee787b8 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -27,7 +27,7 @@ class EncodingStatistics final : public util::Statistics {} void - Start(const rvsdg::graph & graph) + Start(const rvsdg::Graph & graph) { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddTimer(Label::Timer).start(); diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index dab9ea611..1ab3198ec 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -969,7 +969,7 @@ RegionAwareMemoryNodeProvider::ResolveUnknownMemoryNodeReferences(const RvsdgMod } }; - auto nodes = rvsdg::graph::ExtractTailNodes(rvsdgModule.Rvsdg()); + auto nodes = 
rvsdg::Graph::ExtractTailNodes(rvsdgModule.Rvsdg()); for (auto & node : nodes) { if (auto lambdaNode = dynamic_cast(node)) @@ -1004,7 +1004,7 @@ RegionAwareMemoryNodeProvider::ShouldCreateRegionSummary(const rvsdg::Region & r std::string RegionAwareMemoryNodeProvider::ToRegionTree( - const rvsdg::graph & rvsdg, + const rvsdg::Graph & rvsdg, const RegionAwareMemoryNodeProvisioning & provisioning) { auto toString = [](const util::HashSet & memoryNodes) diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp index 08fac9106..762330f8c 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp @@ -208,7 +208,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider * @return A string that contains the region tree. */ static std::string - ToRegionTree(const rvsdg::graph & rvsdg, const RegionAwareMemoryNodeProvisioning & provisioning); + ToRegionTree(const rvsdg::Graph & rvsdg, const RegionAwareMemoryNodeProvisioning & provisioning); std::unique_ptr Provisioning_; }; diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 558bd1463..01034dbe9 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -914,7 +914,7 @@ class Steensgaard::Statistics final : public util::Statistics {} void - StartSteensgaardStatistics(const jlm::rvsdg::graph & graph) noexcept + StartSteensgaardStatistics(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); AddTimer(AnalysisTimerLabel_).start(); @@ -1744,7 +1744,7 @@ Steensgaard::AnalyzeRegion(rvsdg::Region & region) } void -Steensgaard::AnalyzeRvsdg(const jlm::rvsdg::graph & graph) +Steensgaard::AnalyzeRvsdg(const rvsdg::Graph & graph) { AnalyzeImports(graph); AnalyzeRegion(*graph.root()); @@ -1752,7 
+1752,7 @@ Steensgaard::AnalyzeRvsdg(const jlm::rvsdg::graph & graph) } void -Steensgaard::AnalyzeImports(const rvsdg::graph & graph) +Steensgaard::AnalyzeImports(const rvsdg::Graph & graph) { auto rootRegion = graph.root(); for (size_t n = 0; n < rootRegion->narguments(); n++) @@ -1769,7 +1769,7 @@ Steensgaard::AnalyzeImports(const rvsdg::graph & graph) } void -Steensgaard::AnalyzeExports(const rvsdg::graph & graph) +Steensgaard::AnalyzeExports(const rvsdg::Graph & graph) { auto rootRegion = graph.root(); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index bd561bc93..fcf197730 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -64,13 +64,13 @@ class Steensgaard final : public AliasAnalysis private: void - AnalyzeRvsdg(const rvsdg::graph & graph); + AnalyzeRvsdg(const rvsdg::Graph & graph); void - AnalyzeImports(const rvsdg::graph & graph); + AnalyzeImports(const rvsdg::Graph & graph); void - AnalyzeExports(const rvsdg::graph & graph); + AnalyzeExports(const rvsdg::Graph & graph); void AnalyzeRegion(rvsdg::Region & region); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 96bcd42ce..7a2cc2b80 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -23,7 +23,7 @@ class TopDownMemoryNodeEliminator::Statistics final : public util::Statistics {} void - Start(const rvsdg::graph & graph) noexcept + Start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); AddTimer(Label::Timer).start(); @@ -863,7 +863,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownIndirectCall( void TopDownMemoryNodeEliminator::InitializeLiveNodesOfTailLambdas(const RvsdgModule & rvsdgModule) { - auto nodes = 
rvsdg::graph::ExtractTailNodes(rvsdgModule.Rvsdg()); + auto nodes = rvsdg::Graph::ExtractTailNodes(rvsdgModule.Rvsdg()); for (auto & node : nodes) { if (auto lambdaNode = dynamic_cast(node)) diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 63f4673f8..82f6a58b0 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -28,7 +28,7 @@ class cnestat final : public util::Statistics {} void - start_mark_stat(const jlm::rvsdg::graph & graph) noexcept + start_mark_stat(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); @@ -48,7 +48,7 @@ class cnestat final : public util::Statistics } void - end_divert_stat(const jlm::rvsdg::graph & graph) noexcept + end_divert_stat(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 60f66b1c5..eba80257e 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -25,14 +25,14 @@ class ilnstat final : public util::Statistics {} void - start(const jlm::rvsdg::graph & graph) + start(const rvsdg::Graph & graph) { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddTimer(Label::Timer).start(); } void - stop(const jlm::rvsdg::graph & graph) + stop(const rvsdg::Graph & graph) { AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); GetTimer(Label::Timer).stop(); @@ -142,7 +142,7 @@ inlineCall(jlm::rvsdg::simple_node * call, const lambda::node * lambda) } static void -inlining(jlm::rvsdg::graph & rvsdg) +inlining(rvsdg::Graph & rvsdg) { for (auto node : rvsdg::topdown_traverser(rvsdg.root())) { diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index b582f4dc5..cfa3f0a4d 100644 --- a/jlm/llvm/opt/inversion.cpp +++ 
b/jlm/llvm/opt/inversion.cpp @@ -26,7 +26,7 @@ class ivtstat final : public util::Statistics {} void - start(const jlm::rvsdg::graph & graph) noexcept + start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); @@ -34,7 +34,7 @@ class ivtstat final : public util::Statistics } void - end(const jlm::rvsdg::graph & graph) noexcept + end(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 59d2fd8e9..c94c5caae 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -24,14 +24,14 @@ class pullstat final : public util::Statistics {} void - start(const jlm::rvsdg::graph & graph) noexcept + start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); AddTimer(Label::Timer).start(); } void - end(const jlm::rvsdg::graph & graph) noexcept + end(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); GetTimer(Label::Timer).stop(); diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 3260ba907..d39a2b4db 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -27,14 +27,14 @@ class pushstat final : public util::Statistics {} void - start(const jlm::rvsdg::graph & graph) noexcept + start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgInputsBefore, jlm::rvsdg::ninputs(graph.root())); AddTimer(Label::Timer).start(); } void - end(const jlm::rvsdg::graph & graph) noexcept + end(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgInputsAfter, jlm::rvsdg::ninputs(graph.root())); GetTimer(Label::Timer).stop(); diff --git a/jlm/llvm/opt/reduction.cpp 
b/jlm/llvm/opt/reduction.cpp index 67148872a..c5eb78837 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -24,7 +24,7 @@ class redstat final : public util::Statistics {} void - start(const jlm::rvsdg::graph & graph) noexcept + start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); @@ -32,7 +32,7 @@ class redstat final : public util::Statistics } void - end(const jlm::rvsdg::graph & graph) noexcept + end(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); @@ -47,7 +47,7 @@ class redstat final : public util::Statistics }; static void -enable_mux_reductions(jlm::rvsdg::graph & graph) +enable_mux_reductions(rvsdg::Graph & graph) { auto nf = graph.node_normal_form(typeid(jlm::rvsdg::mux_op)); auto mnf = static_cast(nf); @@ -57,7 +57,7 @@ enable_mux_reductions(jlm::rvsdg::graph & graph) } static void -enable_store_reductions(jlm::rvsdg::graph & graph) +enable_store_reductions(rvsdg::Graph & graph) { auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(true); @@ -68,7 +68,7 @@ enable_store_reductions(jlm::rvsdg::graph & graph) } static void -enable_load_reductions(jlm::rvsdg::graph & graph) +enable_load_reductions(rvsdg::Graph & graph) { auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(true); @@ -81,7 +81,7 @@ enable_load_reductions(jlm::rvsdg::graph & graph) } static void -enable_gamma_reductions(jlm::rvsdg::graph & graph) +enable_gamma_reductions(rvsdg::Graph & graph) { auto nf = rvsdg::GammaOperation::normal_form(&graph); nf->set_mutable(true); @@ -92,7 +92,7 @@ enable_gamma_reductions(jlm::rvsdg::graph & graph) } static void -enable_unary_reductions(jlm::rvsdg::graph & graph) +enable_unary_reductions(rvsdg::Graph & graph) { auto 
nf = jlm::rvsdg::unary_op::normal_form(&graph); // set_mutable generates incorrect output for a number of @@ -105,7 +105,7 @@ enable_unary_reductions(jlm::rvsdg::graph & graph) } static void -enable_binary_reductions(jlm::rvsdg::graph & graph) +enable_binary_reductions(rvsdg::Graph & graph) { auto nf = jlm::rvsdg::binary_op::normal_form(&graph); nf->set_mutable(true); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index caf014e44..692aa6cc9 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -24,14 +24,14 @@ class unrollstat final : public util::Statistics {} void - start(const jlm::rvsdg::graph & graph) noexcept + start(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); AddTimer(Label::Timer).start(); } void - end(const jlm::rvsdg::graph & graph) noexcept + end(const rvsdg::Graph & graph) noexcept { AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); GetTimer(Label::Timer).stop(); diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 96a6ab64d..8d14e83c6 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -47,7 +47,7 @@ JlmToMlirConverter::ConvertModule(const llvm::RvsdgModule & rvsdgModule) } ::mlir::rvsdg::OmegaNode -JlmToMlirConverter::ConvertOmega(const rvsdg::graph & graph) +JlmToMlirConverter::ConvertOmega(const rvsdg::Graph & graph) { auto omega = Builder_->create<::mlir::rvsdg::OmegaNode>(Builder_->getUnknownLoc()); auto & omegaBlock = omega.getRegion().emplaceBlock(); diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index fcc6b9499..e68e6e18a 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -74,7 +74,7 @@ class JlmToMlirConverter final * \return An MLIR RVSDG OmegaNode. 
*/ ::mlir::rvsdg::OmegaNode - ConvertOmega(const rvsdg::graph & graph); + ConvertOmega(const rvsdg::Graph & graph); /** * Converts all nodes in an RVSDG region. Conversion of structural nodes cause their regions to diff --git a/jlm/rvsdg/RvsdgModule.hpp b/jlm/rvsdg/RvsdgModule.hpp index 0ef2c9f4f..d40fffd2c 100644 --- a/jlm/rvsdg/RvsdgModule.hpp +++ b/jlm/rvsdg/RvsdgModule.hpp @@ -35,7 +35,7 @@ class RvsdgModule * * @return The RVSDG of the module. */ - jlm::rvsdg::graph & + jlm::rvsdg::Graph & Rvsdg() noexcept { return Rvsdg_; @@ -45,14 +45,14 @@ class RvsdgModule * * @return The RVSDG of the module. */ - [[nodiscard]] const jlm::rvsdg::graph & + [[nodiscard]] const Graph & Rvsdg() const noexcept { return Rvsdg_; } private: - graph Rvsdg_; + Graph Rvsdg_; }; } diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 0c783346c..79289fd00 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -58,7 +58,7 @@ binary_normal_form::~binary_normal_form() noexcept binary_normal_form::binary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + Graph * graph) : simple_normal_form(operator_class, parent, graph), enable_reducible_(true), enable_reorder_(true), @@ -281,7 +281,7 @@ flattened_binary_normal_form::~flattened_binary_normal_form() noexcept flattened_binary_normal_form::flattened_binary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + Graph * graph) : simple_normal_form(operator_class, parent, graph) {} @@ -449,7 +449,7 @@ jlm::rvsdg::node_normal_form * binary_operation_get_default_normal_form_( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::binary_normal_form(operator_class, parent, graph); } @@ -458,7 +458,7 @@ jlm::rvsdg::node_normal_form * flattened_binary_operation_get_default_normal_form_( const 
std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::flattened_binary_normal_form(operator_class, parent, graph); } diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index 234e0c266..7e1ee424b 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -27,7 +27,7 @@ class binary_normal_form final : public simple_normal_form binary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph); + Graph * graph); virtual bool normalize_node(jlm::rvsdg::node * node) const override; @@ -104,7 +104,7 @@ class flattened_binary_normal_form final : public simple_normal_form flattened_binary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph); + Graph * graph); virtual bool normalize_node(jlm::rvsdg::node * node) const override; @@ -160,7 +160,7 @@ class binary_op : public simple_op is_commutative() const noexcept; static jlm::rvsdg::binary_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept + normal_form(Graph * graph) noexcept { return static_cast( graph->node_normal_form(typeid(binary_op))); @@ -208,7 +208,7 @@ class flattened_binary_op final : public simple_op } static jlm::rvsdg::flattened_binary_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept + normal_form(Graph * graph) noexcept { return static_cast( graph->node_normal_form(typeid(flattened_binary_op))); @@ -223,7 +223,7 @@ class flattened_binary_op final : public simple_op reduce(rvsdg::Region * region, const flattened_binary_op::reduction & reduction); static inline void - reduce(jlm::rvsdg::graph * graph, const flattened_binary_op::reduction & reduction) + reduce(Graph * graph, const flattened_binary_op::reduction & reduction) { reduce(graph->root(), reduction); } diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 
b2b4303a1..c4017bed5 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -86,7 +86,7 @@ class concat_normal_form final : public simple_normal_form public: virtual ~concat_normal_form() noexcept; - concat_normal_form(jlm::rvsdg::node_normal_form * parent, jlm::rvsdg::graph * graph) + concat_normal_form(jlm::rvsdg::node_normal_form * parent, Graph * graph) : simple_normal_form(typeid(bitconcat_op), parent, graph), enable_reducible_(true), enable_flatten_(true) @@ -233,7 +233,7 @@ static node_normal_form * get_default_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + Graph * graph) { return new concat_normal_form(parent, graph); } diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 7356b2bde..7bcf3b9ee 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -154,7 +154,7 @@ gamma_normal_form::~gamma_normal_form() noexcept gamma_normal_form::gamma_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept + Graph * graph) noexcept : structural_normal_form(operator_class, parent, graph), enable_predicate_reduction_(false), enable_invariant_reduction_(false), @@ -395,7 +395,7 @@ jlm::rvsdg::node_normal_form * gamma_node_get_default_normal_form_( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::gamma_normal_form(operator_class, parent, graph); } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 9e3c362e2..e450448e3 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -25,7 +25,7 @@ class gamma_normal_form final : public structural_normal_form gamma_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept; + Graph * graph) noexcept; virtual bool normalize_node(jlm::rvsdg::node * node) 
const override; @@ -94,7 +94,7 @@ class GammaOperation final : public structural_op operator==(const operation & other) const noexcept override; static jlm::rvsdg::gamma_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept + normal_form(Graph * graph) noexcept { return static_cast( graph->node_normal_form(typeid(GammaOperation))); diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 8530f91e7..9d00b8e50 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -14,10 +14,7 @@ namespace jlm::rvsdg { -GraphImport::GraphImport( - rvsdg::graph & graph, - std::shared_ptr type, - std::string name) +GraphImport::GraphImport(Graph & graph, std::shared_ptr type, std::string name) : RegionArgument(graph.root(), nullptr, std::move(type)), Name_(std::move(name)) {} @@ -27,29 +24,29 @@ GraphExport::GraphExport(rvsdg::output & origin, std::string name) Name_(std::move(name)) {} -graph::~graph() +Graph::~Graph() { JLM_ASSERT(!has_active_trackers(this)); delete root_; } -graph::graph() +Graph::Graph() : normalized_(false), root_(new rvsdg::Region(nullptr, this)) {} -std::unique_ptr -graph::copy() const +std::unique_ptr +Graph::copy() const { SubstitutionMap smap; - std::unique_ptr graph(new jlm::rvsdg::graph()); + std::unique_ptr graph(new jlm::rvsdg::Graph()); root()->copy(graph->root(), smap, true, true); return graph; } jlm::rvsdg::node_normal_form * -graph::node_normal_form(const std::type_info & type) noexcept +Graph::node_normal_form(const std::type_info & type) noexcept { auto i = node_normal_forms_.find(std::type_index(type)); if (i != node_normal_forms_.end()) @@ -68,7 +65,7 @@ graph::node_normal_form(const std::type_info & type) noexcept } std::vector -graph::ExtractTailNodes(const graph & rvsdg) +Graph::ExtractTailNodes(const Graph & rvsdg) { auto IsOnlyExported = [](const rvsdg::output & output) { diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index fb46355c7..a24813142 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -28,7 
+28,7 @@ namespace jlm::rvsdg class GraphImport : public RegionArgument { protected: - GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name); + GraphImport(Graph & graph, std::shared_ptr type, std::string name); public: [[nodiscard]] const std::string & @@ -60,12 +60,12 @@ class GraphExport : public RegionResult std::string Name_; }; -class graph +class Graph { public: - ~graph(); + ~Graph(); - graph(); + Graph(); [[nodiscard]] rvsdg::Region * root() const noexcept @@ -86,7 +86,7 @@ class graph normalized_ = true; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const; jlm::rvsdg::node_normal_form * @@ -108,7 +108,7 @@ class graph * @return A vector of tail nodes. */ static std::vector - ExtractTailNodes(const graph & rvsdg); + ExtractTailNodes(const Graph & rvsdg); private: bool normalized_; diff --git a/jlm/rvsdg/node-normal-form.cpp b/jlm/rvsdg/node-normal-form.cpp index 5880b29f2..3a99d30e5 100644 --- a/jlm/rvsdg/node-normal-form.cpp +++ b/jlm/rvsdg/node-normal-form.cpp @@ -42,7 +42,7 @@ namespace typedef jlm::rvsdg::node_normal_form * (*create_node_normal_form_functor)( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph); + Graph * graph); typedef std::unordered_map node_normal_form_registry; @@ -78,7 +78,7 @@ node_normal_form::register_factory( jlm::rvsdg::node_normal_form * (*fn)( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph)) + Graph * graph)) { if (!registry) { @@ -92,7 +92,7 @@ node_normal_form * node_normal_form::create( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + Graph * graph) { return lookup_factory_functor(&operator_class)(operator_class, parent, graph); } diff --git a/jlm/rvsdg/node-normal-form.hpp b/jlm/rvsdg/node-normal-form.hpp index dd3ea7d79..79e0da003 100644 --- a/jlm/rvsdg/node-normal-form.hpp +++ b/jlm/rvsdg/node-normal-form.hpp @@ 
-22,7 +22,7 @@ namespace jlm::rvsdg { -class graph; +class Graph; class node; class operation; class output; @@ -36,7 +36,7 @@ class node_normal_form inline node_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept + Graph * graph) noexcept : operator_class_(operator_class), parent_(parent), graph_(graph), @@ -58,7 +58,7 @@ class node_normal_form return parent_; } - inline jlm::rvsdg::graph * + [[nodiscard]] Graph * graph() const noexcept { return graph_; @@ -79,13 +79,13 @@ class node_normal_form jlm::rvsdg::node_normal_form * (*fn)( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph)); + Graph * graph)); static node_normal_form * create( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph); + Graph * graph); class opclass_hash_accessor { @@ -136,7 +136,7 @@ class node_normal_form private: const std::type_info & operator_class_; node_normal_form * parent_; - jlm::rvsdg::graph * graph_; + Graph * graph_; struct { diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index d922dcb47..1d0aef836 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -141,7 +141,7 @@ jlm::rvsdg::node_normal_form * node_get_default_normal_form_( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::node_normal_form(operator_class, parent, graph); } diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 81f4dd8d1..85dfd7dca 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -26,7 +26,7 @@ namespace base class type; } -class graph; +class Graph; class node_normal_form; class output; class SubstitutionMap; @@ -822,7 +822,7 @@ class node } public: - inline jlm::rvsdg::graph * + [[nodiscard]] Graph * graph() const noexcept { return graph_; @@ -884,7 +884,7 @@ class node private: size_t 
depth_; - jlm::rvsdg::graph * graph_; + Graph * graph_; rvsdg::Region * region_; std::unique_ptr operation_; std::vector> inputs_; diff --git a/jlm/rvsdg/nullary.cpp b/jlm/rvsdg/nullary.cpp index 52d11d30a..c78350ec0 100644 --- a/jlm/rvsdg/nullary.cpp +++ b/jlm/rvsdg/nullary.cpp @@ -19,7 +19,7 @@ class nullary_normal_form final : public simple_normal_form nullary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + Graph * graph) : simple_normal_form(operator_class, parent, graph) {} }; @@ -38,7 +38,7 @@ jlm::rvsdg::node_normal_form * nullary_operation_get_default_normal_form_( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::nullary_normal_form(operator_class, parent, graph); } diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index 348b4da2f..fae80999b 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -15,7 +15,7 @@ operation::~operation() noexcept {} jlm::rvsdg::node_normal_form * -operation::normal_form(jlm::rvsdg::graph * graph) noexcept +operation::normal_form(Graph * graph) noexcept { return graph->node_normal_form(typeid(operation)); } @@ -52,7 +52,7 @@ simple_op::result(size_t index) const noexcept } jlm::rvsdg::simple_normal_form * -simple_op::normal_form(jlm::rvsdg::graph * graph) noexcept +simple_op::normal_form(Graph * graph) noexcept { return static_cast(graph->node_normal_form(typeid(simple_op))); } @@ -66,7 +66,7 @@ structural_op::operator==(const operation & other) const noexcept } jlm::rvsdg::structural_normal_form * -structural_op::normal_form(jlm::rvsdg::graph * graph) noexcept +structural_op::normal_form(Graph * graph) noexcept { return static_cast(graph->node_normal_form(typeid(structural_op))); } diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index 9427bf65e..5fd2193ec 100644 --- a/jlm/rvsdg/operation.hpp +++ 
b/jlm/rvsdg/operation.hpp @@ -16,7 +16,7 @@ namespace jlm::rvsdg { -class graph; +class Graph; class node; class node_normal_form; class output; @@ -45,7 +45,7 @@ class operation } static jlm::rvsdg::node_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept; + normal_form(Graph * graph) noexcept; }; template @@ -86,7 +86,7 @@ class simple_op : public operation result(size_t index) const noexcept; static jlm::rvsdg::simple_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept; + normal_form(Graph * graph) noexcept; private: std::vector> operands_; @@ -102,7 +102,7 @@ class structural_op : public operation operator==(const operation & other) const noexcept override; static jlm::rvsdg::structural_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept; + normal_form(Graph * graph) noexcept; }; } diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index dd294fd09..2605c861b 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -137,7 +137,7 @@ Region::~Region() noexcept RemoveArgument(arguments_.size() - 1); } -Region::Region(rvsdg::Region * parent, jlm::rvsdg::graph * graph) +Region::Region(Region *, Graph * graph) : index_(0), graph_(graph), node_(nullptr) diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 60fa58703..15f76792d 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -256,7 +256,7 @@ class Region public: ~Region() noexcept; - Region(rvsdg::Region * parent, jlm::rvsdg::graph * graph); + Region(rvsdg::Region * parent, Graph * graph); Region(rvsdg::StructuralNode * node, size_t index); @@ -351,7 +351,7 @@ class Region return { BottomNodes_.begin(), BottomNodes_.end() }; } - inline jlm::rvsdg::graph * + [[nodiscard]] Graph * graph() const noexcept { return graph_; @@ -736,7 +736,7 @@ class Region ToString(const util::Annotation & annotation, char labelValueSeparator); size_t index_; - jlm::rvsdg::graph * graph_; + Graph * graph_; rvsdg::StructuralNode * node_; std::vector results_; std::vector 
arguments_; diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 3386b030e..7cf958ff7 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -50,7 +50,7 @@ simple_normal_form::~simple_normal_form() noexcept simple_normal_form::simple_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept + Graph * graph) noexcept : node_normal_form(operator_class, parent, graph), enable_cse_(true) { @@ -113,7 +113,7 @@ static jlm::rvsdg::node_normal_form * get_default_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::simple_normal_form(operator_class, parent, graph); } diff --git a/jlm/rvsdg/simple-normal-form.hpp b/jlm/rvsdg/simple-normal-form.hpp index 996f2b57c..e61928f6d 100644 --- a/jlm/rvsdg/simple-normal-form.hpp +++ b/jlm/rvsdg/simple-normal-form.hpp @@ -21,7 +21,7 @@ class simple_normal_form : public node_normal_form simple_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept; + Graph * graph) noexcept; virtual bool normalize_node(jlm::rvsdg::node * node) const override; diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 4eb261224..a2d13a74e 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -111,7 +111,7 @@ mux_normal_form::~mux_normal_form() noexcept mux_normal_form::mux_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept + Graph * graph) noexcept : simple_normal_form(opclass, parent, graph), enable_mux_mux_(false), enable_multiple_origin_(false) @@ -204,7 +204,7 @@ static jlm::rvsdg::node_normal_form * create_mux_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + 
jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::mux_normal_form(opclass, parent, graph); } diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index bea46e80a..843468a78 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -24,7 +24,7 @@ class mux_normal_form final : public simple_normal_form mux_normal_form( const std::type_info & opclass, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept; + Graph * graph) noexcept; virtual bool normalize_node(jlm::rvsdg::node * node) const override; @@ -79,7 +79,7 @@ class mux_op final : public simple_op copy() const override; static jlm::rvsdg::mux_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept + normal_form(Graph * graph) noexcept { return static_cast(graph->node_normal_form(typeid(mux_op))); } diff --git a/jlm/rvsdg/structural-normal-form.cpp b/jlm/rvsdg/structural-normal-form.cpp index 843b17fb6..5fc358d57 100644 --- a/jlm/rvsdg/structural-normal-form.cpp +++ b/jlm/rvsdg/structural-normal-form.cpp @@ -15,7 +15,7 @@ structural_normal_form::~structural_normal_form() noexcept structural_normal_form::structural_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept + Graph * graph) noexcept : node_normal_form(operator_class, parent, graph) {} @@ -25,7 +25,7 @@ static jlm::rvsdg::node_normal_form * get_default_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + jlm::rvsdg::Graph * graph) { return new jlm::rvsdg::structural_normal_form(operator_class, parent, graph); } diff --git a/jlm/rvsdg/structural-normal-form.hpp b/jlm/rvsdg/structural-normal-form.hpp index 04124d38f..2c754f2ca 100644 --- a/jlm/rvsdg/structural-normal-form.hpp +++ b/jlm/rvsdg/structural-normal-form.hpp @@ -19,7 +19,7 @@ class structural_normal_form : public node_normal_form structural_normal_form( const std::type_info & operator_class, 
jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) noexcept; + Graph * graph) noexcept; }; } diff --git a/jlm/rvsdg/tracker.cpp b/jlm/rvsdg/tracker.cpp index 9edcd8f28..b5a1a3fe7 100644 --- a/jlm/rvsdg/tracker.cpp +++ b/jlm/rvsdg/tracker.cpp @@ -12,7 +12,7 @@ using namespace std::placeholders; namespace { -typedef std::unordered_set tracker_set; +typedef std::unordered_set tracker_set; tracker_set * active_trackers() @@ -42,7 +42,7 @@ namespace jlm::rvsdg { bool -has_active_trackers(const jlm::rvsdg::graph * graph) +has_active_trackers(const Graph * graph) { auto at = active_trackers(); return at->find(graph) != at->end(); @@ -163,7 +163,7 @@ tracker::~tracker() noexcept unregister_tracker(this); } -tracker::tracker(jlm::rvsdg::graph * graph, size_t nstates) +tracker::tracker(Graph * graph, size_t nstates) : graph_(graph), states_(nstates) { diff --git a/jlm/rvsdg/tracker.hpp b/jlm/rvsdg/tracker.hpp index 5f650873d..834163778 100644 --- a/jlm/rvsdg/tracker.hpp +++ b/jlm/rvsdg/tracker.hpp @@ -19,14 +19,14 @@ namespace jlm::rvsdg static const size_t tracker_nodestate_none = (size_t)-1; -class graph; +class Graph; class node; class Region; class tracker_depth_state; class tracker_nodestate; bool -has_active_trackers(const jlm::rvsdg::graph * graph); +has_active_trackers(const Graph * graph); /* Track states of nodes within the graph. Each node can logically be in * one of the numbered states, plus another "initial" state. 
All nodes are @@ -36,7 +36,7 @@ struct tracker public: ~tracker() noexcept; - tracker(jlm::rvsdg::graph * graph, size_t nstates); + tracker(Graph * graph, size_t nstates); /* get state of the node */ ssize_t @@ -54,7 +54,7 @@ struct tracker jlm::rvsdg::node * peek_bottom(size_t state) const; - inline jlm::rvsdg::graph * + [[nodiscard]] Graph * graph() const noexcept { return graph_; @@ -70,7 +70,7 @@ struct tracker void node_destroy(jlm::rvsdg::node * node); - jlm::rvsdg::graph * graph_; + jlm::rvsdg::Graph * graph_; /* FIXME: need RAII idiom for state reservation */ std::vector> states_; diff --git a/jlm/rvsdg/traverser.hpp b/jlm/rvsdg/traverser.hpp index d1798e726..a65702828 100644 --- a/jlm/rvsdg/traverser.hpp +++ b/jlm/rvsdg/traverser.hpp @@ -15,7 +15,7 @@ namespace jlm::rvsdg { -class graph; +class Graph; class input; class output; @@ -86,7 +86,7 @@ enum class traversal_nodestate class traversal_tracker final { public: - inline traversal_tracker(jlm::rvsdg::graph * graph); + inline traversal_tracker(Graph * graph); inline traversal_nodestate get_nodestate(jlm::rvsdg::node * node); @@ -231,7 +231,7 @@ class bottomup_traverser final /* traversal tracker implementation */ -traversal_tracker::traversal_tracker(jlm::rvsdg::graph * graph) +traversal_tracker::traversal_tracker(Graph * graph) : tracker_(graph, 2) {} diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 249eb7eac..52ec39366 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -18,7 +18,7 @@ unary_normal_form::~unary_normal_form() noexcept unary_normal_form::unary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + Graph * graph) : simple_normal_form(operator_class, parent, graph), enable_reducible_(true) { @@ -101,7 +101,7 @@ jlm::rvsdg::node_normal_form * unary_operation_get_default_normal_form_( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph) + 
jlm::rvsdg::Graph * graph) { jlm::rvsdg::node_normal_form * nf = new jlm::rvsdg::unary_normal_form(operator_class, parent, graph); diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index f2cb3f39b..b5f0f2a13 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -25,7 +25,7 @@ class unary_normal_form final : public simple_normal_form unary_normal_form( const std::type_info & operator_class, jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::graph * graph); + Graph * graph); virtual bool normalize_node(jlm::rvsdg::node * node) const override; @@ -72,7 +72,7 @@ class unary_op : public simple_op reduce_operand(unop_reduction_path_t path, jlm::rvsdg::output * arg) const = 0; static jlm::rvsdg::unary_normal_form * - normal_form(jlm::rvsdg::graph * graph) noexcept + normal_form(Graph * graph) noexcept { return static_cast(graph->node_normal_form(typeid(unary_op))); } diff --git a/jlm/rvsdg/view.hpp b/jlm/rvsdg/view.hpp index adbf17b36..a59a8de5a 100644 --- a/jlm/rvsdg/view.hpp +++ b/jlm/rvsdg/view.hpp @@ -51,7 +51,7 @@ view(const rvsdg::Region * region, FILE * out); * @param out the file to be written to. 
*/ inline void -view(const jlm::rvsdg::graph & graph, FILE * out) +view(const Graph & graph, FILE * out) { return view(graph.root(), out); } diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index ce3bfca67..0cc55821a 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -26,7 +26,7 @@ class RvsdgTest return *module_; } - const jlm::rvsdg::graph & + const rvsdg::Graph & graph() { return module().Rvsdg(); diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index f59f8a33f..4f80f02e6 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -56,7 +56,7 @@ TestCopy() auto valueType = jlm::tests::valuetype::Create(); auto pointerType = PointerType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto address1 = &jlm::tests::GraphImport::Create(graph, pointerType, "address1"); auto memoryState1 = &jlm::tests::GraphImport::Create(graph, memoryType, "memoryState1"); @@ -85,7 +85,7 @@ TestLoadAllocaReduction() auto mt = MemoryStateType::Create(); auto bt = jlm::rvsdg::bittype::Create(32); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_alloca_reducible(false); @@ -128,7 +128,7 @@ TestMultipleOriginReduction() auto vt = jlm::tests::valuetype::Create(); auto pt = PointerType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_multiple_origin_reducible(false); @@ -163,7 +163,7 @@ TestLoadStoreStateReduction() // Arrange auto bt = jlm::rvsdg::bittype::Create(32); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_store_state_reducible(false); @@ -211,7 +211,7 @@ TestLoadStoreReduction() auto pt = PointerType::Create(); auto mt = 
MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_store_reducible(false); @@ -251,7 +251,7 @@ TestLoadLoadReduction() auto pt = PointerType::Create(); auto mt = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); @@ -408,7 +408,7 @@ NodeCopy() auto memoryType = MemoryStateType::Create(); auto valueType = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto & address1 = jlm::tests::GraphImport::Create(graph, pointerType, "address1"); auto & iOState1 = jlm::tests::GraphImport::Create(graph, iOStateType, "iOState1"); auto & memoryState1 = jlm::tests::GraphImport::Create(graph, memoryType, "memoryState1"); diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index f683d6c5f..261c41a07 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -141,7 +141,7 @@ StoreVolatileNodeCopy() auto memoryType = MemoryStateType::Create(); auto valueType = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto & address1 = jlm::tests::GraphImport::Create(graph, pointerType, "address1"); auto & value1 = jlm::tests::GraphImport::Create(graph, valueType, "value1"); auto & ioState1 = jlm::tests::GraphImport::Create(graph, ioStateType, "ioState1"); @@ -182,7 +182,7 @@ TestCopy() auto pointerType = PointerType::Create(); auto memoryStateType = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto address1 = &jlm::tests::GraphImport::Create(graph, pointerType, "address1"); auto value1 = &jlm::tests::GraphImport::Create(graph, valueType, "value1"); auto memoryState1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state1"); @@ -214,7 +214,7 @@ 
TestStoreMuxReduction() auto pt = PointerType::Create(); auto mt = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(StoreNonVolatileOperation)); auto snf = static_cast(nf); snf->set_mutable(false); @@ -263,7 +263,7 @@ TestMultipleOriginReduction() auto pt = PointerType::Create(); auto mt = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(StoreNonVolatileOperation)); auto snf = static_cast(nf); snf->set_mutable(false); @@ -302,7 +302,7 @@ TestStoreAllocaReduction() auto mt = MemoryStateType::Create(); auto bt = jlm::rvsdg::bittype::Create(32); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(StoreNonVolatileOperation)); auto snf = static_cast(nf); snf->set_mutable(false); @@ -351,7 +351,7 @@ TestStoreStoreReduction() auto pt = PointerType::Create(); auto mt = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto a = &jlm::tests::GraphImport::Create(graph, pt, "address"); auto v1 = &jlm::tests::GraphImport::Create(graph, vt, "value"); auto v2 = &jlm::tests::GraphImport::Create(graph, vt, "value"); diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 7ba64dad9..0f2ea8883 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -26,7 +26,7 @@ TestCopy() { valueType, iostatetype::Create(), MemoryStateType::Create() }, { valueType, iostatetype::Create(), MemoryStateType::Create() }); - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto function1 = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "function1"); auto value1 = &jlm::tests::GraphImport::Create(rvsdg, valueType, "value1"); auto iOState1 = &jlm::tests::GraphImport::Create(rvsdg, iOStateType, "iOState1"); @@ -63,7 +63,7 @@ TestCallNodeAccessors() { valueType, 
iostatetype::Create(), MemoryStateType::Create() }, { valueType, iostatetype::Create(), MemoryStateType::Create() }); - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto f = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "function"); auto v = &jlm::tests::GraphImport::Create(rvsdg, valueType, "value"); auto i = &jlm::tests::GraphImport::Create(rvsdg, iOStateType, "IOState"); diff --git a/tests/jlm/llvm/ir/operators/TestFree.cpp b/tests/jlm/llvm/ir/operators/TestFree.cpp index 6136bdac5..1a88b81b3 100644 --- a/tests/jlm/llvm/ir/operators/TestFree.cpp +++ b/tests/jlm/llvm/ir/operators/TestFree.cpp @@ -68,7 +68,7 @@ TestRvsdgCreator() using namespace jlm::llvm; // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto address = &jlm::tests::GraphImport::Create(rvsdg, PointerType::Create(), "p"); auto memoryState = &jlm::tests::GraphImport::Create(rvsdg, MemoryStateType::Create(), "m"); diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index 5f7debd2d..4c0f76a91 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -487,7 +487,7 @@ TestCallSummaryComputationLambdaResult() using namespace jlm::llvm; // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index e7499a7ab..24c790a00 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -17,7 +17,7 @@ TestPhiCreation() { using namespace jlm::llvm; - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto vtype = jlm::tests::valuetype::Create(); auto iOStateType = iostatetype::Create(); diff --git a/tests/jlm/llvm/ir/operators/test-sext.cpp b/tests/jlm/llvm/ir/operators/test-sext.cpp index e4a16bf87..a376c1a95 100644 --- 
a/tests/jlm/llvm/ir/operators/test-sext.cpp +++ b/tests/jlm/llvm/ir/operators/test-sext.cpp @@ -18,7 +18,7 @@ test_bitunary_reduction() { auto bt32 = jlm::rvsdg::bittype::Create(32); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = jlm::llvm::sext_op::normal_form(&graph); nf->set_mutable(false); @@ -45,7 +45,7 @@ test_bitbinary_reduction() { auto bt32 = jlm::rvsdg::bittype::Create(32); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = jlm::llvm::sext_op::normal_form(&graph); nf->set_mutable(false); @@ -75,7 +75,7 @@ test_inverse_reduction() auto bt64 = jlm::rvsdg::bittype::Create(64); - jlm::rvsdg::graph graph; + rvsdg::Graph graph; auto nf = jlm::llvm::sext_op::normal_form(&graph); nf->set_mutable(false); diff --git a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp index 3a33b98b9..9aff8228f 100644 --- a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp @@ -23,7 +23,7 @@ TestSuccess() auto pt = PointerType::Create(); auto mt = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_mux_reducible(false); @@ -77,7 +77,7 @@ TestWrongNumberOfOperands() auto pt = PointerType::Create(); auto mt = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_mux_reducible(false); @@ -122,7 +122,7 @@ TestLoadWithoutStates() auto valueType = jlm::tests::valuetype::Create(); auto pointerType = PointerType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_mux_reducible(false); diff --git a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp index 85454a147..780f1458d 100644 --- 
a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp @@ -25,7 +25,7 @@ TestLoadStoreReductionWithDifferentValueOperandType() auto pointerType = PointerType::Create(); auto memoryStateType = MemoryStateType::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_store_reducible(false); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp index d72239fe5..2230801cb 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp @@ -87,7 +87,7 @@ class TestAnalysis final : public jlm::llvm::aa::AliasAnalysis } void - AnalyzeImports(const jlm::rvsdg::graph & rvsdg) + AnalyzeImports(const jlm::rvsdg::Graph & rvsdg) { using namespace jlm::llvm; diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index b786bbab6..84ef82b66 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -62,7 +62,7 @@ test_pullin_bottom() auto vt = jlm::tests::valuetype::Create(); auto ct = jlm::rvsdg::ControlType::Create(2); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index b85652c05..d67f4d543 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -111,7 +111,7 @@ test_push_theta_bottom() auto pt = PointerType::Create(); auto ct = jlm::rvsdg::ControlType::Create(2); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); diff --git 
a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index b9561bd46..5f50451fe 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -77,7 +77,7 @@ test_unrollinfo() jlm::rvsdg::bitsub_op sub(32); { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto x = &jlm::tests::GraphImport::Create(graph, bt32, "x"); auto theta = create_theta(slt, add, x, x, x); auto ui = jlm::llvm::unrollinfo::create(theta); @@ -92,7 +92,7 @@ test_unrollinfo() } { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); @@ -146,7 +146,7 @@ test_known_boundaries() jlm::rvsdg::bitsub_op sub(32); { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); @@ -166,7 +166,7 @@ test_known_boundaries() } { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); @@ -186,7 +186,7 @@ test_known_boundaries() } { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); @@ -207,7 +207,7 @@ test_known_boundaries() } { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); nf->set_mutable(false); diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index aa942bb4c..f664db2e6 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -21,7 +21,7 @@ ArgumentNodeMismatch() // Arrange auto valueType = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); @@ -58,7 +58,7 @@ ArgumentInputTypeMismatch() auto valueType 
= jlm::tests::valuetype::Create(); auto stateType = jlm::tests::statetype::Create(); - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "import"); auto structuralNode = structural_node::create(rvsdg.root(), 1); diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 4a6b80fba..d0eee1f4e 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -20,7 +20,7 @@ IteratorRanges() // Arrange auto valueType = valuetype::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto structuralNode = structural_node::create(graph.root(), 1); auto & subregion = *structuralNode->subregion(0); @@ -91,7 +91,7 @@ Contains() // Arrange auto valueType = valuetype::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = structural_node::create(graph.root(), 1); @@ -124,7 +124,7 @@ static int IsRootRegion() { // Arrange - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto structuralNode = jlm::tests::structural_node::create(graph.root(), 1); @@ -146,7 +146,7 @@ NumRegions_EmptyRvsdg() using namespace jlm::rvsdg; // Arrange - jlm::rvsdg::graph graph; + Graph graph; // Act & Assert assert(Region::NumRegions(*graph.root()) == 1); @@ -165,7 +165,7 @@ NumRegions_NonEmptyRvsdg() using namespace jlm::rvsdg; // Arrange - jlm::rvsdg::graph graph; + const Graph graph; auto structuralNode = jlm::tests::structural_node::create(graph.root(), 4); jlm::tests::structural_node::create(structuralNode->subregion(0), 2); jlm::tests::structural_node::create(structuralNode->subregion(3), 5); @@ -187,7 +187,7 @@ RemoveResultsWhere() using namespace jlm::tests; // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); @@ -242,7 +242,7 @@ RemoveArgumentsWhere() using 
namespace jlm::tests; // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); @@ -296,7 +296,7 @@ PruneArguments() using namespace jlm::tests; // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); @@ -333,7 +333,7 @@ ToTree_EmptyRvsdg() using namespace jlm::rvsdg; // Arrange - graph rvsdg; + Graph rvsdg; // Act auto tree = Region::ToTree(*rvsdg.root()); @@ -354,7 +354,7 @@ ToTree_EmptyRvsdgWithAnnotations() using namespace jlm::util; // Arrange - graph rvsdg; + Graph rvsdg; AnnotationMap annotationMap; annotationMap.AddAnnotation(rvsdg.root(), Annotation("NumNodes", rvsdg.root()->nnodes())); @@ -379,7 +379,7 @@ ToTree_RvsdgWithStructuralNodes() using namespace jlm::rvsdg; // Arrange - graph rvsdg; + Graph rvsdg; auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 2); jlm::tests::structural_node::create(structuralNode->subregion(0), 1); jlm::tests::structural_node::create(structuralNode->subregion(1), 3); @@ -412,7 +412,7 @@ ToTree_RvsdgWithStructuralNodesAndAnnotations() using namespace jlm::util; // Arrange - graph rvsdg; + Graph rvsdg; auto structuralNode1 = jlm::tests::structural_node::create(rvsdg.root(), 2); auto structuralNode2 = jlm::tests::structural_node::create(structuralNode1->subregion(1), 3); auto subregion2 = structuralNode2->subregion(2); @@ -451,7 +451,7 @@ BottomNodeTests() auto valueType = valuetype::Create(); // Arrange - graph rvsdg; + Graph rvsdg; // Act & Assert // A newly created node without any users should automatically be added to the bottom nodes diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index 18d954a65..ab19a9df0 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -21,7 +21,7 @@ ResultNodeMismatch() // Arrange auto valueType = 
jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); @@ -63,7 +63,7 @@ ResultInputTypeMismatch() auto valueType = jlm::tests::valuetype::Create(); auto stateType = jlm::tests::statetype::Create(); - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto structuralNode = structural_node::create(rvsdg.root(), 1); auto structuralOutput = jlm::rvsdg::structural_output::create(structuralNode, valueType); diff --git a/tests/jlm/rvsdg/TestStructuralNode.cpp b/tests/jlm/rvsdg/TestStructuralNode.cpp index f23c98831..dce708237 100644 --- a/tests/jlm/rvsdg/TestStructuralNode.cpp +++ b/tests/jlm/rvsdg/TestStructuralNode.cpp @@ -15,7 +15,7 @@ TestOutputRemoval() using namespace jlm; // Arrange - rvsdg::graph rvsdg; + rvsdg::Graph rvsdg; auto valueType = tests::valuetype::Create(); auto structuralNode = tests::structural_node::create(rvsdg.root(), 1); diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index a170932da..7690de4d6 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -19,7 +19,7 @@ types_bitstring_arithmetic_test_bitand(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -47,7 +47,7 @@ types_bitstring_arithmetic_test_bitashr(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -86,7 +86,7 @@ types_bitstring_arithmetic_test_bitdifference(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = 
&jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -109,7 +109,7 @@ types_bitstring_arithmetic_test_bitnegate(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto c0 = create_bitconstant(graph.root(), 32, 3); @@ -137,7 +137,7 @@ types_bitstring_arithmetic_test_bitnot(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto c0 = create_bitconstant(graph.root(), 32, 3); @@ -165,7 +165,7 @@ types_bitstring_arithmetic_test_bitor(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -193,7 +193,7 @@ types_bitstring_arithmetic_test_bitproduct(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -222,7 +222,7 @@ types_bitstring_arithmetic_test_bitshiproduct(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -245,7 +245,7 @@ types_bitstring_arithmetic_test_bitshl(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -277,7 +277,7 @@ types_bitstring_arithmetic_test_bitshr(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; 
auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -309,7 +309,7 @@ types_bitstring_arithmetic_test_bitsmod(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -338,7 +338,7 @@ types_bitstring_arithmetic_test_bitsquotient(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -367,7 +367,7 @@ types_bitstring_arithmetic_test_bitsum(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -396,7 +396,7 @@ types_bitstring_arithmetic_test_bituhiproduct(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -419,7 +419,7 @@ types_bitstring_arithmetic_test_bitumod(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -448,7 +448,7 @@ types_bitstring_arithmetic_test_bituquotient(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -477,7 +477,7 @@ types_bitstring_arithmetic_test_bitxor(void) 
{ using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -521,7 +521,7 @@ types_bitstring_comparison_test_bitequal(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -555,7 +555,7 @@ types_bitstring_comparison_test_bitnotequal(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -589,7 +589,7 @@ types_bitstring_comparison_test_bitsgreater(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -627,7 +627,7 @@ types_bitstring_comparison_test_bitsgreatereq(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -668,7 +668,7 @@ types_bitstring_comparison_test_bitsless(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -706,7 +706,7 @@ types_bitstring_comparison_test_bitslesseq(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, 
bittype::Create(32), "s1"); @@ -747,7 +747,7 @@ types_bitstring_comparison_test_bitugreater(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -785,7 +785,7 @@ types_bitstring_comparison_test_bitugreatereq(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -826,7 +826,7 @@ types_bitstring_comparison_test_bituless(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -864,7 +864,7 @@ types_bitstring_comparison_test_bitulesseq(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); @@ -933,7 +933,7 @@ types_bitstring_test_constant(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto b1 = output::GetNode(*create_bitconstant(graph.root(), "00110011")); auto b2 = output::GetNode(*create_bitconstant(graph.root(), 8, 204)); @@ -969,7 +969,7 @@ types_bitstring_test_normalize(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; bittype bits32(32); auto imp = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "imp"); @@ -1028,7 +1028,7 @@ types_bitstring_test_reduction(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto a = create_bitconstant(graph.root(), "1100"); auto b = create_bitconstant(graph.root(), "1010"); @@ -1082,7 +1082,7 @@ 
types_bitstring_test_slice_concat(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto base_const1 = create_bitconstant(graph.root(), "00110111"); auto base_const2 = create_bitconstant(graph.root(), "11001000"); diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 24c1de723..235f420c9 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -19,7 +19,7 @@ test_flattened_binary_reduction() /* test paralell reduction */ { - jlm::rvsdg::graph graph; + Graph graph; auto i0 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i1 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); @@ -53,7 +53,7 @@ test_flattened_binary_reduction() /* test linear reduction */ { - jlm::rvsdg::graph graph; + Graph graph; auto i0 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i1 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); diff --git a/tests/jlm/rvsdg/test-bottomup.cpp b/tests/jlm/rvsdg/test-bottomup.cpp index f6138ca8c..ef437d6da 100644 --- a/tests/jlm/rvsdg/test-bottomup.cpp +++ b/tests/jlm/rvsdg/test-bottomup.cpp @@ -12,7 +12,7 @@ static void test_initialization() { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto vtype = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, {}); auto n2 = jlm::tests::test_op::create(graph.root(), {}, { vtype }); @@ -36,7 +36,7 @@ test_initialization() static void test_basic_traversal() { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); @@ -60,7 +60,7 @@ test_basic_traversal() static void test_order_enforcement_traversal() { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph 
graph; auto type = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); diff --git a/tests/jlm/rvsdg/test-cse.cpp b/tests/jlm/rvsdg/test-cse.cpp index 93fba8b14..929f7fbd4 100644 --- a/tests/jlm/rvsdg/test-cse.cpp +++ b/tests/jlm/rvsdg/test-cse.cpp @@ -14,7 +14,7 @@ test_main() auto t = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto i = &jlm::tests::GraphImport::Create(graph, t, "i"); auto o1 = jlm::tests::test_op::create(graph.root(), {}, { t })->output(0); diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 3e812a861..956aaf5c7 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -16,7 +16,7 @@ test_gamma(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto cmp = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), ""); auto v0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); auto v1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); @@ -53,7 +53,7 @@ test_predicate_reduction(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; GammaOperation::normal_form(&graph)->set_predicate_reduction(true); bittype bits2(2); @@ -87,7 +87,7 @@ test_invariant_reduction(void) auto vtype = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; GammaOperation::normal_form(&graph)->set_invariant_reduction(true); auto pred = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), ""); @@ -112,7 +112,7 @@ test_control_constant_reduction() { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; GammaOperation::normal_form(&graph)->set_control_constant_reduction(true); auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(1), "x"); @@ -150,7 +150,7 @@ test_control_constant_reduction2() { using namespace 
jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; GammaOperation::normal_form(&graph)->set_control_constant_reduction(true); auto import = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), "import"); @@ -182,7 +182,7 @@ TestRemoveGammaOutputsWhere() using namespace jlm::rvsdg; // Arrange - jlm::rvsdg::graph rvsdg; + Graph rvsdg; auto vt = jlm::tests::valuetype::Create(); ControlType ct(2); @@ -244,7 +244,7 @@ TestPruneOutputs() using namespace jlm::rvsdg; // Arrange - jlm::rvsdg::graph rvsdg; + Graph rvsdg; auto vt = jlm::tests::valuetype::Create(); ControlType ct(2); @@ -293,7 +293,7 @@ TestIsInvariant() using namespace jlm::rvsdg; // Arrange - jlm::rvsdg::graph rvsdg; + Graph rvsdg; auto vt = jlm::tests::valuetype::Create(); ControlType ct(2); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 9fb5dab5d..e74d6eca6 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -33,7 +33,7 @@ test_recursive_prune() auto t = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto imp = &jlm::tests::GraphImport::Create(graph, t, "i"); auto n1 = jlm::tests::test_op::create(graph.root(), { imp }, { t }); @@ -71,7 +71,7 @@ JLM_UNIT_TEST_REGISTER("rvsdg/test-graph_prune", test_recursive_prune) static int test_empty_graph_pruning(void) { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; jlm::rvsdg::view(graph.root(), stdout); @@ -93,7 +93,7 @@ test_prune_replace(void) auto type = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0) }, { type }); @@ -122,7 +122,7 @@ test_graph(void) auto type = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); assert(n1); 
@@ -146,7 +146,7 @@ Copy() // Arrange auto valueType = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto & argument = TestGraphArgument::Create(*graph.root(), nullptr, valueType); auto node = test_op::create(graph.root(), { &argument }, { valueType }); TestGraphResult::Create(*node->output(0), nullptr); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index cfbd20a03..53445de38 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -18,7 +18,7 @@ test_node_copy(void) auto stype = jlm::tests::statetype::Create(); auto vtype = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto s = &jlm::tests::GraphImport::Create(graph, stype, ""); auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); @@ -96,7 +96,7 @@ test_node_depth() { auto vt = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto null = jlm::tests::test_op::create(graph.root(), {}, { vt }); @@ -123,7 +123,7 @@ static void TestRemoveOutputsWhere() { // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); auto & node1 = @@ -186,7 +186,7 @@ static void TestRemoveInputsWhere() { // Arrange - jlm::rvsdg::graph rvsdg; + jlm::rvsdg::Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index 77a618a1c..648b91c59 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ b/tests/jlm/rvsdg/test-statemux.cpp @@ -20,7 +20,7 @@ test_mux_mux_reduction() auto st = jlm::tests::statetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::mux_op)); auto mnf = static_cast(nf); mnf->set_mutable(false); @@ -60,7 +60,7 @@ test_multiple_origin_reduction() auto 
st = jlm::tests::statetype::Create(); - jlm::rvsdg::graph graph; + Graph graph; auto nf = graph.node_normal_form(typeid(jlm::rvsdg::mux_op)); auto mnf = static_cast(nf); mnf->set_mutable(false); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index d63ed264c..11a6cddf2 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -15,7 +15,7 @@ TestThetaCreation() { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto t = jlm::tests::valuetype::Create(); auto imp1 = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), "imp1"); @@ -54,7 +54,7 @@ TestRemoveThetaOutputsWhere() using namespace jlm::rvsdg; // Arrange - graph rvsdg; + Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); @@ -104,7 +104,7 @@ TestPruneThetaOutputs() using namespace jlm::rvsdg; // Arrange - graph rvsdg; + Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); @@ -139,7 +139,7 @@ TestRemoveThetaInputsWhere() using namespace jlm::rvsdg; // Arrange - graph rvsdg; + Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); @@ -195,7 +195,7 @@ TestPruneThetaInputs() using namespace jlm::rvsdg; // Arrange - graph rvsdg; + Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); auto ctl = &jlm::tests::GraphImport::Create(rvsdg, ControlType::Create(2), "ctl"); diff --git a/tests/jlm/rvsdg/test-topdown.cpp b/tests/jlm/rvsdg/test-topdown.cpp index 3e56c8f3c..45dfa7bdf 100644 --- a/tests/jlm/rvsdg/test-topdown.cpp +++ b/tests/jlm/rvsdg/test-topdown.cpp @@ -14,7 +14,7 @@ test_initialization() { auto vtype = jlm::tests::valuetype::Create(); - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto i = &jlm::tests::GraphImport::Create(graph, vtype, 
"i"); auto constant = jlm::tests::test_op::create(graph.root(), {}, { vtype }); @@ -46,7 +46,7 @@ test_initialization() static void test_basic_traversal() { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); @@ -72,7 +72,7 @@ test_basic_traversal() static void test_order_enforcement_traversal() { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); @@ -99,7 +99,7 @@ test_order_enforcement_traversal() static void test_traversal_insertion() { - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); @@ -154,7 +154,7 @@ test_traversal_insertion() static void test_mutable_traverse() { - auto test = [](jlm::rvsdg::graph * graph, + auto test = [](jlm::rvsdg::Graph * graph, jlm::rvsdg::node * n1, jlm::rvsdg::node * n2, jlm::rvsdg::node * n3) @@ -179,7 +179,7 @@ test_mutable_traverse() assert(seen_n3); }; - jlm::rvsdg::graph graph; + jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); auto n2 = jlm::tests::test_op::create(graph.root(), {}, { type }); diff --git a/tests/jlm/rvsdg/test-typemismatch.cpp b/tests/jlm/rvsdg/test-typemismatch.cpp index f3a009025..8098e2bbb 100644 --- a/tests/jlm/rvsdg/test-typemismatch.cpp +++ b/tests/jlm/rvsdg/test-typemismatch.cpp @@ -15,7 +15,7 @@ test_main(void) { using namespace jlm::rvsdg; - jlm::rvsdg::graph graph; + Graph graph; auto type = jlm::tests::statetype::Create(); auto value_type = jlm::tests::valuetype::Create(); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 517dfdbdb..1a1eba15c 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -26,7 +26,7 @@ namespace 
jlm::tests */ class GraphImport final : public rvsdg::GraphImport { - GraphImport(rvsdg::graph & graph, std::shared_ptr type, std::string name) + GraphImport(rvsdg::Graph & graph, std::shared_ptr type, std::string name) : rvsdg::GraphImport(graph, std::move(type), std::move(name)) {} @@ -35,7 +35,7 @@ class GraphImport final : public rvsdg::GraphImport Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; static GraphImport & - Create(rvsdg::graph & graph, std::shared_ptr type, std::string name) + Create(rvsdg::Graph & graph, std::shared_ptr type, std::string name) { auto graphImport = new GraphImport(graph, std::move(type), std::move(name)); graph.root()->append_argument(graphImport); From 8879117a63e04eaf468c2786bcfdd522c81a2d58 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 25 Nov 2024 09:24:13 +0100 Subject: [PATCH 120/170] Rename structural_output class to StructuralOutput (#668) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 12 +++++----- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 5 ++--- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 ++-- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 ++-- jlm/hls/ir/hls.cpp | 10 ++++----- jlm/hls/ir/hls.hpp | 10 ++++----- jlm/hls/opt/cne.cpp | 8 +++---- jlm/hls/util/view.cpp | 4 ++-- jlm/llvm/ir/RvsdgModule.cpp | 2 +- jlm/llvm/ir/RvsdgModule.hpp | 2 +- jlm/llvm/ir/operators/Phi.cpp | 2 +- jlm/llvm/ir/operators/Phi.hpp | 8 +++---- jlm/llvm/ir/operators/delta.cpp | 2 +- jlm/llvm/ir/operators/delta.hpp | 8 +++---- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/ir/operators/lambda.hpp | 8 +++---- jlm/llvm/opt/cne.cpp | 8 +++---- jlm/llvm/opt/inversion.cpp | 4 ++-- jlm/llvm/opt/pull.cpp | 2 +- jlm/rvsdg/gamma.cpp | 8 +++---- jlm/rvsdg/gamma.hpp | 8 +++---- jlm/rvsdg/region.cpp | 8 +++---- jlm/rvsdg/region.hpp | 14 ++++++------ jlm/rvsdg/structural-node.cpp | 10 
++++----- jlm/rvsdg/structural-node.hpp | 22 +++++++++---------- jlm/rvsdg/theta.cpp | 4 ++-- jlm/rvsdg/theta.hpp | 10 ++++----- .../rvsdg2rhls/MemoryConverterTests.cpp | 2 +- tests/jlm/rvsdg/ResultTests.cpp | 4 ++-- tests/jlm/rvsdg/TestStructuralNode.cpp | 10 ++++----- tests/jlm/rvsdg/test-graph.cpp | 2 +- tests/jlm/rvsdg/test-nodes.cpp | 4 ++-- tests/test-operation.cpp | 6 ++--- tests/test-operation.hpp | 21 ++++++++---------- 37 files changed, 120 insertions(+), 124 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 3520d76d6..62c4bfa65 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -2514,7 +2514,7 @@ RhlsToFirrtlConverter::TraceArgument(rvsdg::RegionArgument * arg) // Need to find the source of the outer regions argument return TraceArgument(o); } - else if (auto o = dynamic_cast(origin)) + else if (auto o = dynamic_cast(origin)) { // Check if we the input of one loop_node is connected to the output of another // StructuralNode, i.e., if the input is connected to the output of another loop_node @@ -2593,7 +2593,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB { origin = TraceArgument(o); } - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { // Need to trace through the region to find the source node origin = TraceStructuralOutput(o); @@ -2658,7 +2658,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB { origin = TraceArgument(o); } - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { // Need to trace through the region to find the source node origin = TraceStructuralOutput(o); @@ -2701,7 +2701,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB // We have found the source output output = o; } - else if (auto o = dynamic_cast(origin)) + else 
if (auto o = dynamic_cast(origin)) { // Need to trace through the region to find the source node output = TraceStructuralOutput(o); @@ -2778,7 +2778,7 @@ RhlsToFirrtlConverter::createInstances( // Trace a structural output back to the "node" generating the value // Returns the output of the node jlm::rvsdg::simple_output * -RhlsToFirrtlConverter::TraceStructuralOutput(jlm::rvsdg::structural_output * output) +RhlsToFirrtlConverter::TraceStructuralOutput(rvsdg::StructuralOutput * output) { auto node = output->node(); @@ -2790,7 +2790,7 @@ RhlsToFirrtlConverter::TraceStructuralOutput(jlm::rvsdg::structural_output * out } JLM_ASSERT(output->results.size() == 1); auto origin = output->results.begin().ptr()->origin(); - if (auto o = dynamic_cast(origin)) + if (auto o = dynamic_cast(origin)) { // Need to trace the output of the nested structural node return TraceStructuralOutput(o); diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index fee8ea292..895608373 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -261,7 +261,7 @@ class RhlsToFirrtlConverter : public BaseHLS jlm::rvsdg::output * TraceArgument(rvsdg::RegionArgument * arg); jlm::rvsdg::simple_output * - TraceStructuralOutput(jlm::rvsdg::structural_output * out); + TraceStructuralOutput(rvsdg::StructuralOutput * out); void InitializeMemReq(circt::firrtl::FModuleOp module); diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 21434994a..f59936e10 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -90,7 +90,7 @@ BaseHLS::get_port_name(jlm::rvsdg::output * port) { result += "o"; } - else if (dynamic_cast(port)) + else if (dynamic_cast(port)) { result += "so"; } diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 
d647f5a1a..c6559f424 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -295,8 +295,7 @@ decouple_load( continue; } auto new_res_origin = smap.lookup(res->origin()); - auto new_state_output = - jlm::rvsdg::structural_output::create(new_loop, new_res_origin->Type()); + auto new_state_output = rvsdg::StructuralOutput::create(new_loop, new_res_origin->Type()); ExitResult::Create(*new_res_origin, *new_state_output); res->output()->divert_users(new_state_output); } @@ -328,7 +327,7 @@ decouple_load( // create output for address auto load_addr = gate_out[0]; - auto addr_output = jlm::rvsdg::structural_output::create(new_loop, load_addr->Type()); + auto addr_output = rvsdg::StructuralOutput::create(new_loop, load_addr->Type()); ExitResult::Create(*load_addr, *addr_output); // trace and remove loop input for mem data reponse auto mem_data_loop_out = new_load->input(new_load->ninputs() - 1)->origin(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 7ea04c483..7aa582372 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -47,7 +47,7 @@ jlm::hls::route_request(rvsdg::Region * target, jlm::rvsdg::output * request) { auto ln = dynamic_cast(request->region()->node()); JLM_ASSERT(ln); - auto output = jlm::rvsdg::structural_output::create(ln, request->Type()); + auto output = rvsdg::StructuralOutput::create(ln, request->Type()); ExitResult::Create(*request, *output); return route_request(target, output); } @@ -134,7 +134,7 @@ trace_call(const jlm::rvsdg::output * output) } return trace_call(argument->input()); } - else if (auto so = dynamic_cast(output)) + else if (auto so = dynamic_cast(output)) { for (auto & r : so->results) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index a8a087e36..c395ff317 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ 
b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -95,7 +95,7 @@ find_load_store( } } -jlm::rvsdg::structural_output * +jlm::rvsdg::StructuralOutput * find_loop_output(jlm::rvsdg::structural_input * sti) { auto sti_arg = sti->arguments.first(); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 325e7c235..532e52209 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -148,7 +148,7 @@ inline_calls(rvsdg::Region * region) else if (dynamic_cast(&(node->operation()))) { auto traced = jlm::hls::trace_call(node->input(0)); - auto so = dynamic_cast(traced); + auto so = dynamic_cast(traced); if (!so) { if (auto graphImport = dynamic_cast(traced)) @@ -162,7 +162,7 @@ inline_calls(rvsdg::Region * region) } } JLM_ASSERT(rvsdg::is(so->node())); - auto ln = dynamic_cast(traced)->node(); + auto ln = dynamic_cast(traced)->node(); llvm::inlineCall( dynamic_cast(node), dynamic_cast(ln)); diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index f4116c8a2..8be111665 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -51,7 +51,7 @@ backedge_argument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * i } backedge_result & -backedge_result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +backedge_result::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { JLM_ASSERT(output == nullptr); return *backedge_result::create(&origin); @@ -60,16 +60,16 @@ backedge_result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * ou ExitResult::~ExitResult() noexcept = default; ExitResult & -ExitResult::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +ExitResult::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { return Create(origin, *output); } -jlm::rvsdg::structural_output * +rvsdg::StructuralOutput * loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { auto input = 
jlm::rvsdg::structural_input::create(this, origin, origin->Type()); - auto output = jlm::rvsdg::structural_output::create(this, origin->Type()); + auto output = rvsdg::StructuralOutput::create(this, origin->Type()); auto & argument_in = EntryArgument::Create(*subregion(), *input, origin->Type()); auto argument_loop = add_backedge(origin->Type()); @@ -117,7 +117,7 @@ loop_node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const } for (size_t i = 0; i < noutputs(); ++i) { - auto out = jlm::rvsdg::structural_output::create(loop, output(i)->Type()); + auto out = rvsdg::StructuralOutput::create(loop, output(i)->Type()); smap.insert(output(i), out); smap.insert(output(i), out); } diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 480d2b384..604fed0ed 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -692,7 +692,7 @@ class backedge_result : public rvsdg::RegionResult } backedge_result & - Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; private: backedge_result(jlm::rvsdg::output * origin) @@ -722,7 +722,7 @@ class ExitResult final : public rvsdg::RegionResult ~ExitResult() noexcept override; private: - ExitResult(rvsdg::output & origin, rvsdg::structural_output & output) + ExitResult(rvsdg::output & origin, rvsdg::StructuralOutput & output) : rvsdg::RegionResult(origin.region(), &origin, &output, origin.Type()) { JLM_ASSERT(rvsdg::is(origin.region()->node())); @@ -730,12 +730,12 @@ class ExitResult final : public rvsdg::RegionResult public: ExitResult & - Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; // FIXME: This should not be public, but we currently still have some transformations that use // this one. Make it eventually private. 
static ExitResult & - Create(rvsdg::output & origin, rvsdg::structural_output & output) + Create(rvsdg::output & origin, rvsdg::StructuralOutput & output) { auto result = new ExitResult(origin, output); origin.region()->append_result(result); @@ -786,7 +786,7 @@ class loop_node final : public rvsdg::StructuralNode backedge_argument * add_backedge(std::shared_ptr type); - jlm::rvsdg::structural_output * + rvsdg::StructuralOutput * add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer = nullptr); jlm::rvsdg::output * diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 7f84b0bea..a9ac6ad66 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -202,8 +202,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & auto n2 = jlm::rvsdg::output::GetNode(*o2); if (is(n1) && is(n2) && n1 == n2) { - auto so1 = static_cast(o1); - auto so2 = static_cast(o2); + auto so1 = static_cast(o1); + auto so2 = static_cast(o2); vs.insert(o1, o2); auto r1 = so1->results.first(); auto r2 = so2->results.first(); @@ -225,8 +225,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (rvsdg::is(n1) && n1 == n2) { - auto so1 = static_cast(o1); - auto so2 = static_cast(o2); + auto so1 = static_cast(o1); + auto so2 = static_cast(o2); auto r1 = so1->results.begin(); auto r2 = so2->results.begin(); for (; r1 != so1->results.end(); r1++, r2++) diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 7107813f5..4fafa59fb 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -44,7 +44,7 @@ get_dot_name(jlm::rvsdg::output * output) { return jlm::util::strfmt(get_dot_name(no->node()), ":", "o", hex((intptr_t)output)); } - else if (dynamic_cast(output)) + else if (dynamic_cast(output)) { return jlm::util::strfmt("so", hex((intptr_t)output), ":", "default"); } @@ -113,7 +113,7 @@ structural_input_to_dot(jlm::rvsdg::structural_input * structuralInput) } std::string 
-structural_output_to_dot(jlm::rvsdg::structural_output * structuralOutput) +structural_output_to_dot(rvsdg::StructuralOutput * structuralOutput) { auto display_name = jlm::util::strfmt("so", structuralOutput->index()); auto dot_name = jlm::util::strfmt("so", hex((intptr_t)structuralOutput)); diff --git a/jlm/llvm/ir/RvsdgModule.cpp b/jlm/llvm/ir/RvsdgModule.cpp index c927f43c8..994bbb501 100644 --- a/jlm/llvm/ir/RvsdgModule.cpp +++ b/jlm/llvm/ir/RvsdgModule.cpp @@ -15,7 +15,7 @@ GraphImport::Copy(rvsdg::Region & region, rvsdg::structural_input * input) } GraphExport & -GraphExport::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +GraphExport::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { JLM_ASSERT(output == nullptr); return GraphExport::Create(origin, Name()); diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 1dc002039..370a5db1e 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -78,7 +78,7 @@ class GraphExport final : public rvsdg::GraphExport public: GraphExport & - Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; static GraphExport & Create(rvsdg::output & origin, std::string name) diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 316fa1c34..5c2c6b6ef 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -193,7 +193,7 @@ rvresult::~rvresult() {} rvresult & -rvresult::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +rvresult::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { auto phiOutput = util::AssertedCast(output); return *rvresult::create(origin.region(), &origin, phiOutput, origin.Type()); diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 568a81e0c..31615a6e5 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ 
-598,7 +598,7 @@ class cvinput final : public jlm::rvsdg::structural_input class rvargument; class rvresult; -class rvoutput final : public jlm::rvsdg::structural_output +class rvoutput final : public rvsdg::StructuralOutput { friend class phi::builder; @@ -607,7 +607,7 @@ class rvoutput final : public jlm::rvsdg::structural_output private: rvoutput(phi::node * node, rvargument * argument, std::shared_ptr type) - : structural_output(node, std::move(type)), + : StructuralOutput(node, std::move(type)), argument_(argument) {} @@ -640,7 +640,7 @@ class rvoutput final : public jlm::rvsdg::structural_output phi::node * node() const noexcept { - return static_cast(structural_output::node()); + return static_cast(StructuralOutput::node()); } private: @@ -779,7 +779,7 @@ class rvresult final : public rvsdg::RegionResult operator=(rvresult &&) = delete; rvresult & - Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; static rvresult * create( diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index 8480cb384..c223ed48c 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -185,7 +185,7 @@ result::~result() {} result & -result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +result::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { JLM_ASSERT(output == nullptr); return *result::create(&origin); diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 70f1f324d..b458e6e62 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -377,7 +377,7 @@ class node::cvconstiterator final : public rvsdg::input::constiterator /** \brief Delta output */ -class output final : public rvsdg::structural_output +class output final : public rvsdg::StructuralOutput { friend ::jlm::llvm::delta::node; @@ -385,7 +385,7 @@ class output final : 
public rvsdg::structural_output ~output() override; output(delta::node * node, std::shared_ptr type) - : structural_output(node, std::move(type)) + : StructuralOutput(node, std::move(type)) {} private: @@ -400,7 +400,7 @@ class output final : public rvsdg::structural_output delta::node * node() const noexcept { - return static_cast(structural_output::node()); + return static_cast(StructuralOutput::node()); } }; @@ -447,7 +447,7 @@ class result final : public rvsdg::RegionResult ~result() override; result & - Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; private: explicit result(rvsdg::output * origin) diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 7a138b680..5658c63a3 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -440,7 +440,7 @@ cvargument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) result::~result() = default; result & -result::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +result::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { JLM_ASSERT(output == nullptr); return *result::create(&origin); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 3d67d22c8..0fbe53707 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -477,7 +477,7 @@ class node::cvconstiterator final : public jlm::rvsdg::input::constiterator type) - : structural_output(node, std::move(type)) + : StructuralOutput(node, std::move(type)) {} private: @@ -500,7 +500,7 @@ class output final : public jlm::rvsdg::structural_output lambda::node * node() const noexcept { - return jlm::util::AssertedCast(structural_output::node()); + return jlm::util::AssertedCast(StructuralOutput::node()); } }; @@ -636,7 +636,7 @@ class result final : public rvsdg::RegionResult ~result() override; result & 
- Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; private: explicit result(jlm::rvsdg::output * origin) diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 82f6a58b0..c3262444f 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -199,8 +199,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & auto n2 = jlm::rvsdg::output::GetNode(*o2); if (is(n1) && is(n2) && n1 == n2) { - auto so1 = static_cast(o1); - auto so2 = static_cast(o2); + auto so1 = static_cast(o1); + auto so2 = static_cast(o2); vs.insert(o1, o2); auto r1 = so1->results.first(); auto r2 = so2->results.first(); @@ -209,8 +209,8 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (rvsdg::is(n1) && n1 == n2) { - auto so1 = static_cast(o1); - auto so2 = static_cast(o2); + auto so1 = static_cast(o1); + auto so2 = static_cast(o2); auto r1 = so1->results.begin(); auto r2 = so2->results.begin(); for (; r1 != so1->results.end(); r1++, r2++) diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index cfa3f0a4d..d7546e445 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -130,10 +130,10 @@ to_argument(jlm::rvsdg::output * output) return dynamic_cast(output); } -static jlm::rvsdg::structural_output * +static rvsdg::StructuralOutput * to_structural_output(jlm::rvsdg::output * output) { - return dynamic_cast(output); + return dynamic_cast(output); } static void diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index c94c5caae..6d200d232 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -180,7 +180,7 @@ pullin_bottom(rvsdg::GammaNode * gamma) auto input = node->input(i); if (jlm::rvsdg::output::GetNode(*input->origin()) == gamma) { - auto output = static_cast(input->origin()); + auto output = static_cast(input->origin()); 
operands.push_back(gamma->subregion(r)->result(output->index())->origin()); } else diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 7bcf3b9ee..9a5fd5197 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -69,7 +69,7 @@ perform_invariant_reduction(GammaNode * gamma) return was_normalized; } -static std::unordered_set +static std::unordered_set is_control_constant_reducible(GammaNode * gamma) { /* check gamma predicate */ @@ -87,7 +87,7 @@ is_control_constant_reducible(GammaNode * gamma) return {}; /* check for constants */ - std::unordered_set outputs; + std::unordered_set outputs; for (auto it = gamma->begin_exitvar(); it != gamma->end_exitvar(); it++) { if (!is_ctltype(it->type())) @@ -112,7 +112,7 @@ is_control_constant_reducible(GammaNode * gamma) } static void -perform_control_constant_reduction(std::unordered_set & outputs) +perform_control_constant_reduction(std::unordered_set & outputs) { auto gamma = static_cast((*outputs.begin())->node()); auto origin = static_cast(gamma->predicate()->origin()); @@ -383,7 +383,7 @@ GammaArgument::Copy(rvsdg::Region & region, structural_input * input) GammaResult::~GammaResult() noexcept = default; GammaResult & -GammaResult::Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) +GammaResult::Copy(rvsdg::output & origin, StructuralOutput * output) { auto gammaOutput = util::AssertedCast(output); return GammaResult::Create(*origin.region(), origin, *gammaOutput); diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index e450448e3..2f278ea60 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -378,7 +378,7 @@ class GammaInput final : public structural_input /* gamma output */ -class GammaOutput final : public structural_output +class GammaOutput final : public StructuralOutput { friend GammaNode; @@ -386,13 +386,13 @@ class GammaOutput final : public structural_output ~GammaOutput() noexcept override; GammaOutput(GammaNode * node, std::shared_ptr type) - : 
structural_output(node, std::move(type)) + : StructuralOutput(node, std::move(type)) {} GammaNode * node() const noexcept { - return static_cast(structural_output::node()); + return static_cast(StructuralOutput::node()); } inline result_list::iterator @@ -500,7 +500,7 @@ class GammaResult final : public RegionResult {} GammaResult & - Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, StructuralOutput * output) override; static GammaResult & Create(rvsdg::Region & region, rvsdg::output & origin, GammaOutput & gammaOutput) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 2605c861b..c53aad087 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -77,7 +77,7 @@ RegionResult::~RegionResult() noexcept RegionResult::RegionResult( rvsdg::Region * region, jlm::rvsdg::output * origin, - jlm::rvsdg::structural_output * output, + StructuralOutput * output, std::shared_ptr type) : input(origin, region, std::move(type)), output_(output) @@ -103,7 +103,7 @@ RegionResult::GetOwner() const noexcept } RegionResult & -RegionResult::Copy(rvsdg::output & origin, structural_output * output) +RegionResult::Copy(rvsdg::output & origin, StructuralOutput * output) { return RegionResult::Create(*origin.region(), origin, output, origin.Type()); } @@ -112,7 +112,7 @@ RegionResult & RegionResult::Create( rvsdg::Region & region, rvsdg::output & origin, - structural_output * output, + StructuralOutput * output, std::shared_ptr type) { JLM_ASSERT(origin.region() == ®ion); @@ -331,7 +331,7 @@ Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool auto oldResult = result(n); auto newOrigin = smap.lookup(oldResult->origin()); JLM_ASSERT(newOrigin != nullptr); - auto newOutput = dynamic_cast(smap.lookup(oldResult->output())); + auto newOutput = dynamic_cast(smap.lookup(oldResult->output())); oldResult->Copy(*newOrigin, newOutput); } } diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp 
index 15f76792d..38bb9c3f6 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -29,7 +29,7 @@ class simple_op; class structural_input; class StructuralNode; class structural_op; -class structural_output; +class StructuralOutput; class SubstitutionMap; /** @@ -120,7 +120,7 @@ class RegionArgument : public output * can be mapped back to the region arguments or the corresponding structural outputs * throughout the execution, but the concrete semantics of this mapping * depends on the structural node the region is part of. A region result is either linked - * with a \ref structural_output or is a standalone result. + * with a \ref StructuralOutput or is a standalone result. */ class RegionResult : public input { @@ -135,7 +135,7 @@ class RegionResult : public input RegionResult( rvsdg::Region * region, rvsdg::output * origin, - structural_output * output, + StructuralOutput * output, std::shared_ptr type); RegionResult(const RegionResult &) = delete; @@ -148,7 +148,7 @@ class RegionResult : public input RegionResult & operator=(RegionResult &&) = delete; - [[nodiscard]] structural_output * + [[nodiscard]] StructuralOutput * output() const noexcept { return output_; @@ -164,7 +164,7 @@ class RegionResult : public input * @return A reference to the copied result. 
*/ virtual RegionResult & - Copy(rvsdg::output & origin, structural_output * output); + Copy(rvsdg::output & origin, StructuralOutput * output); [[nodiscard]] std::variant GetOwner() const noexcept override; @@ -194,11 +194,11 @@ class RegionResult : public input Create( rvsdg::Region & region, rvsdg::output & origin, - structural_output * output, + StructuralOutput * output, std::shared_ptr type); private: - structural_output * output_; + StructuralOutput * output_; }; /** diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index 3b68fb61a..b2acd782c 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -31,14 +31,14 @@ structural_input::structural_input( /* structural output */ -structural_output::~structural_output() noexcept +StructuralOutput::~StructuralOutput() noexcept { JLM_ASSERT(results.empty()); on_output_destroy(this); } -structural_output::structural_output(StructuralNode * node, std::shared_ptr type) +StructuralOutput::StructuralOutput(StructuralNode * node, std::shared_ptr type) : node_output(node, std::move(type)) { on_output_create(this); @@ -83,8 +83,8 @@ StructuralNode::append_input(std::unique_ptr input) return static_cast(node::add_input(std::move(sinput))); } -structural_output * -StructuralNode::append_output(std::unique_ptr output) +StructuralOutput * +StructuralNode::append_output(std::unique_ptr output) { if (output->node() != this) throw jlm::util::error("Appending output to wrong node."); @@ -95,7 +95,7 @@ StructuralNode::append_output(std::unique_ptr output) return this->output(index); auto soutput = std::unique_ptr(output.release()); - return static_cast(node::add_output(std::move(soutput))); + return static_cast(add_output(std::move(soutput))); } } diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 93164b18c..e4e986e65 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -16,7 +16,7 @@ namespace jlm::rvsdg class 
structural_input; class structural_op; -class structural_output; +class StructuralOutput; class StructuralNode : public node { @@ -47,14 +47,14 @@ class StructuralNode : public node inline jlm::rvsdg::structural_input * input(size_t index) const noexcept; - inline jlm::rvsdg::structural_output * + [[nodiscard]] inline StructuralOutput * output(size_t index) const noexcept; structural_input * append_input(std::unique_ptr input); - structural_output * - append_output(std::unique_ptr output); + StructuralOutput * + append_output(std::unique_ptr output); using node::RemoveInput; @@ -105,19 +105,19 @@ class structural_input : public node_input typedef jlm::util::intrusive_list result_list; -class structural_output : public node_output +class StructuralOutput : public node_output { friend StructuralNode; public: - virtual ~structural_output() noexcept; + ~StructuralOutput() noexcept override; - structural_output(StructuralNode * node, std::shared_ptr type); + StructuralOutput(StructuralNode * node, std::shared_ptr type); - static structural_output * + static StructuralOutput * create(StructuralNode * node, std::shared_ptr type) { - auto output = std::make_unique(node, std::move(type)); + auto output = std::make_unique(node, std::move(type)); return node->append_output(std::move(output)); } @@ -138,10 +138,10 @@ StructuralNode::input(size_t index) const noexcept return static_cast(node::input(index)); } -inline jlm::rvsdg::structural_output * +inline StructuralOutput * StructuralNode::output(size_t index) const noexcept { - return static_cast(node::output(index)); + return static_cast(node::output(index)); } template diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index f940963d8..62341940a 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -59,7 +59,7 @@ ThetaArgument::Copy(rvsdg::Region & region, structural_input * input) ThetaResult::~ThetaResult() noexcept = default; ThetaResult & -ThetaResult::Copy(rvsdg::output & origin, structural_output * output) 
+ThetaResult::Copy(rvsdg::output & origin, StructuralOutput * output) { auto thetaOutput = util::AssertedCast(output); return ThetaResult::Create(origin, *thetaOutput); @@ -68,7 +68,7 @@ ThetaResult::Copy(rvsdg::output & origin, structural_output * output) ThetaPredicateResult::~ThetaPredicateResult() noexcept = default; ThetaPredicateResult & -ThetaPredicateResult::Copy(rvsdg::output & origin, structural_output * output) +ThetaPredicateResult::Copy(rvsdg::output & origin, StructuralOutput * output) { JLM_ASSERT(output == nullptr); return ThetaPredicateResult::Create(origin); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 33348be3f..a578c6558 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -292,7 +292,7 @@ is_invariant(const ThetaInput * input) noexcept return input->result()->origin() == input->argument(); } -class ThetaOutput final : public structural_output +class ThetaOutput final : public StructuralOutput { friend ThetaNode; friend ThetaInput; @@ -301,14 +301,14 @@ class ThetaOutput final : public structural_output ~ThetaOutput() noexcept override; ThetaOutput(ThetaNode * node, const std::shared_ptr type) - : structural_output(node, std::move(type)), + : StructuralOutput(node, std::move(type)), input_(nullptr) {} ThetaNode * node() const noexcept { - return static_cast(structural_output::node()); + return static_cast(StructuralOutput::node()); } [[nodiscard]] ThetaInput * @@ -374,7 +374,7 @@ class ThetaResult final : public RegionResult ~ThetaResult() noexcept override; ThetaResult & - Copy(rvsdg::output & origin, jlm::rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, StructuralOutput * output) override; private: ThetaResult(rvsdg::output & origin, ThetaOutput & thetaOutput) @@ -403,7 +403,7 @@ class ThetaPredicateResult final : public RegionResult ~ThetaPredicateResult() noexcept override; ThetaPredicateResult & - Copy(rvsdg::output & origin, structural_output * output) override; + Copy(rvsdg::output & 
origin, StructuralOutput * output) override; private: explicit ThetaPredicateResult(rvsdg::output & origin) diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index 1e9fd4473..eb25ffd21 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -356,7 +356,7 @@ TestThetaLoad() // HLS_LOOP Node auto loopOutput = - jlm::util::AssertedCast(requestNode->input(0)->origin()); + jlm::util::AssertedCast(requestNode->input(0)->origin()); auto loopNode = jlm::util::AssertedCast(loopOutput->node()); jlm::util::AssertedCast(&loopNode->operation()); // Loop Result diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index ab19a9df0..ce8c0ca51 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -31,7 +31,7 @@ ResultNodeMismatch() auto & argument = TestGraphArgument::Create(*structuralNode1->subregion(0), structuralInput, valueType); - auto structuralOutput = structural_output::create(structuralNode1, valueType); + auto structuralOutput = StructuralOutput::create(structuralNode1, valueType); // Act bool outputErrorHandlerCalled = false; @@ -66,7 +66,7 @@ ResultInputTypeMismatch() jlm::rvsdg::Graph rvsdg; auto structuralNode = structural_node::create(rvsdg.root(), 1); - auto structuralOutput = jlm::rvsdg::structural_output::create(structuralNode, valueType); + auto structuralOutput = jlm::rvsdg::StructuralOutput::create(structuralNode, valueType); // Act & Assert bool exceptionWasCaught = false; diff --git a/tests/jlm/rvsdg/TestStructuralNode.cpp b/tests/jlm/rvsdg/TestStructuralNode.cpp index dce708237..9b40157a0 100644 --- a/tests/jlm/rvsdg/TestStructuralNode.cpp +++ b/tests/jlm/rvsdg/TestStructuralNode.cpp @@ -19,11 +19,11 @@ TestOutputRemoval() auto valueType = tests::valuetype::Create(); auto structuralNode = tests::structural_node::create(rvsdg.root(), 1); - 
auto output0 = rvsdg::structural_output::create(structuralNode, valueType); - auto output1 = rvsdg::structural_output::create(structuralNode, valueType); - auto output2 = rvsdg::structural_output::create(structuralNode, valueType); - auto output3 = rvsdg::structural_output::create(structuralNode, valueType); - auto output4 = rvsdg::structural_output::create(structuralNode, valueType); + auto output0 = rvsdg::StructuralOutput::create(structuralNode, valueType); + auto output1 = rvsdg::StructuralOutput::create(structuralNode, valueType); + auto output2 = rvsdg::StructuralOutput::create(structuralNode, valueType); + auto output3 = rvsdg::StructuralOutput::create(structuralNode, valueType); + auto output4 = rvsdg::StructuralOutput::create(structuralNode, valueType); // Act & Assert assert(structuralNode->noutputs() == 5); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index e74d6eca6..03dbc654a 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -45,7 +45,7 @@ test_recursive_prune() auto n4 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); auto n5 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); TestGraphResult::Create(*n4->output(0), nullptr); - auto o1 = structural_output::create(n3, t); + auto o1 = StructuralOutput::create(n3, t); auto n6 = jlm::tests::structural_node::create(n3->subregion(0), 1); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 53445de38..2e136f20b 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -25,8 +25,8 @@ test_node_copy(void) auto n1 = jlm::tests::structural_node::create(graph.root(), 3); auto i1 = structural_input::create(n1, s, stype); auto i2 = structural_input::create(n1, v, vtype); - auto o1 = structural_output::create(n1, stype); - auto o2 = structural_output::create(n1, vtype); + auto o1 = StructuralOutput::create(n1, stype); + auto o2 = StructuralOutput::create(n1, vtype); 
auto & a1 = TestGraphArgument::Create(*n1->subregion(0), i1, stype); auto & a2 = TestGraphArgument::Create(*n1->subregion(0), i2, vtype); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index df8a4b0d1..539edc780 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -15,7 +15,7 @@ GraphImport::Copy(rvsdg::Region & region, rvsdg::structural_input * input) } GraphExport & -GraphExport::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +GraphExport::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { JLM_ASSERT(output == nullptr); return GraphExport::Create(origin, Name()); @@ -181,7 +181,7 @@ structural_node::copy(rvsdg::Region * parent, rvsdg::SubstitutionMap & smap) con /* copy outputs */ for (size_t n = 0; n < noutputs(); n++) { - auto new_output = rvsdg::structural_output::create(node, output(n)->Type()); + auto new_output = rvsdg::StructuralOutput::create(node, output(n)->Type()); smap.insert(output(n), new_output); } @@ -252,7 +252,7 @@ StructuralNodeArgument::Copy(rvsdg::Region & region, rvsdg::structural_input * i StructuralNodeResult::~StructuralNodeResult() noexcept = default; StructuralNodeResult & -StructuralNodeResult::Copy(rvsdg::output & origin, rvsdg::structural_output * output) +StructuralNodeResult::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) { auto structuralNodeOutput = util::AssertedCast(output); return structuralNodeOutput != nullptr ? 
Create(origin, *structuralNodeOutput) : Create(origin); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 1a1eba15c..835f43571 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -55,7 +55,7 @@ class GraphExport final : public rvsdg::GraphExport public: GraphExport & - Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; static GraphExport & Create(rvsdg::output & origin, std::string name) @@ -280,7 +280,7 @@ class StructuralNodeInput final : public rvsdg::structural_input } }; -class StructuralNodeOutput final : public rvsdg::structural_output +class StructuralNodeOutput final : public rvsdg::StructuralOutput { friend structural_node; @@ -289,7 +289,7 @@ class StructuralNodeOutput final : public rvsdg::structural_output private: StructuralNodeOutput(structural_node & node, std::shared_ptr type) - : rvsdg::structural_output(&node, std::move(type)) + : StructuralOutput(&node, std::move(type)) {} }; @@ -336,7 +336,7 @@ class StructuralNodeResult final : public rvsdg::RegionResult ~StructuralNodeResult() noexcept override; StructuralNodeResult & - Copy(rvsdg::output & origin, rvsdg::structural_output * output) override; + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; private: StructuralNodeResult(rvsdg::output & origin, StructuralNodeOutput * output) @@ -513,26 +513,23 @@ class TestGraphResult final : public jlm::rvsdg::RegionResult TestGraphResult( rvsdg::Region & region, jlm::rvsdg::output & origin, - jlm::rvsdg::structural_output * output) + rvsdg::StructuralOutput * output) : jlm::rvsdg::RegionResult(®ion, &origin, output, origin.Type()) {} - TestGraphResult(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) + TestGraphResult(rvsdg::output & origin, rvsdg::StructuralOutput * output) : TestGraphResult(*origin.region(), origin, output) {} public: TestGraphResult & - 
Copy(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) override + Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override { return Create(origin, output); } static TestGraphResult & - Create( - rvsdg::Region & region, - jlm::rvsdg::output & origin, - jlm::rvsdg::structural_output * output) + Create(rvsdg::Region & region, jlm::rvsdg::output & origin, rvsdg::StructuralOutput * output) { auto graphResult = new TestGraphResult(region, origin, output); origin.region()->append_result(graphResult); @@ -540,7 +537,7 @@ class TestGraphResult final : public jlm::rvsdg::RegionResult } static TestGraphResult & - Create(jlm::rvsdg::output & origin, jlm::rvsdg::structural_output * output) + Create(rvsdg::output & origin, rvsdg::StructuralOutput * output) { return Create(*origin.region(), origin, output); } From 863ae971a58a30705ce92975a127367a39f54bdf Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 26 Nov 2024 13:51:23 +0100 Subject: [PATCH 121/170] Rename structural_input class to StructuralInput (#669) --- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 4 ++-- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 8 ++++---- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 8 ++++---- jlm/hls/ir/hls.cpp | 10 +++++----- jlm/hls/ir/hls.hpp | 8 ++++---- jlm/hls/opt/cne.cpp | 2 +- jlm/hls/util/view.cpp | 4 ++-- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 2 +- jlm/llvm/ir/RvsdgModule.cpp | 2 +- jlm/llvm/ir/RvsdgModule.hpp | 2 +- jlm/llvm/ir/operators/Phi.cpp | 4 ++-- jlm/llvm/ir/operators/Phi.hpp | 10 +++++----- jlm/llvm/ir/operators/call.cpp | 4 ++-- jlm/llvm/ir/operators/delta.cpp | 2 +- jlm/llvm/ir/operators/delta.hpp | 8 ++++---- jlm/llvm/ir/operators/lambda.cpp | 4 ++-- jlm/llvm/ir/operators/lambda.hpp | 10 +++++----- jlm/llvm/opt/cne.cpp | 2 +- jlm/llvm/opt/pull.cpp | 4 ++-- jlm/rvsdg/gamma.cpp | 2 +- jlm/rvsdg/gamma.hpp | 8 ++++---- jlm/rvsdg/region.cpp | 6 +++--- jlm/rvsdg/region.hpp | 17 +++++++---------- 
jlm/rvsdg/structural-node.cpp | 10 +++++----- jlm/rvsdg/structural-node.hpp | 22 +++++++++++----------- jlm/rvsdg/substitution.hpp | 17 ++++++++--------- jlm/rvsdg/theta.cpp | 2 +- jlm/rvsdg/theta.hpp | 8 ++++---- tests/jlm/rvsdg/ArgumentTests.cpp | 4 ++-- tests/jlm/rvsdg/RegionTests.cpp | 4 ++-- tests/jlm/rvsdg/ResultTests.cpp | 2 +- tests/jlm/rvsdg/test-graph.cpp | 2 +- tests/jlm/rvsdg/test-nodes.cpp | 4 ++-- tests/test-operation.cpp | 6 +++--- tests/test-operation.hpp | 14 +++++++------- 36 files changed, 112 insertions(+), 116 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp index 4f38650d6..c3a9947d0 100644 --- a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp @@ -73,7 +73,7 @@ class TraceAllocaUses } } } - else if (auto sti = dynamic_cast(user)) + else if (auto sti = dynamic_cast(user)) { for (auto & arg : sti->arguments) { diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index c6559f424..b558bd1c1 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -246,7 +246,7 @@ decouple_load( else { auto new_in = - jlm::rvsdg::structural_input::create(new_loop, arg->input()->origin(), arg->Type()); + rvsdg::StructuralInput::create(new_loop, arg->input()->origin(), arg->Type()); smap.insert(arg->input(), new_in); new_arg = &EntryArgument::Create(*new_loop->subregion(), *new_in, arg->Type()); } @@ -358,7 +358,7 @@ decouple_load( // use a buffer here to make ready logic for response easy and consistent auto buf = buffer_op::create(*dload_out[0], 2, true)[0]; // replace data output of loadNode - auto old_data_in = jlm::rvsdg::structural_input::create(loopNode, buf, dload_out[0]->Type()); + auto old_data_in = rvsdg::StructuralInput::create(loopNode, buf, dload_out[0]->Type()); auto & old_data_arg = EntryArgument::Create(*loopNode->subregion(), *old_data_in, 
dload_out[0]->Type()); loadNode->output(0)->divert_users(&old_data_arg); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 7aa582372..29b875450 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -30,7 +30,7 @@ jlm::hls::route_response(rvsdg::Region * target, jlm::rvsdg::output * response) auto parent_response = route_response(target->node()->region(), response); auto ln = dynamic_cast(target->node()); JLM_ASSERT(ln); - auto input = jlm::rvsdg::structural_input::create(ln, parent_response, parent_response->Type()); + auto input = rvsdg::StructuralInput::create(ln, parent_response, parent_response->Type()); auto & argument = EntryArgument::Create(*target, *input, response->Type()); return &argument; } @@ -205,7 +205,7 @@ trace_function_calls( } } } - else if (auto sti = dynamic_cast(user)) + else if (auto sti = dynamic_cast(user)) { for (auto & arg : sti->arguments) { @@ -414,7 +414,7 @@ TracePointer( } } } - else if (auto sti = dynamic_cast(user)) + else if (auto sti = dynamic_cast(user)) { for (auto & arg : sti->arguments) { @@ -495,7 +495,7 @@ IsDecoupledFunctionPointer( } } } - else if (auto structuralInput = dynamic_cast(user)) + else if (auto structuralInput = dynamic_cast(user)) { for (auto & arg : structuralInput->arguments) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index c395ff317..f5e0c8922 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -70,7 +70,7 @@ find_load_store( find_load_store(simplenode->output(i), load_nodes, store_nodes, visited); } } - else if (auto sti = dynamic_cast(user)) + else if (auto sti = dynamic_cast(user)) { for (auto & arg : sti->arguments) { @@ -96,7 +96,7 @@ find_load_store( } jlm::rvsdg::StructuralOutput * -find_loop_output(jlm::rvsdg::structural_input * sti) +find_loop_output(jlm::rvsdg::StructuralInput * sti) { auto 
sti_arg = sti->arguments.first(); JLM_ASSERT(sti_arg->nusers() == 1); @@ -207,7 +207,7 @@ separate_load_edge( JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN"); // end of region reached } - else if (auto sti = dynamic_cast(user)) + else if (auto sti = dynamic_cast(user)) { auto loop_node = dynamic_cast(sti->node()); JLM_ASSERT(loop_node); @@ -455,7 +455,7 @@ process_loops(jlm::rvsdg::output * state_edge) state_edge = sn->output(0); } } - else if (auto sti = dynamic_cast(user)) + else if (auto sti = dynamic_cast(user)) { auto ln = dynamic_cast(sti->node()); JLM_ASSERT(ln); diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 8be111665..1edfe456e 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -38,13 +38,13 @@ bundletype::ComputeHash() const noexcept EntryArgument::~EntryArgument() noexcept = default; EntryArgument & -EntryArgument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) +EntryArgument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { return EntryArgument::Create(region, *input, Type()); } backedge_argument & -backedge_argument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) +backedge_argument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { JLM_ASSERT(input == nullptr); return *backedge_argument::create(®ion, Type()); @@ -68,7 +68,7 @@ ExitResult::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) rvsdg::StructuralOutput * loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { - auto input = jlm::rvsdg::structural_input::create(this, origin, origin->Type()); + auto input = rvsdg::StructuralInput::create(this, origin, origin->Type()); auto output = rvsdg::StructuralOutput::create(this, origin->Type()); auto & argument_in = EntryArgument::Create(*subregion(), *input, origin->Type()); @@ -91,7 +91,7 @@ loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer jlm::rvsdg::output * loop_node::add_loopconst(jlm::rvsdg::output * 
origin) { - auto input = jlm::rvsdg::structural_input::create(this, origin, origin->Type()); + auto input = rvsdg::StructuralInput::create(this, origin, origin->Type()); auto & argument_in = EntryArgument::Create(*subregion(), *input, origin->Type()); auto buffer = hls::loop_constant_buffer_op::create(*predicate_buffer(), argument_in)[0]; @@ -109,7 +109,7 @@ loop_node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const for (size_t i = 0; i < ninputs(); ++i) { auto in_origin = smap.lookup(input(i)->origin()); - auto inp = jlm::rvsdg::structural_input::create(loop, in_origin, in_origin->Type()); + auto inp = rvsdg::StructuralInput::create(loop, in_origin, in_origin->Type()); smap.insert(input(i), loop->input(i)); auto oarg = input(i)->arguments.begin().ptr(); auto & narg = EntryArgument::Create(*loop->subregion(), *inp, oarg->Type()); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 604fed0ed..4e4908a15 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -620,21 +620,21 @@ class EntryArgument : public rvsdg::RegionArgument private: EntryArgument( rvsdg::Region & region, - rvsdg::structural_input & input, + rvsdg::StructuralInput & input, const std::shared_ptr type) : rvsdg::RegionArgument(®ion, &input, std::move(type)) {} public: EntryArgument & - Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; // FIXME: This should not be public, but we currently still have some transformations that use // this one. Make it eventually private. 
static EntryArgument & Create( rvsdg::Region & region, - rvsdg::structural_input & input, + rvsdg::StructuralInput & input, const std::shared_ptr type) { auto argument = new EntryArgument(region, input, std::move(type)); @@ -658,7 +658,7 @@ class backedge_argument : public rvsdg::RegionArgument } backedge_argument & - Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: backedge_argument(rvsdg::Region * region, const std::shared_ptr & type) diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index a9ac6ad66..ae7b81fca 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -271,7 +271,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, cnectx & ctx) } static void -mark_arguments(jlm::rvsdg::structural_input * i1, jlm::rvsdg::structural_input * i2, cnectx & ctx) +mark_arguments(StructuralInput * i1, StructuralInput * i2, cnectx & ctx) { JLM_ASSERT(i1->node() && i1->node() == i2->node()); JLM_ASSERT(i1->arguments.size() == i2->arguments.size()); diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 4fafa59fb..87fa97aed 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -62,7 +62,7 @@ get_dot_name(jlm::rvsdg::input * input) { return jlm::util::strfmt(get_dot_name(ni->node()), ":", "i", hex((intptr_t)input)); } - else if (dynamic_cast(input)) + else if (dynamic_cast(input)) { return jlm::util::strfmt("si", hex((intptr_t)input), ":", "default"); } @@ -105,7 +105,7 @@ result_to_dot(rvsdg::RegionResult * result) } std::string -structural_input_to_dot(jlm::rvsdg::structural_input * structuralInput) +structural_input_to_dot(rvsdg::StructuralInput * structuralInput) { auto display_name = jlm::util::strfmt("si", structuralInput->index()); auto dot_name = jlm::util::strfmt("si", hex((intptr_t)structuralInput)); diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 
064e1f31a..bc365bd2b 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -333,7 +333,7 @@ phi_needed(const rvsdg::input * i, const llvm::variable * v) auto node = rvsdg::input::GetNode(*i); JLM_ASSERT(is(node)); auto theta = static_cast(node); - auto input = static_cast(i); + auto input = static_cast(i); auto output = theta->output(input->index()); /* FIXME: solely decide on the input instead of using the variable */ diff --git a/jlm/llvm/ir/RvsdgModule.cpp b/jlm/llvm/ir/RvsdgModule.cpp index 994bbb501..47fb4fac4 100644 --- a/jlm/llvm/ir/RvsdgModule.cpp +++ b/jlm/llvm/ir/RvsdgModule.cpp @@ -9,7 +9,7 @@ namespace jlm::llvm { GraphImport & -GraphImport::Copy(rvsdg::Region & region, rvsdg::structural_input * input) +GraphImport::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { return GraphImport::Create(*region.graph(), ValueType(), Name(), Linkage()); } diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 370a5db1e..1ca264b52 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -45,7 +45,7 @@ class GraphImport final : public rvsdg::GraphImport } GraphImport & - Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; static GraphImport & Create( diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 5c2c6b6ef..4831fc851 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -169,7 +169,7 @@ rvargument::~rvargument() {} rvargument & -rvargument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) +rvargument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { JLM_ASSERT(input == nullptr); return *rvargument::create(®ion, Type()); @@ -181,7 +181,7 @@ cvargument::~cvargument() {} cvargument & -cvargument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) +cvargument::Copy(rvsdg::Region 
& region, rvsdg::StructuralInput * input) { auto phiInput = util::AssertedCast(input); return *cvargument::create(®ion, phiInput, Type()); diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 31615a6e5..454e0f85c 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -547,7 +547,7 @@ class builder final /* phi context variable input class */ -class cvinput final : public jlm::rvsdg::structural_input +class cvinput final : public rvsdg::StructuralInput { friend class phi::node; @@ -558,7 +558,7 @@ class cvinput final : public jlm::rvsdg::structural_input phi::node * node, jlm::rvsdg::output * origin, std::shared_ptr type) - : structural_input(node, origin, std::move(type)) + : StructuralInput(node, origin, std::move(type)) {} private: @@ -589,7 +589,7 @@ class cvinput final : public jlm::rvsdg::structural_input phi::node * node() const noexcept { - return static_cast(structural_input::node()); + return static_cast(StructuralInput::node()); } }; @@ -698,7 +698,7 @@ class rvargument final : public rvsdg::RegionArgument } rvargument & - Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: rvoutput * output_; @@ -732,7 +732,7 @@ class cvargument final : public rvsdg::RegionArgument operator=(cvargument &&) = delete; cvargument & - Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; static cvargument * create(rvsdg::Region * region, phi::cvinput * input, std::shared_ptr type) diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index d0d995dae..a14cfd0ad 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -20,11 +20,11 @@ using InvariantOutputMap = std::unordered_maporigin(); diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index 
c223ed48c..d97756659 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -173,7 +173,7 @@ cvargument::~cvargument() {} cvargument & -cvargument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) +cvargument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { auto deltaInput = util::AssertedCast(input); return *cvargument::create(®ion, deltaInput); diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index b458e6e62..eceb7b24f 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -305,7 +305,7 @@ class node final : public rvsdg::StructuralNode /** \brief Delta context variable input */ -class cvinput final : public rvsdg::structural_input +class cvinput final : public rvsdg::StructuralInput { friend ::jlm::llvm::delta::node; @@ -314,7 +314,7 @@ class cvinput final : public rvsdg::structural_input private: cvinput(delta::node * node, rvsdg::output * origin) - : structural_input(node, origin, origin->Type()) + : StructuralInput(node, origin, origin->Type()) {} static cvinput * @@ -331,7 +331,7 @@ class cvinput final : public rvsdg::structural_input delta::node * node() const noexcept { - return static_cast(structural_input::node()); + return static_cast(StructuralInput::node()); } }; @@ -414,7 +414,7 @@ class cvargument final : public rvsdg::RegionArgument ~cvargument() override; cvargument & - Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: cvargument(rvsdg::Region * region, cvinput * input) diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 5658c63a3..072efd658 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -418,7 +418,7 @@ output::~output() = default; fctargument::~fctargument() = default; fctargument & -fctargument::Copy(rvsdg::Region & region, 
rvsdg::structural_input * input) +fctargument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { JLM_ASSERT(input == nullptr); return *fctargument::create(®ion, Type()); @@ -429,7 +429,7 @@ fctargument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) cvargument::~cvargument() = default; cvargument & -cvargument::Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) +cvargument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { auto lambdaInput = util::AssertedCast(input); return *cvargument::create(®ion, lambdaInput); diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 0fbe53707..321d04203 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -405,7 +405,7 @@ class node final : public rvsdg::StructuralNode /** \brief Lambda context variable input */ -class cvinput final : public jlm::rvsdg::structural_input +class cvinput final : public rvsdg::StructuralInput { friend ::jlm::llvm::lambda::node; @@ -414,7 +414,7 @@ class cvinput final : public jlm::rvsdg::structural_input private: cvinput(lambda::node * node, jlm::rvsdg::output * origin) - : structural_input(node, origin, origin->Type()) + : StructuralInput(node, origin, origin->Type()) {} static cvinput * @@ -431,7 +431,7 @@ class cvinput final : public jlm::rvsdg::structural_input [[nodiscard]] lambda::node * node() const noexcept { - return jlm::util::AssertedCast(structural_input::node()); + return jlm::util::AssertedCast(StructuralInput::node()); } }; @@ -526,7 +526,7 @@ class fctargument final : public rvsdg::RegionArgument } fctargument & - Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: fctargument(rvsdg::Region * region, std::shared_ptr type) @@ -603,7 +603,7 @@ class cvargument final : public rvsdg::RegionArgument ~cvargument() override; cvargument & - Copy(rvsdg::Region & region, 
jlm::rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: cvargument(rvsdg::Region * region, cvinput * input) diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index c3262444f..c38f48ede 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -255,7 +255,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, cnectx & ctx) } static void -mark_arguments(jlm::rvsdg::structural_input * i1, jlm::rvsdg::structural_input * i2, cnectx & ctx) +mark_arguments(rvsdg::StructuralInput * i1, rvsdg::StructuralInput * i2, cnectx & ctx) { JLM_ASSERT(i1->node() && i1->node() == i2->node()); JLM_ASSERT(i1->arguments.size() == i2->arguments.size()); diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 6d200d232..133399b82 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -101,8 +101,8 @@ pullin_node(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) { for (const auto & user : *node->output(o)) { - JLM_ASSERT(dynamic_cast(user)); - auto sinput = static_cast(user); + JLM_ASSERT(dynamic_cast(user)); + auto sinput = static_cast(user); auto argument = gamma->subregion(r)->argument(sinput->index() - 1); argument->divert_users(copy->output(o)); } diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 9a5fd5197..e899a1a8b 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -374,7 +374,7 @@ GammaNode::copy(rvsdg::Region * region, SubstitutionMap & smap) const GammaArgument::~GammaArgument() noexcept = default; GammaArgument & -GammaArgument::Copy(rvsdg::Region & region, structural_input * input) +GammaArgument::Copy(rvsdg::Region & region, StructuralInput * input) { auto gammaInput = util::AssertedCast(input); return Create(region, *gammaInput); diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 2f278ea60..4e7bc3c54 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -317,7 +317,7 @@ class GammaNode : public StructuralNode /* gamma 
input */ -class GammaInput final : public structural_input +class GammaInput final : public StructuralInput { friend GammaNode; @@ -326,14 +326,14 @@ class GammaInput final : public structural_input private: GammaInput(GammaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) - : structural_input(node, origin, std::move(type)) + : StructuralInput(node, origin, std::move(type)) {} public: GammaNode * node() const noexcept { - return static_cast(structural_input::node()); + return static_cast(StructuralInput::node()); } inline argument_list::iterator @@ -468,7 +468,7 @@ class GammaArgument final : public RegionArgument ~GammaArgument() noexcept override; GammaArgument & - Copy(rvsdg::Region & region, structural_input * input) override; + Copy(rvsdg::Region & region, StructuralInput * input) override; private: GammaArgument(rvsdg::Region & region, GammaInput & input) diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index c53aad087..f451b79ac 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -24,7 +24,7 @@ RegionArgument::~RegionArgument() noexcept RegionArgument::RegionArgument( rvsdg::Region * region, - jlm::rvsdg::structural_input * input, + StructuralInput * input, std::shared_ptr type) : output(region, std::move(type)), input_(input) @@ -50,7 +50,7 @@ RegionArgument::GetOwner() const noexcept } RegionArgument & -RegionArgument::Copy(rvsdg::Region & region, structural_input * input) +RegionArgument::Copy(Region & region, StructuralInput * input) { return RegionArgument::Create(region, input, Type()); } @@ -58,7 +58,7 @@ RegionArgument::Copy(rvsdg::Region & region, structural_input * input) RegionArgument & RegionArgument::Create( rvsdg::Region & region, - rvsdg::structural_input * input, + StructuralInput * input, std::shared_ptr type) { auto argument = new RegionArgument(®ion, input, std::move(type)); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 38bb9c3f6..c31fadff8 100644 --- a/jlm/rvsdg/region.hpp +++ 
b/jlm/rvsdg/region.hpp @@ -26,7 +26,7 @@ namespace jlm::rvsdg class node; class simple_node; class simple_op; -class structural_input; +class StructuralInput; class StructuralNode; class structural_op; class StructuralOutput; @@ -38,7 +38,7 @@ class SubstitutionMap; * Region arguments represent the initial values of the region's acyclic graph. These values * are mapped to the arguments throughout the execution, and the concrete semantics of this mapping * depends on the structural node the region is part of. A region argument is either linked - * with a \ref structural_input or is a standalone argument. + * with a \ref StructuralInput or is a standalone argument. */ class RegionArgument : public output { @@ -52,7 +52,7 @@ class RegionArgument : public output RegionArgument( rvsdg::Region * region, - structural_input * input, + StructuralInput * input, std::shared_ptr type); RegionArgument(const RegionArgument &) = delete; @@ -65,7 +65,7 @@ class RegionArgument : public output RegionArgument & operator=(RegionArgument &&) = delete; - [[nodiscard]] structural_input * + [[nodiscard]] StructuralInput * input() const noexcept { return input_; @@ -80,7 +80,7 @@ class RegionArgument : public output * @return A reference to the copied argument. */ virtual RegionArgument & - Copy(rvsdg::Region & region, structural_input * input); + Copy(Region & region, StructuralInput * input); [[nodiscard]] std::variant GetOwner() const noexcept override; @@ -104,13 +104,10 @@ class RegionArgument : public output * Creates an argument and registers it with the given region. 
*/ static RegionArgument & - Create( - rvsdg::Region & region, - rvsdg::structural_input * input, - std::shared_ptr type); + Create(rvsdg::Region & region, StructuralInput * input, std::shared_ptr type); private: - structural_input * input_; + StructuralInput * input_; }; /** diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index b2acd782c..b14882d54 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -13,14 +13,14 @@ namespace jlm::rvsdg /* structural input */ -structural_input::~structural_input() noexcept +StructuralInput::~StructuralInput() noexcept { JLM_ASSERT(arguments.empty()); on_input_destroy(this); } -structural_input::structural_input( +StructuralInput::StructuralInput( rvsdg::StructuralNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) @@ -68,8 +68,8 @@ StructuralNode::StructuralNode( on_node_create(this); } -structural_input * -StructuralNode::append_input(std::unique_ptr input) +StructuralInput * +StructuralNode::append_input(std::unique_ptr input) { if (input->node() != this) throw jlm::util::error("Appending input to wrong node."); @@ -80,7 +80,7 @@ StructuralNode::append_input(std::unique_ptr input) return this->input(index); auto sinput = std::unique_ptr(input.release()); - return static_cast(node::add_input(std::move(sinput))); + return static_cast(add_input(std::move(sinput))); } StructuralOutput * diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index e4e986e65..0e63d39fb 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -14,7 +14,7 @@ namespace jlm::rvsdg /* structural node */ -class structural_input; +class StructuralInput; class structural_op; class StructuralOutput; @@ -44,14 +44,14 @@ class StructuralNode : public node return subregions_[index].get(); } - inline jlm::rvsdg::structural_input * + [[nodiscard]] inline StructuralInput * input(size_t index) const noexcept; [[nodiscard]] inline StructuralOutput * 
output(size_t index) const noexcept; - structural_input * - append_input(std::unique_ptr input); + StructuralInput * + append_input(std::unique_ptr input); StructuralOutput * append_output(std::unique_ptr output); @@ -69,25 +69,25 @@ class StructuralNode : public node typedef jlm::util::intrusive_list argument_list; -class structural_input : public node_input +class StructuralInput : public node_input { friend StructuralNode; public: - virtual ~structural_input() noexcept; + ~StructuralInput() noexcept override; - structural_input( + StructuralInput( StructuralNode * node, jlm::rvsdg::output * origin, std::shared_ptr type); - static structural_input * + static StructuralInput * create( StructuralNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) { - auto input = std::make_unique(node, origin, std::move(type)); + auto input = std::make_unique(node, origin, std::move(type)); return node->append_input(std::move(input)); } @@ -132,10 +132,10 @@ class StructuralOutput : public node_output /* structural node method definitions */ -inline jlm::rvsdg::structural_input * +inline StructuralInput * StructuralNode::input(size_t index) const noexcept { - return static_cast(node::input(index)); + return static_cast(node::input(index)); } inline StructuralOutput * diff --git a/jlm/rvsdg/substitution.hpp b/jlm/rvsdg/substitution.hpp index a76627d04..199e945d3 100644 --- a/jlm/rvsdg/substitution.hpp +++ b/jlm/rvsdg/substitution.hpp @@ -16,7 +16,7 @@ namespace jlm::rvsdg class output; class Region; -class structural_input; +class StructuralInput; class SubstitutionMap final { @@ -34,7 +34,7 @@ class SubstitutionMap final } bool - contains(const structural_input & original) const noexcept + contains(const StructuralInput & original) const noexcept { return structinput_map_.find(&original) != structinput_map_.end(); } @@ -57,8 +57,8 @@ class SubstitutionMap final return *region_map_.find(&original)->second; } - structural_input & - lookup(const structural_input & 
original) const + [[nodiscard]] StructuralInput & + lookup(const StructuralInput & original) const { if (!contains(original)) throw jlm::util::error("Structural input not in substitution map."); @@ -80,8 +80,8 @@ class SubstitutionMap final return i != region_map_.end() ? i->second : nullptr; } - inline jlm::rvsdg::structural_input * - lookup(const jlm::rvsdg::structural_input * original) const noexcept + StructuralInput * + lookup(const StructuralInput * original) const noexcept { auto i = structinput_map_.find(original); return i != structinput_map_.end() ? i->second : nullptr; @@ -100,7 +100,7 @@ class SubstitutionMap final } inline void - insert(const jlm::rvsdg::structural_input * original, jlm::rvsdg::structural_input * substitute) + insert(const StructuralInput * original, StructuralInput * substitute) { structinput_map_[original] = substitute; } @@ -108,8 +108,7 @@ class SubstitutionMap final private: std::unordered_map region_map_; std::unordered_map output_map_; - std::unordered_map - structinput_map_; + std::unordered_map structinput_map_; }; } diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 62341940a..088df296d 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -50,7 +50,7 @@ ThetaOutput::~ThetaOutput() noexcept ThetaArgument::~ThetaArgument() noexcept = default; ThetaArgument & -ThetaArgument::Copy(rvsdg::Region & region, structural_input * input) +ThetaArgument::Copy(rvsdg::Region & region, StructuralInput * input) { auto thetaInput = util::AssertedCast(input); return ThetaArgument::Create(region, *thetaInput); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index a578c6558..d0f5e70e8 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -247,7 +247,7 @@ class ThetaNode final : public StructuralNode copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; }; -class ThetaInput final : public structural_input +class ThetaInput final : public StructuralInput { friend ThetaNode; friend ThetaOutput; 
@@ -256,14 +256,14 @@ class ThetaInput final : public structural_input ~ThetaInput() noexcept override; ThetaInput(ThetaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) - : structural_input(node, origin, std::move(type)), + : StructuralInput(node, origin, std::move(type)), output_(nullptr) {} ThetaNode * node() const noexcept { - return static_cast(structural_input::node()); + return static_cast(StructuralInput::node()); } ThetaOutput * @@ -345,7 +345,7 @@ class ThetaArgument final : public RegionArgument ~ThetaArgument() noexcept override; ThetaArgument & - Copy(rvsdg::Region & region, structural_input * input) override; + Copy(rvsdg::Region & region, StructuralInput * input) override; private: ThetaArgument(rvsdg::Region & region, ThetaInput & input) diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index f664db2e6..ee8f13bc9 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -27,7 +27,7 @@ ArgumentNodeMismatch() auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); - auto structuralInput = structural_input::create(structuralNode1, import, valueType); + auto structuralInput = StructuralInput::create(structuralNode1, import, valueType); // Act bool inputErrorHandlerCalled = false; @@ -62,7 +62,7 @@ ArgumentInputTypeMismatch() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "import"); auto structuralNode = structural_node::create(rvsdg.root(), 1); - auto structuralInput = jlm::rvsdg::structural_input::create(structuralNode, x, valueType); + auto structuralInput = jlm::rvsdg::StructuralInput::create(structuralNode, x, valueType); // Act & Assert bool exceptionWasCaught = false; diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index d0eee1f4e..8f7117094 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ 
-95,13 +95,13 @@ Contains() auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); auto structuralNode1 = structural_node::create(graph.root(), 1); - auto structuralInput1 = jlm::rvsdg::structural_input::create(structuralNode1, import, valueType); + auto structuralInput1 = jlm::rvsdg::StructuralInput::create(structuralNode1, import, valueType); auto & regionArgument1 = TestGraphArgument::Create(*structuralNode1->subregion(0), structuralInput1, valueType); unary_op::create(structuralNode1->subregion(0), valueType, ®ionArgument1, valueType); auto structuralNode2 = structural_node::create(graph.root(), 1); - auto structuralInput2 = jlm::rvsdg::structural_input::create(structuralNode2, import, valueType); + auto structuralInput2 = jlm::rvsdg::StructuralInput::create(structuralNode2, import, valueType); auto & regionArgument2 = TestGraphArgument::Create(*structuralNode2->subregion(0), structuralInput2, valueType); binary_op::create(valueType, valueType, ®ionArgument2, ®ionArgument2); diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index ce8c0ca51..836e70844 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -27,7 +27,7 @@ ResultNodeMismatch() auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); - auto structuralInput = structural_input::create(structuralNode1, import, valueType); + auto structuralInput = StructuralInput::create(structuralNode1, import, valueType); auto & argument = TestGraphArgument::Create(*structuralNode1->subregion(0), structuralInput, valueType); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 03dbc654a..0cc0ff9ae 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -40,7 +40,7 @@ test_recursive_prune() auto n2 = jlm::tests::test_op::create(graph.root(), { imp }, { t }); auto n3 = 
jlm::tests::structural_node::create(graph.root(), 1); - structural_input::create(n3, imp, t); + StructuralInput::create(n3, imp, t); auto & a1 = TestGraphArgument::Create(*n3->subregion(0), nullptr, t); auto n4 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); auto n5 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 2e136f20b..be5d58454 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -23,8 +23,8 @@ test_node_copy(void) auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); auto n1 = jlm::tests::structural_node::create(graph.root(), 3); - auto i1 = structural_input::create(n1, s, stype); - auto i2 = structural_input::create(n1, v, vtype); + auto i1 = StructuralInput::create(n1, s, stype); + auto i2 = StructuralInput::create(n1, v, vtype); auto o1 = StructuralOutput::create(n1, stype); auto o2 = StructuralOutput::create(n1, vtype); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 539edc780..8589c4227 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -9,7 +9,7 @@ namespace jlm::tests { GraphImport & -GraphImport::Copy(rvsdg::Region & region, rvsdg::structural_input * input) +GraphImport::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { return GraphImport::Create(*region.graph(), Type(), Name()); } @@ -174,7 +174,7 @@ structural_node::copy(rvsdg::Region * parent, rvsdg::SubstitutionMap & smap) con { auto origin = smap.lookup(input(n)->origin()); auto neworigin = origin ? 
origin : input(n)->origin(); - auto new_input = rvsdg::structural_input::create(node, neworigin, input(n)->Type()); + auto new_input = rvsdg::StructuralInput::create(node, neworigin, input(n)->Type()); smap.insert(input(n), new_input); } @@ -242,7 +242,7 @@ StructuralNodeOutput::~StructuralNodeOutput() noexcept = default; StructuralNodeArgument::~StructuralNodeArgument() noexcept = default; StructuralNodeArgument & -StructuralNodeArgument::Copy(rvsdg::Region & region, rvsdg::structural_input * input) +StructuralNodeArgument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) { auto structuralNodeInput = util::AssertedCast(input); return structuralNodeInput != nullptr ? Create(region, *structuralNodeInput) diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 835f43571..c39a6e8b4 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -32,7 +32,7 @@ class GraphImport final : public rvsdg::GraphImport public: GraphImport & - Copy(rvsdg::Region & region, rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; static GraphImport & Create(rvsdg::Graph & graph, std::shared_ptr type, std::string name) @@ -242,7 +242,7 @@ class structural_node final : public rvsdg::StructuralNode copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; }; -class StructuralNodeInput final : public rvsdg::structural_input +class StructuralNodeInput final : public rvsdg::StructuralInput { friend structural_node; @@ -254,7 +254,7 @@ class StructuralNodeInput final : public rvsdg::structural_input structural_node & node, rvsdg::output & origin, std::shared_ptr type) - : rvsdg::structural_input(&node, &origin, std::move(type)) + : StructuralInput(&node, &origin, std::move(type)) {} public: @@ -301,7 +301,7 @@ class StructuralNodeArgument final : public rvsdg::RegionArgument ~StructuralNodeArgument() noexcept override; StructuralNodeArgument & - Copy(rvsdg::Region & region, 
rvsdg::structural_input * input) override; + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: StructuralNodeArgument( @@ -483,14 +483,14 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument private: TestGraphArgument( rvsdg::Region & region, - jlm::rvsdg::structural_input * input, + rvsdg::StructuralInput * input, std::shared_ptr type) : jlm::rvsdg::RegionArgument(®ion, input, type) {} public: TestGraphArgument & - Copy(rvsdg::Region & region, jlm::rvsdg::structural_input * input) override + Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override { return Create(region, input, Type()); } @@ -498,7 +498,7 @@ class TestGraphArgument final : public jlm::rvsdg::RegionArgument static TestGraphArgument & Create( rvsdg::Region & region, - jlm::rvsdg::structural_input * input, + rvsdg::StructuralInput * input, std::shared_ptr type) { auto graphArgument = new TestGraphArgument(region, input, std::move(type)); From d014a33c4e19970c59c79ede6a74823a07e40b7d Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 27 Nov 2024 06:54:38 +0100 Subject: [PATCH 122/170] Rename structural_op class to StructuralOperation (#670) --- jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/check-rhls.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 4 ++-- jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/ir/hls.hpp | 2 +- jlm/llvm/ir/operators/Phi.hpp | 2 +- jlm/llvm/ir/operators/delta.hpp | 2 +- jlm/llvm/ir/operators/lambda.hpp | 2 +- jlm/rvsdg/gamma.hpp | 4 ++-- jlm/rvsdg/operation.cpp | 7 ++++--- jlm/rvsdg/operation.hpp | 2 +- jlm/rvsdg/structural-node.cpp | 2 +- jlm/rvsdg/structural-node.hpp | 4 ++-- jlm/rvsdg/structural-normal-form.cpp | 2 +- jlm/rvsdg/theta.hpp | 2 +- tests/test-operation.hpp | 2 +- 17 files changed, 23 insertions(+), 22 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp 
b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 6f145ff4f..0529a7de4 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -120,7 +120,7 @@ CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) return false; } } - else if (rvsdg::is(&node)) + else if (rvsdg::is(&node)) { throw util::error("Unexpected structural node: " + node.operation().debug_string()); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index a00b27390..05c03ccb8 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -89,7 +89,7 @@ add_triggers(rvsdg::Region * region) auto trigger = get_trigger(region); for (auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (rvsdg::is(node)) + if (rvsdg::is(node)) { if (auto ln = dynamic_cast(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp index 48edb9ded..9a44ec580 100644 --- a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp @@ -17,7 +17,7 @@ check_rhls(rvsdg::Region * sr) { for (auto & node : jlm::rvsdg::topdown_traverser(sr)) { - if (rvsdg::is(node)) + if (rvsdg::is(node)) { if (auto ln = dynamic_cast(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index b558bd1c1..32104e27b 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -391,7 +391,7 @@ process_loopnode(loop_node * loopNode) bool can_decouple = true; for (auto sn : data_slice) { - if (rvsdg::is(sn)) + if (rvsdg::is(sn)) { // data slice may not contain loops can_decouple = false; @@ -419,7 +419,7 @@ process_loopnode(loop_node * loopNode) JLM_ASSERT(!can_decouple || !data_slice.count(simplenode)); for (auto sn : state_slice) { - if (rvsdg::is(sn)) + if (rvsdg::is(sn)) { // state slice may not contain loops 
can_decouple = false; diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 065aef600..8f5bcd644 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -77,7 +77,7 @@ hls::distribute_constants(rvsdg::Region * region) // buffers for them for (auto & node : rvsdg::topdown_traverser(region)) { - if (rvsdg::is(node)) + if (rvsdg::is(node)) { if (auto ln = dynamic_cast(node)) { diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 4e4908a15..9480f6c4f 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -584,7 +584,7 @@ class print_op final : public jlm::rvsdg::simple_op } }; -class loop_op final : public jlm::rvsdg::structural_op +class loop_op final : public rvsdg::StructuralOperation { public: virtual ~loop_op() noexcept diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 454e0f85c..3d4df7d05 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -26,7 +26,7 @@ namespace phi /* phi operation class */ -class operation final : public jlm::rvsdg::structural_op +class operation final : public rvsdg::StructuralOperation { public: ~operation() override; diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index eceb7b24f..077f896fd 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -20,7 +20,7 @@ namespace delta /** \brief Delta operation */ -class operation final : public rvsdg::structural_op +class operation final : public rvsdg::StructuralOperation { public: ~operation() override; diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 321d04203..1a043a322 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -29,7 +29,7 @@ namespace lambda * * A lambda operation determines a lambda's name and \ref FunctionType "function type". 
*/ -class operation final : public jlm::rvsdg::structural_op +class operation final : public rvsdg::StructuralOperation { public: ~operation() override; diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 4e7bc3c54..c77157b66 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -68,13 +68,13 @@ class gamma_normal_form final : public structural_normal_form class output; class Type; -class GammaOperation final : public structural_op +class GammaOperation final : public StructuralOperation { public: ~GammaOperation() noexcept override; explicit constexpr GammaOperation(size_t nalternatives) noexcept - : structural_op(), + : StructuralOperation(), nalternatives_(nalternatives) {} diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index fae80999b..a25021820 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -60,15 +60,16 @@ simple_op::normal_form(Graph * graph) noexcept /* structural operation */ bool -structural_op::operator==(const operation & other) const noexcept +StructuralOperation::operator==(const operation & other) const noexcept { return typeid(*this) == typeid(other); } jlm::rvsdg::structural_normal_form * -structural_op::normal_form(Graph * graph) noexcept +StructuralOperation::normal_form(Graph * graph) noexcept { - return static_cast(graph->node_normal_form(typeid(structural_op))); + return static_cast( + graph->node_normal_form(typeid(StructuralOperation))); } } diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index 5fd2193ec..dec192ac6 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -95,7 +95,7 @@ class simple_op : public operation /* structural operation */ -class structural_op : public operation +class StructuralOperation : public operation { public: virtual bool diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index b14882d54..3446b2220 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -54,7 +54,7 @@ 
StructuralNode::~StructuralNode() noexcept } StructuralNode::StructuralNode( - const jlm::rvsdg::structural_op & op, + const StructuralOperation & op, rvsdg::Region * region, size_t nsubregions) : node(op.copy(), region) diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 0e63d39fb..2f639c942 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -15,7 +15,7 @@ namespace jlm::rvsdg /* structural node */ class StructuralInput; -class structural_op; +class StructuralOperation; class StructuralOutput; class StructuralNode : public node @@ -26,7 +26,7 @@ class StructuralNode : public node protected: StructuralNode( /* FIXME: use move semantics instead of copy semantics for op */ - const jlm::rvsdg::structural_op & op, + const StructuralOperation & op, rvsdg::Region * region, size_t nsubregions); diff --git a/jlm/rvsdg/structural-normal-form.cpp b/jlm/rvsdg/structural-normal-form.cpp index 5fc358d57..122a39a69 100644 --- a/jlm/rvsdg/structural-normal-form.cpp +++ b/jlm/rvsdg/structural-normal-form.cpp @@ -34,6 +34,6 @@ static void __attribute__((constructor)) register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::structural_op), + typeid(jlm::rvsdg::StructuralOperation), get_default_normal_form); } diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index d0f5e70e8..407ddc8a4 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -15,7 +15,7 @@ namespace jlm::rvsdg { -class ThetaOperation final : public structural_op +class ThetaOperation final : public StructuralOperation { public: ~ThetaOperation() noexcept override; diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index c39a6e8b4..234b69444 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -193,7 +193,7 @@ class binary_op final : public rvsdg::binary_op /* structural operation */ -class structural_op final : public rvsdg::structural_op +class structural_op final : 
public rvsdg::StructuralOperation { public: virtual ~structural_op() noexcept; From 0760d15e26b7c51dfeac0a09a78ae74108666f78 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 27 Nov 2024 07:34:34 +0100 Subject: [PATCH 123/170] Rename simple_op class to SimpleOperation (#671) --- .../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp | 2 +- jlm/hls/ir/hls.hpp | 96 ++++++++------- jlm/hls/opt/cne.cpp | 2 +- jlm/llvm/backend/jlm2llvm/instruction.cpp | 60 +++++----- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 8 +- .../InterProceduralGraphConversion.cpp | 3 +- .../frontend/LlvmInstructionConversion.cpp | 7 +- jlm/llvm/ir/operators/GetElementPtr.hpp | 4 +- jlm/llvm/ir/operators/Load.cpp | 2 +- jlm/llvm/ir/operators/Load.hpp | 6 +- jlm/llvm/ir/operators/MemCpy.hpp | 4 +- .../ir/operators/MemoryStateOperations.hpp | 4 +- jlm/llvm/ir/operators/Store.cpp | 2 +- jlm/llvm/ir/operators/Store.hpp | 6 +- jlm/llvm/ir/operators/alloca.hpp | 4 +- jlm/llvm/ir/operators/call.cpp | 2 +- jlm/llvm/ir/operators/call.hpp | 4 +- jlm/llvm/ir/operators/operators.hpp | 112 +++++++++--------- jlm/llvm/ir/tac.cpp | 14 +-- jlm/llvm/ir/tac.hpp | 22 ++-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 2 +- jlm/llvm/opt/cne.cpp | 2 +- jlm/llvm/opt/unroll.hpp | 8 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 4 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 4 +- jlm/rvsdg/binary.cpp | 12 +- jlm/rvsdg/binary.hpp | 14 +-- jlm/rvsdg/bitstring/concat.cpp | 2 +- jlm/rvsdg/nullary.hpp | 4 +- jlm/rvsdg/operation.cpp | 15 ++- jlm/rvsdg/operation.hpp | 6 +- jlm/rvsdg/region.hpp | 2 +- jlm/rvsdg/simple-node.cpp | 4 +- jlm/rvsdg/simple-node.hpp | 14 +-- jlm/rvsdg/simple-normal-form.cpp | 4 +- jlm/rvsdg/simple-normal-form.hpp | 4 +- jlm/rvsdg/statemux.cpp | 2 +- jlm/rvsdg/statemux.hpp | 6 +- jlm/rvsdg/unary.cpp | 2 +- jlm/rvsdg/unary.hpp | 6 +- .../llvm/ThreeAddressCodeConversionTests.cpp | 4 +- tests/test-operation.hpp | 4 +- 43 files changed, 249 insertions(+), 242 
deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 8f5bcd644..2bcba83e7 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -15,7 +15,7 @@ namespace jlm { void -distribute_constant(const rvsdg::simple_op & op, rvsdg::simple_output * out) +distribute_constant(const rvsdg::SimpleOperation & op, rvsdg::simple_output * out) { JLM_ASSERT(jlm::hls::is_constant(out->node())); bool changed = true; diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp index 7feada423..d879cfa04 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp @@ -47,7 +47,7 @@ memstate_conv(rvsdg::Region * region) } remove(simplenode); } - // exit is handled as normal simple_op + // exit is handled as normal SimpleOperation } } } diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 9480f6c4f..b348df8a3 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -20,11 +20,11 @@ namespace jlm::hls { -class branch_op final : public jlm::rvsdg::simple_op +class branch_op final : public rvsdg::SimpleOperation { private: branch_op(size_t nalternatives, const std::shared_ptr & type, bool loop) - : jlm::rvsdg::simple_op( + : SimpleOperation( { rvsdg::ControlType::Create(nalternatives), type }, { nalternatives, type }), loop(loop) @@ -82,7 +82,7 @@ class branch_op final : public jlm::rvsdg::simple_op * single constant fork. Since the input of the fork is always the same value and is always valid. * No handshaking is necessary and the outputs of the fork is always valid. 
*/ -class fork_op final : public jlm::rvsdg::simple_op +class fork_op final : public rvsdg::SimpleOperation { public: virtual ~fork_op() @@ -95,7 +95,7 @@ class fork_op final : public jlm::rvsdg::simple_op * /param value The signal type, which is the same for the input and all outputs. */ fork_op(size_t nalternatives, const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ type }, { nalternatives, type }) + : SimpleOperation({ type }, { nalternatives, type }) {} /** @@ -109,7 +109,7 @@ class fork_op final : public jlm::rvsdg::simple_op size_t nalternatives, const std::shared_ptr & type, bool isConstant) - : rvsdg::simple_op({ type }, { nalternatives, type }), + : SimpleOperation({ type }, { nalternatives, type }), IsConstant_(isConstant) {} @@ -172,14 +172,14 @@ class fork_op final : public jlm::rvsdg::simple_op bool IsConstant_ = false; }; -class merge_op final : public jlm::rvsdg::simple_op +class merge_op final : public rvsdg::SimpleOperation { public: virtual ~merge_op() {} merge_op(size_t nalternatives, const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ nalternatives, type }, { type }) + : SimpleOperation({ nalternatives, type }, { type }) {} bool @@ -213,7 +213,7 @@ class merge_op final : public jlm::rvsdg::simple_op } }; -class mux_op final : public jlm::rvsdg::simple_op +class mux_op final : public rvsdg::SimpleOperation { public: virtual ~mux_op() @@ -224,7 +224,7 @@ class mux_op final : public jlm::rvsdg::simple_op const std::shared_ptr & type, bool discarding, bool loop) - : jlm::rvsdg::simple_op(create_typevector(nalternatives, type), { type }), + : SimpleOperation(create_typevector(nalternatives, type), { type }), discarding(discarding), loop(loop) {} @@ -286,14 +286,14 @@ class mux_op final : public jlm::rvsdg::simple_op } }; -class sink_op final : public jlm::rvsdg::simple_op +class sink_op final : public rvsdg::SimpleOperation { public: virtual ~sink_op() {} explicit sink_op(const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ type }, 
{}) + : SimpleOperation({ type }, {}) {} bool @@ -324,14 +324,14 @@ class sink_op final : public jlm::rvsdg::simple_op } }; -class predicate_buffer_op final : public jlm::rvsdg::simple_op +class predicate_buffer_op final : public rvsdg::SimpleOperation { public: virtual ~predicate_buffer_op() {} explicit predicate_buffer_op(const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ type }, { type }) + : SimpleOperation({ type }, { type }) {} bool @@ -365,7 +365,7 @@ class predicate_buffer_op final : public jlm::rvsdg::simple_op } }; -class loop_constant_buffer_op final : public jlm::rvsdg::simple_op +class loop_constant_buffer_op final : public rvsdg::SimpleOperation { public: virtual ~loop_constant_buffer_op() @@ -374,7 +374,7 @@ class loop_constant_buffer_op final : public jlm::rvsdg::simple_op loop_constant_buffer_op( const std::shared_ptr & ctltype, const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ ctltype, type }, { type }) + : SimpleOperation({ ctltype, type }, { type }) {} bool @@ -408,7 +408,7 @@ class loop_constant_buffer_op final : public jlm::rvsdg::simple_op } }; -class buffer_op final : public jlm::rvsdg::simple_op +class buffer_op final : public rvsdg::SimpleOperation { public: virtual ~buffer_op() @@ -418,7 +418,7 @@ class buffer_op final : public jlm::rvsdg::simple_op const std::shared_ptr & type, size_t capacity, bool pass_through) - : jlm::rvsdg::simple_op({ type }, { type }), + : SimpleOperation({ type }, { type }), capacity(capacity), pass_through(pass_through) {} @@ -487,14 +487,14 @@ class triggertype final : public rvsdg::StateType Create(); }; -class trigger_op final : public jlm::rvsdg::simple_op +class trigger_op final : public rvsdg::SimpleOperation { public: virtual ~trigger_op() {} explicit trigger_op(const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ triggertype::Create(), type }, { type }) + : SimpleOperation({ triggertype::Create(), type }, { type }) {} bool @@ -529,7 +529,7 @@ class trigger_op final : public 
jlm::rvsdg::simple_op } }; -class print_op final : public jlm::rvsdg::simple_op +class print_op final : public rvsdg::SimpleOperation { private: size_t _id; @@ -539,7 +539,7 @@ class print_op final : public jlm::rvsdg::simple_op {} explicit print_op(const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ type }, { type }) + : SimpleOperation({ type }, { type }) { static size_t common_id{ 0 }; _id = common_id++; @@ -872,14 +872,16 @@ get_mem_req_type(std::shared_ptr elementType, bool write std::shared_ptr get_mem_res_type(std::shared_ptr dataType); -class load_op final : public jlm::rvsdg::simple_op +class load_op final : public rvsdg::SimpleOperation { public: virtual ~load_op() {} load_op(const std::shared_ptr & pointeeType, size_t numStates) - : simple_op(CreateInTypes(pointeeType, numStates), CreateOutTypes(pointeeType, numStates)) + : SimpleOperation( + CreateInTypes(pointeeType, numStates), + CreateOutTypes(pointeeType, numStates)) {} bool @@ -959,7 +961,7 @@ class load_op final : public jlm::rvsdg::simple_op } }; -class addr_queue_op final : public jlm::rvsdg::simple_op +class addr_queue_op final : public rvsdg::SimpleOperation { public: virtual ~addr_queue_op() @@ -969,7 +971,7 @@ class addr_queue_op final : public jlm::rvsdg::simple_op const std::shared_ptr & pointerType, size_t capacity, bool combinatorial) - : simple_op(CreateInTypes(pointerType), CreateOutTypes(pointerType)), + : SimpleOperation(CreateInTypes(pointerType), CreateOutTypes(pointerType)), combinatorial(combinatorial), capacity(capacity) {} @@ -1032,14 +1034,14 @@ class addr_queue_op final : public jlm::rvsdg::simple_op size_t capacity; }; -class state_gate_op final : public jlm::rvsdg::simple_op +class state_gate_op final : public rvsdg::SimpleOperation { public: virtual ~state_gate_op() {} state_gate_op(const std::shared_ptr & type, size_t numStates) - : simple_op(CreateInOutTypes(type, numStates), CreateInOutTypes(type, numStates)) + : SimpleOperation(CreateInOutTypes(type, numStates), 
CreateInOutTypes(type, numStates)) {} bool @@ -1085,14 +1087,14 @@ class state_gate_op final : public jlm::rvsdg::simple_op } }; -class decoupled_load_op final : public jlm::rvsdg::simple_op +class decoupled_load_op final : public rvsdg::SimpleOperation { public: virtual ~decoupled_load_op() {} decoupled_load_op(const std::shared_ptr & pointeeType) - : simple_op(CreateInTypes(pointeeType), CreateOutTypes(pointeeType)) + : SimpleOperation(CreateInTypes(pointeeType), CreateOutTypes(pointeeType)) {} bool @@ -1154,14 +1156,14 @@ class decoupled_load_op final : public jlm::rvsdg::simple_op } }; -class mem_resp_op final : public jlm::rvsdg::simple_op +class mem_resp_op final : public rvsdg::SimpleOperation { public: virtual ~mem_resp_op() {} explicit mem_resp_op(const std::vector> & output_types) - : simple_op(CreateInTypes(output_types), CreateOutTypes(output_types)) + : SimpleOperation(CreateInTypes(output_types), CreateOutTypes(output_types)) {} bool @@ -1226,7 +1228,7 @@ class mem_resp_op final : public jlm::rvsdg::simple_op } }; -class mem_req_op final : public jlm::rvsdg::simple_op +class mem_req_op final : public rvsdg::SimpleOperation { public: virtual ~mem_req_op() = default; @@ -1234,7 +1236,9 @@ class mem_req_op final : public jlm::rvsdg::simple_op mem_req_op( const std::vector> & load_types, const std::vector> & store_types) - : simple_op(CreateInTypes(load_types, store_types), CreateOutTypes(load_types, store_types)) + : SimpleOperation( + CreateInTypes(load_types, store_types), + CreateOutTypes(load_types, store_types)) { for (auto loadType : load_types) { @@ -1357,14 +1361,16 @@ class mem_req_op final : public jlm::rvsdg::simple_op std::vector> StoreTypes_; }; -class store_op final : public jlm::rvsdg::simple_op +class store_op final : public rvsdg::SimpleOperation { public: virtual ~store_op() {} store_op(const std::shared_ptr & pointeeType, size_t numStates) - : simple_op(CreateInTypes(pointeeType, numStates), CreateOutTypes(pointeeType, numStates)) + : 
SimpleOperation( + CreateInTypes(pointeeType, numStates), + CreateOutTypes(pointeeType, numStates)) {} bool @@ -1438,14 +1444,14 @@ class store_op final : public jlm::rvsdg::simple_op } }; -class local_mem_op final : public jlm::rvsdg::simple_op +class local_mem_op final : public rvsdg::SimpleOperation { public: virtual ~local_mem_op() {} explicit local_mem_op(std::shared_ptr at) - : simple_op({}, CreateOutTypes(std::move(at))) + : SimpleOperation({}, CreateOutTypes(std::move(at))) {} bool @@ -1484,14 +1490,14 @@ class local_mem_op final : public jlm::rvsdg::simple_op } }; -class local_mem_resp_op final : public jlm::rvsdg::simple_op +class local_mem_resp_op final : public rvsdg::SimpleOperation { public: virtual ~local_mem_resp_op() {} local_mem_resp_op(const std::shared_ptr & at, size_t resp_count) - : simple_op({ at }, CreateOutTypes(at, resp_count)) + : SimpleOperation({ at }, CreateOutTypes(at, resp_count)) {} bool @@ -1532,14 +1538,14 @@ class local_mem_resp_op final : public jlm::rvsdg::simple_op } }; -class local_load_op final : public jlm::rvsdg::simple_op +class local_load_op final : public rvsdg::SimpleOperation { public: virtual ~local_load_op() {} local_load_op(const std::shared_ptr & valuetype, size_t numStates) - : simple_op(CreateInTypes(valuetype, numStates), CreateOutTypes(valuetype, numStates)) + : SimpleOperation(CreateInTypes(valuetype, numStates), CreateOutTypes(valuetype, numStates)) {} bool @@ -1610,14 +1616,14 @@ class local_load_op final : public jlm::rvsdg::simple_op } }; -class local_store_op final : public jlm::rvsdg::simple_op +class local_store_op final : public rvsdg::SimpleOperation { public: virtual ~local_store_op() {} local_store_op(const std::shared_ptr & valuetype, size_t numStates) - : simple_op(CreateInTypes(valuetype, numStates), CreateOutTypes(valuetype, numStates)) + : SimpleOperation(CreateInTypes(valuetype, numStates), CreateOutTypes(valuetype, numStates)) {} bool @@ -1687,7 +1693,7 @@ class local_store_op final : public 
jlm::rvsdg::simple_op } }; -class local_mem_req_op final : public jlm::rvsdg::simple_op +class local_mem_req_op final : public rvsdg::SimpleOperation { public: virtual ~local_mem_req_op() @@ -1697,7 +1703,7 @@ class local_mem_req_op final : public jlm::rvsdg::simple_op const std::shared_ptr & at, size_t load_cnt, size_t store_cnt) - : simple_op(CreateInTypes(at, load_cnt, store_cnt), {}) + : SimpleOperation(CreateInTypes(at, load_cnt, store_cnt), {}) {} bool diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index ae7b81fca..c31472c86 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -246,7 +246,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); } - if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) + if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) && n1->operation() == n2->operation() && n1->ninputs() == n2->ninputs() && o1->index() == o2->index()) { diff --git a/jlm/llvm/backend/jlm2llvm/instruction.cpp b/jlm/llvm/backend/jlm2llvm/instruction.cpp index 1dcd2b3b3..38c170c5b 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.cpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.cpp @@ -26,14 +26,14 @@ namespace jlm2llvm ::llvm::Value * convert_operation( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & arguments, ::llvm::IRBuilder<> & builder, context & ctx); static inline ::llvm::Value * convert_assignment( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -44,7 +44,7 @@ convert_assignment( static inline ::llvm::Value * convert_bitsbinary( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -74,7 +74,7 @@ convert_bitsbinary( static inline ::llvm::Value * convert_bitscompare( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & 
op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -112,7 +112,7 @@ convert_bitvalue_repr(const rvsdg::bitvalue_repr & vr) static inline ::llvm::Value * convert_bitconstant( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, context & ctx) @@ -130,7 +130,7 @@ convert_bitconstant( static inline ::llvm::Value * convert_ctlconstant( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, context & ctx) @@ -155,7 +155,7 @@ convert( static inline ::llvm::Value * convert_undef( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, context & ctx) @@ -224,7 +224,7 @@ is_identity_mapping(const rvsdg::match_op & op) static inline ::llvm::Value * convert_match( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -249,7 +249,7 @@ convert_match( static inline ::llvm::Value * convert_branch( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, context & ctx) @@ -260,7 +260,7 @@ convert_branch( static inline ::llvm::Value * convert_phi( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, context & ctx) @@ -339,7 +339,7 @@ CreateStoreInstruction( static inline ::llvm::Value * convert_store( - const rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -368,7 +368,7 @@ convert( static inline ::llvm::Value * convert_alloca( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -384,7 +384,7 @@ convert_alloca( static inline ::llvm::Value * 
convert_getelementptr( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -523,7 +523,7 @@ convert( static inline ::llvm::Value * convert_ptrcmp( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -547,7 +547,7 @@ convert_ptrcmp( static inline ::llvm::Value * convert_fpcmp( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -581,7 +581,7 @@ convert_fpcmp( static inline ::llvm::Value * convert_fpbin( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -604,7 +604,7 @@ convert_fpbin( static ::llvm::Value * convert_fpneg( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -616,7 +616,7 @@ convert_fpneg( static inline ::llvm::Value * convert_valist( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -653,7 +653,7 @@ convert( static inline ::llvm::Value * convert_select( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -672,7 +672,7 @@ convert_select( static inline ::llvm::Value * convert_ctl2bits( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -683,7 +683,7 @@ convert_ctl2bits( static ::llvm::Value * convert_constantvector( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -699,7 +699,7 @@ convert_constantvector( static ::llvm::Value * 
convert_constantdatavector( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -758,7 +758,7 @@ convert_constantdatavector( static ::llvm::Value * convert_extractelement( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & args, ::llvm::IRBuilder<> & builder, context & ctx) @@ -781,7 +781,7 @@ convert( static ::llvm::Value * convert_insertelement( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -796,7 +796,7 @@ convert_insertelement( static ::llvm::Value * convert_vectorunary( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -808,7 +808,7 @@ convert_vectorunary( static ::llvm::Value * convert_vectorbinary( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -834,7 +834,7 @@ convert( template<::llvm::Instruction::CastOps OPCODE> static ::llvm::Value * convert_cast( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -1004,7 +1004,7 @@ convert( template static ::llvm::Value * convert( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) @@ -1015,7 +1015,7 @@ convert( ::llvm::Value * convert_operation( - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & arguments, ::llvm::IRBuilder<> & builder, context & ctx) @@ -1028,7 +1028,7 @@ convert_operation( static std::unordered_map< std::type_index, - ::llvm::Value * (*)(const rvsdg::simple_op &, + ::llvm::Value * (*)(const rvsdg::SimpleOperation &, const std::vector &, ::llvm::IRBuilder<> 
&, context & ctx)> diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index bc365bd2b..af052cc97 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -90,7 +90,7 @@ create_initialization(const delta::node * delta, context & ctx) operands.push_back(ctx.variable(node->input(n)->origin())); /* convert node to tac */ - auto & op = *static_cast(&node->operation()); + auto & op = *static_cast(&node->operation()); tacs.push_back(tac::create(op, operands)); ctx.insert(output, tacs.back()->result(0)); } @@ -161,13 +161,13 @@ create_cfg(const lambda::node & lambda, context & ctx) static inline void convert_simple_node(const rvsdg::node & node, context & ctx) { - JLM_ASSERT(dynamic_cast(&node.operation())); + JLM_ASSERT(dynamic_cast(&node.operation())); std::vector operands; for (size_t n = 0; n < node.ninputs(); n++) operands.push_back(ctx.variable(node.input(n)->origin())); - auto & op = *static_cast(&node.operation()); + auto & op = *static_cast(&node.operation()); ctx.lpbb()->append_last(tac::create(op, operands)); for (size_t n = 0; n < node.noutputs(); n++) @@ -526,7 +526,7 @@ convert_node(const rvsdg::node & node, context & ctx) { typeid(phi::operation), convert_phi_node }, { typeid(delta::operation), convert_delta_node } }); - if (dynamic_cast(&node.operation())) + if (dynamic_cast(&node.operation())) { convert_simple_node(node, ctx); return; diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 758df47de..4af4abd23 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -569,7 +569,8 @@ ConvertThreeAddressCode( for (size_t n = 0; n < threeAddressCode.noperands(); n++) operands.push_back(variableMap.lookup(threeAddressCode.operand(n))); - auto & simpleOperation = static_cast(threeAddressCode.operation()); + auto & simpleOperation 
= + static_cast(threeAddressCode.operation()); auto results = rvsdg::simple_node::create_normalized(®ion, simpleOperation, operands); JLM_ASSERT(results.size() == threeAddressCode.nresults()); diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index 60c8647c8..3d9461a09 100644 --- a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -543,7 +543,7 @@ convert_icmp_instruction(::llvm::Instruction * instruction, tacsvector_t & tacs, } else { - tacs.push_back(tac::create(*static_cast(binop.get()), { op1, op2 })); + tacs.push_back(tac::create(*static_cast(binop.get()), { op1, op2 })); } return tacs.back()->result(0); @@ -1053,7 +1053,8 @@ convert_binary_operator(::llvm::Instruction * instruction, tacsvector_t & tacs, } else { - tacs.push_back(tac::create(*static_cast(operation.get()), { op1, op2 })); + tacs.push_back( + tac::create(*static_cast(operation.get()), { op1, op2 })); } return tacs.back()->result(0); @@ -1202,7 +1203,7 @@ convert_cast_instruction(::llvm::Instruction * i, tacsvector_t & tacs, context & if (dt->isVectorTy()) tacs.push_back(vectorunary_op::create(*static_cast(unop.get()), op, type)); else - tacs.push_back(tac::create(*static_cast(unop.get()), { op })); + tacs.push_back(tac::create(*static_cast(unop.get()), { op })); return tacs.back()->result(0); } diff --git a/jlm/llvm/ir/operators/GetElementPtr.hpp b/jlm/llvm/ir/operators/GetElementPtr.hpp index c9eea2f3e..e04d2b4c1 100644 --- a/jlm/llvm/ir/operators/GetElementPtr.hpp +++ b/jlm/llvm/ir/operators/GetElementPtr.hpp @@ -20,7 +20,7 @@ namespace jlm::llvm * FIXME: We currently do not support vector of pointers for the baseAddress. 
* */ -class GetElementPtrOperation final : public rvsdg::simple_op +class GetElementPtrOperation final : public rvsdg::SimpleOperation { public: ~GetElementPtrOperation() noexcept override; @@ -29,7 +29,7 @@ class GetElementPtrOperation final : public rvsdg::simple_op GetElementPtrOperation( const std::vector> & offsetTypes, std::shared_ptr pointeeType) - : simple_op(CreateOperandTypes(offsetTypes), { PointerType::Create() }), + : SimpleOperation(CreateOperandTypes(offsetTypes), { PointerType::Create() }), PointeeType_(std::move(pointeeType)) {} diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 145740a7f..3c0d8819e 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -632,7 +632,7 @@ load_normal_form::normalize_node(rvsdg::node * node) const std::vector load_normal_form::normalized_create( rvsdg::Region * region, - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands) const { JLM_ASSERT(is(op)); diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index 5c84611ee..700d8b901 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -33,7 +33,7 @@ class load_normal_form final : public rvsdg::simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands) const override; inline void @@ -123,14 +123,14 @@ class load_normal_form final : public rvsdg::simple_normal_form * @see LoadVolatileOperation * @see LoadNonVolatileOperation */ -class LoadOperation : public rvsdg::simple_op +class LoadOperation : public rvsdg::SimpleOperation { protected: LoadOperation( const std::vector> & operandTypes, const std::vector> & resultTypes, size_t alignment) - : simple_op(operandTypes, resultTypes), + : SimpleOperation(operandTypes, resultTypes), Alignment_(alignment) { JLM_ASSERT(!operandTypes.empty() && 
!resultTypes.empty()); diff --git a/jlm/llvm/ir/operators/MemCpy.hpp b/jlm/llvm/ir/operators/MemCpy.hpp index 418a0f99c..14e01ac01 100644 --- a/jlm/llvm/ir/operators/MemCpy.hpp +++ b/jlm/llvm/ir/operators/MemCpy.hpp @@ -20,13 +20,13 @@ namespace jlm::llvm * @see MemCpyNonVolatileOperation * @see MemCpyVolatileOperation */ -class MemCpyOperation : public rvsdg::simple_op +class MemCpyOperation : public rvsdg::SimpleOperation { protected: MemCpyOperation( const std::vector> & operandTypes, const std::vector> & resultTypes) - : simple_op(operandTypes, resultTypes) + : SimpleOperation(operandTypes, resultTypes) { JLM_ASSERT(operandTypes.size() >= 4); diff --git a/jlm/llvm/ir/operators/MemoryStateOperations.hpp b/jlm/llvm/ir/operators/MemoryStateOperations.hpp index 2212b5ee2..0f2c6e79f 100644 --- a/jlm/llvm/ir/operators/MemoryStateOperations.hpp +++ b/jlm/llvm/ir/operators/MemoryStateOperations.hpp @@ -16,11 +16,11 @@ namespace jlm::llvm /** * Abstract base class for all memory state operations. 
*/ -class MemoryStateOperation : public rvsdg::simple_op +class MemoryStateOperation : public rvsdg::SimpleOperation { protected: MemoryStateOperation(size_t numOperands, size_t numResults) - : simple_op( + : SimpleOperation( { numOperands, MemoryStateType::Create() }, { numResults, MemoryStateType::Create() }) {} diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 2b394175e..8bea5dc1e 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -384,7 +384,7 @@ store_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector store_normal_form::normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & ops) const { JLM_ASSERT(is(op)); diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 70bcab3e2..bf23eeeb2 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -33,7 +33,7 @@ class store_normal_form final : public jlm::rvsdg::simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const rvsdg::SimpleOperation & op, const std::vector & operands) const override; virtual void @@ -85,14 +85,14 @@ class store_normal_form final : public jlm::rvsdg::simple_normal_form * @see StoreVolatileOperation * @see StoreNonVolatileOperation */ -class StoreOperation : public rvsdg::simple_op +class StoreOperation : public rvsdg::SimpleOperation { protected: StoreOperation( const std::vector> & operandTypes, const std::vector> & resultTypes, size_t alignment) - : simple_op(operandTypes, resultTypes), + : SimpleOperation(operandTypes, resultTypes), Alignment_(alignment) { JLM_ASSERT(operandTypes.size() >= 2); diff --git a/jlm/llvm/ir/operators/alloca.hpp b/jlm/llvm/ir/operators/alloca.hpp index 65a509475..101c8a126 100644 --- a/jlm/llvm/ir/operators/alloca.hpp +++ b/jlm/llvm/ir/operators/alloca.hpp @@ -18,7 +18,7 @@ 
namespace jlm::llvm /* alloca operator */ -class alloca_op final : public rvsdg::simple_op +class alloca_op final : public rvsdg::SimpleOperation { public: virtual ~alloca_op() noexcept; @@ -27,7 +27,7 @@ class alloca_op final : public rvsdg::simple_op std::shared_ptr allocatedType, std::shared_ptr btype, size_t alignment) - : simple_op({ btype }, { { PointerType::Create() }, { MemoryStateType::Create() } }), + : SimpleOperation({ btype }, { { PointerType::Create() }, { MemoryStateType::Create() } }), alignment_(alignment), AllocatedType_(std::move(allocatedType)) {} diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index a14cfd0ad..de2ed69d9 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -166,7 +166,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) if (is(origin)) return origin; - if (is(rvsdg::output::GetNode(*origin))) + if (is(rvsdg::output::GetNode(*origin))) return origin; if (is(origin)) diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index 32b4ffc90..f63554db4 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -19,13 +19,13 @@ namespace jlm::llvm /** \brief Call operation class * */ -class CallOperation final : public jlm::rvsdg::simple_op +class CallOperation final : public jlm::rvsdg::SimpleOperation { public: ~CallOperation() override; explicit CallOperation(std::shared_ptr functionType) - : simple_op(create_srctypes(*functionType), functionType->Results()), + : SimpleOperation(create_srctypes(*functionType), functionType->Results()), FunctionType_(std::move(functionType)) {} diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index 5b1fc042f..3e20383a8 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -26,7 +26,7 @@ class cfg_node; /* phi operator */ -class phi_op final : public jlm::rvsdg::simple_op +class phi_op final : public 
rvsdg::SimpleOperation { public: virtual ~phi_op() noexcept; @@ -34,7 +34,7 @@ class phi_op final : public jlm::rvsdg::simple_op inline phi_op( const std::vector & nodes, const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ nodes.size(), type }, { type }), + : SimpleOperation({ nodes.size(), type }, { type }), nodes_(nodes) {} @@ -97,13 +97,13 @@ class phi_op final : public jlm::rvsdg::simple_op /* assignment operator */ -class assignment_op final : public jlm::rvsdg::simple_op +class assignment_op final : public rvsdg::SimpleOperation { public: virtual ~assignment_op() noexcept; explicit inline assignment_op(const std::shared_ptr & type) - : simple_op({ type, type }, {}) + : SimpleOperation({ type, type }, {}) {} assignment_op(const assignment_op &) = default; @@ -131,13 +131,13 @@ class assignment_op final : public jlm::rvsdg::simple_op /* select operator */ -class select_op final : public jlm::rvsdg::simple_op +class select_op final : public rvsdg::SimpleOperation { public: virtual ~select_op() noexcept; explicit select_op(const std::shared_ptr & type) - : jlm::rvsdg::simple_op({ jlm::rvsdg::bittype::Create(1), type, type }, { type }) + : SimpleOperation({ jlm::rvsdg::bittype::Create(1), type, type }, { type }) {} virtual bool @@ -171,7 +171,7 @@ class select_op final : public jlm::rvsdg::simple_op /* vector select operator */ -class vectorselect_op final : public jlm::rvsdg::simple_op +class vectorselect_op final : public rvsdg::SimpleOperation { public: virtual ~vectorselect_op() noexcept; @@ -180,7 +180,7 @@ class vectorselect_op final : public jlm::rvsdg::simple_op vectorselect_op( const std::shared_ptr & pt, const std::shared_ptr & vt) - : jlm::rvsdg::simple_op({ pt, vt, vt }, { vt }) + : SimpleOperation({ pt, vt, vt }, { vt }) {} public: @@ -364,7 +364,7 @@ class fp2si_op final : public jlm::rvsdg::unary_op /* ctl2bits operator */ -class ctl2bits_op final : public jlm::rvsdg::simple_op +class ctl2bits_op final : public rvsdg::SimpleOperation { public: 
virtual ~ctl2bits_op() noexcept; @@ -372,7 +372,7 @@ class ctl2bits_op final : public jlm::rvsdg::simple_op inline ctl2bits_op( std::shared_ptr srctype, std::shared_ptr dsttype) - : jlm::rvsdg::simple_op({ std::move(srctype) }, { std::move(dsttype) }) + : SimpleOperation({ std::move(srctype) }, { std::move(dsttype) }) {} virtual bool @@ -402,13 +402,13 @@ class ctl2bits_op final : public jlm::rvsdg::simple_op /* branch operator */ -class branch_op final : public jlm::rvsdg::simple_op +class branch_op final : public rvsdg::SimpleOperation { public: virtual ~branch_op() noexcept; explicit inline branch_op(std::shared_ptr type) - : jlm::rvsdg::simple_op({ std::move(type) }, {}) + : SimpleOperation({ std::move(type) }, {}) {} virtual bool @@ -438,13 +438,13 @@ class branch_op final : public jlm::rvsdg::simple_op * * This operator is the Jlm equivalent of LLVM's ConstantPointerNull constant. */ -class ConstantPointerNullOperation final : public jlm::rvsdg::simple_op +class ConstantPointerNullOperation final : public rvsdg::SimpleOperation { public: ~ConstantPointerNullOperation() noexcept override; explicit ConstantPointerNullOperation(std::shared_ptr pointerType) - : simple_op({}, { std::move(pointerType) }) + : SimpleOperation({}, { std::move(pointerType) }) {} bool @@ -634,13 +634,13 @@ class ptr2bits_op final : public jlm::rvsdg::unary_op /* Constant Data Array operator */ -class ConstantDataArray final : public jlm::rvsdg::simple_op +class ConstantDataArray final : public rvsdg::SimpleOperation { public: virtual ~ConstantDataArray(); ConstantDataArray(const std::shared_ptr & type, size_t size) - : simple_op({ size, type }, { arraytype::Create(type, size) }) + : SimpleOperation({ size, type }, { arraytype::Create(type, size) }) { if (size == 0) throw jlm::util::error("size equals zero."); @@ -866,18 +866,18 @@ class zext_op final : public jlm::rvsdg::unary_op /* floating point constant operator */ -class ConstantFP final : public jlm::rvsdg::simple_op +class 
ConstantFP final : public rvsdg::SimpleOperation { public: virtual ~ConstantFP(); inline ConstantFP(const fpsize & size, const ::llvm::APFloat & constant) - : simple_op({}, { fptype::Create(size) }), + : SimpleOperation({}, { fptype::Create(size) }), constant_(constant) {} inline ConstantFP(std::shared_ptr fpt, const ::llvm::APFloat & constant) - : simple_op({}, { std::move(fpt) }), + : SimpleOperation({}, { std::move(fpt) }), constant_(constant) {} @@ -1006,13 +1006,13 @@ class fpcmp_op final : public jlm::rvsdg::binary_op * * This operator is the Jlm equivalent of LLVM's UndefValue constant. */ -class UndefValueOperation final : public jlm::rvsdg::simple_op +class UndefValueOperation final : public rvsdg::SimpleOperation { public: ~UndefValueOperation() noexcept override; explicit UndefValueOperation(std::shared_ptr type) - : simple_op({}, { std::move(type) }) + : SimpleOperation({}, { std::move(type) }) {} UndefValueOperation(const UndefValueOperation &) = default; @@ -1076,13 +1076,13 @@ class UndefValueOperation final : public jlm::rvsdg::simple_op * * This operator is the Jlm equivalent of LLVM's PoisonValue constant. 
*/ -class PoisonValueOperation final : public jlm::rvsdg::simple_op +class PoisonValueOperation final : public rvsdg::SimpleOperation { public: ~PoisonValueOperation() noexcept override; explicit PoisonValueOperation(std::shared_ptr type) - : jlm::rvsdg::simple_op({}, { std::move(type) }) + : SimpleOperation({}, { std::move(type) }) {} PoisonValueOperation(const PoisonValueOperation &) = default; @@ -1432,13 +1432,13 @@ class fptrunc_op final : public jlm::rvsdg::unary_op /* valist operator */ -class valist_op final : public jlm::rvsdg::simple_op +class valist_op final : public rvsdg::SimpleOperation { public: virtual ~valist_op(); explicit valist_op(std::vector> types) - : simple_op(std::move(types), { varargtype::Create() }) + : SimpleOperation(std::move(types), { varargtype::Create() }) {} valist_op(const valist_op &) = default; @@ -1569,13 +1569,13 @@ class bitcast_op final : public jlm::rvsdg::unary_op /* ConstantStruct operator */ -class ConstantStruct final : public jlm::rvsdg::simple_op +class ConstantStruct final : public rvsdg::SimpleOperation { public: virtual ~ConstantStruct(); inline ConstantStruct(std::shared_ptr type) - : simple_op(create_srctypes(*type), { type }) + : SimpleOperation(create_srctypes(*type), { type }) {} virtual bool @@ -1847,13 +1847,13 @@ class sitofp_op final : public jlm::rvsdg::unary_op /* ConstantArray */ -class ConstantArray final : public jlm::rvsdg::simple_op +class ConstantArray final : public rvsdg::SimpleOperation { public: virtual ~ConstantArray(); ConstantArray(const std::shared_ptr & type, size_t size) - : jlm::rvsdg::simple_op({ size, type }, { arraytype::Create(type, size) }) + : SimpleOperation({ size, type }, { arraytype::Create(type, size) }) { if (size == 0) throw jlm::util::error("size equals zero.\n"); @@ -1913,13 +1913,13 @@ class ConstantArray final : public jlm::rvsdg::simple_op /* ConstantAggregateZero operator */ -class ConstantAggregateZero final : public jlm::rvsdg::simple_op +class ConstantAggregateZero 
final : public rvsdg::SimpleOperation { public: virtual ~ConstantAggregateZero(); ConstantAggregateZero(std::shared_ptr type) - : simple_op({}, { type }) + : SimpleOperation({}, { type }) { auto st = dynamic_cast(type.get()); auto at = dynamic_cast(type.get()); @@ -1954,7 +1954,7 @@ class ConstantAggregateZero final : public jlm::rvsdg::simple_op /* extractelement operator */ -class extractelement_op final : public jlm::rvsdg::simple_op +class extractelement_op final : public rvsdg::SimpleOperation { public: virtual ~extractelement_op(); @@ -1962,7 +1962,7 @@ class extractelement_op final : public jlm::rvsdg::simple_op inline extractelement_op( const std::shared_ptr & vtype, const std::shared_ptr & btype) - : simple_op({ vtype, btype }, { vtype->Type() }) + : SimpleOperation({ vtype, btype }, { vtype->Type() }) {} virtual bool @@ -1992,20 +1992,20 @@ class extractelement_op final : public jlm::rvsdg::simple_op /* shufflevector operator */ -class shufflevector_op final : public jlm::rvsdg::simple_op +class shufflevector_op final : public rvsdg::SimpleOperation { public: ~shufflevector_op() override; shufflevector_op(const std::shared_ptr & v, const std::vector & mask) - : simple_op({ v, v }, { v }), + : SimpleOperation({ v, v }, { v }), Mask_(mask) {} shufflevector_op( const std::shared_ptr & v, const std::vector & mask) - : simple_op({ v, v }, { v }), + : SimpleOperation({ v, v }, { v }), Mask_(mask) {} @@ -2051,13 +2051,13 @@ class shufflevector_op final : public jlm::rvsdg::simple_op /* constantvector operator */ -class constantvector_op final : public jlm::rvsdg::simple_op +class constantvector_op final : public rvsdg::SimpleOperation { public: virtual ~constantvector_op(); explicit inline constantvector_op(const std::shared_ptr & vt) - : simple_op({ vt->size(), vt->Type() }, { vt }) + : SimpleOperation({ vt->size(), vt->Type() }, { vt }) {} virtual bool @@ -2085,7 +2085,7 @@ class constantvector_op final : public jlm::rvsdg::simple_op /* insertelement operator 
*/ -class insertelement_op final : public jlm::rvsdg::simple_op +class insertelement_op final : public rvsdg::SimpleOperation { public: virtual ~insertelement_op(); @@ -2094,7 +2094,7 @@ class insertelement_op final : public jlm::rvsdg::simple_op const std::shared_ptr & vectype, const std::shared_ptr & vtype, const std::shared_ptr & btype) - : simple_op({ vectype, vtype, btype }, { vectype }) + : SimpleOperation({ vectype, vtype, btype }, { vectype }) { if (vectype->type() != *vtype) { @@ -2135,7 +2135,7 @@ class insertelement_op final : public jlm::rvsdg::simple_op /* vectorunary operator */ -class vectorunary_op final : public jlm::rvsdg::simple_op +class vectorunary_op final : public rvsdg::SimpleOperation { public: virtual ~vectorunary_op(); @@ -2144,7 +2144,7 @@ class vectorunary_op final : public jlm::rvsdg::simple_op const jlm::rvsdg::unary_op & op, const std::shared_ptr & operand, const std::shared_ptr & result) - : simple_op({ operand }, { result }), + : SimpleOperation({ operand }, { result }), op_(op.copy()) { if (operand->type() != *op.argument(0)) @@ -2163,12 +2163,12 @@ class vectorunary_op final : public jlm::rvsdg::simple_op } inline vectorunary_op(const vectorunary_op & other) - : simple_op(other), + : SimpleOperation(other), op_(other.op_->copy()) {} inline vectorunary_op(vectorunary_op && other) - : simple_op(other), + : SimpleOperation(other), op_(std::move(other.op_)) {} @@ -2226,7 +2226,7 @@ class vectorunary_op final : public jlm::rvsdg::simple_op /* vectorbinary operator */ -class vectorbinary_op final : public jlm::rvsdg::simple_op +class vectorbinary_op final : public rvsdg::SimpleOperation { public: virtual ~vectorbinary_op(); @@ -2236,7 +2236,7 @@ class vectorbinary_op final : public jlm::rvsdg::simple_op const std::shared_ptr & op1, const std::shared_ptr & op2, const std::shared_ptr & result) - : simple_op({ op1, op2 }, { result }), + : SimpleOperation({ op1, op2 }, { result }), op_(binop.copy()) { if (*op1 != *op2) @@ -2258,12 +2258,12 
@@ class vectorbinary_op final : public jlm::rvsdg::simple_op } inline vectorbinary_op(const vectorbinary_op & other) - : simple_op(other), + : SimpleOperation(other), op_(other.op_->copy()) {} inline vectorbinary_op(vectorbinary_op && other) - : simple_op(other), + : SimpleOperation(other), op_(std::move(other.op_)) {} @@ -2323,14 +2323,14 @@ class vectorbinary_op final : public jlm::rvsdg::simple_op /* constant data vector operator */ -class constant_data_vector_op final : public jlm::rvsdg::simple_op +class constant_data_vector_op final : public rvsdg::SimpleOperation { public: ~constant_data_vector_op() override; private: explicit constant_data_vector_op(const std::shared_ptr & vt) - : simple_op({ vt->size(), vt->Type() }, { vt }) + : SimpleOperation({ vt->size(), vt->Type() }, { vt }) {} public: @@ -2372,7 +2372,7 @@ class constant_data_vector_op final : public jlm::rvsdg::simple_op /* ExtractValue operator */ -class ExtractValue final : public jlm::rvsdg::simple_op +class ExtractValue final : public rvsdg::SimpleOperation { typedef std::vector::const_iterator const_iterator; @@ -2382,7 +2382,7 @@ class ExtractValue final : public jlm::rvsdg::simple_op inline ExtractValue( const std::shared_ptr & aggtype, const std::vector & indices) - : simple_op({ aggtype }, { dsttype(aggtype, indices) }), + : SimpleOperation({ aggtype }, { dsttype(aggtype, indices) }), indices_(indices) { if (indices.empty()) @@ -2458,13 +2458,13 @@ class ExtractValue final : public jlm::rvsdg::simple_op /* malloc operator */ -class malloc_op final : public jlm::rvsdg::simple_op +class malloc_op final : public rvsdg::SimpleOperation { public: virtual ~malloc_op(); explicit malloc_op(std::shared_ptr btype) - : simple_op({ std::move(btype) }, { PointerType::Create(), MemoryStateType::Create() }) + : SimpleOperation({ std::move(btype) }, { PointerType::Create(), MemoryStateType::Create() }) {} virtual bool @@ -2517,13 +2517,13 @@ class malloc_op final : public jlm::rvsdg::simple_op * * This 
operation has no equivalent LLVM instruction. */ -class FreeOperation final : public jlm::rvsdg::simple_op +class FreeOperation final : public rvsdg::SimpleOperation { public: ~FreeOperation() noexcept override; explicit FreeOperation(size_t numMemoryStates) - : simple_op(CreateOperandTypes(numMemoryStates), CreateResultTypes(numMemoryStates)) + : SimpleOperation(CreateOperandTypes(numMemoryStates), CreateResultTypes(numMemoryStates)) {} bool diff --git a/jlm/llvm/ir/tac.cpp b/jlm/llvm/ir/tac.cpp index f00a5a194..3ea301210 100644 --- a/jlm/llvm/ir/tac.cpp +++ b/jlm/llvm/ir/tac.cpp @@ -27,7 +27,7 @@ taclist::~taclist() static void check_operands( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const std::vector & operands) { if (operands.size() != operation.narguments()) @@ -42,7 +42,7 @@ check_operands( static void check_results( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const std::vector> & results) { if (results.size() != operation.nresults()) @@ -55,7 +55,7 @@ check_results( } } -tac::tac(const jlm::rvsdg::simple_op & operation, const std::vector & operands) +tac::tac(const rvsdg::SimpleOperation & operation, const std::vector & operands) : operands_(operands), operation_(operation.copy()) { @@ -66,7 +66,7 @@ tac::tac(const jlm::rvsdg::simple_op & operation, const std::vector & operands, const std::vector & names) : operands_(operands), @@ -81,7 +81,7 @@ tac::tac( } tac::tac( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const std::vector & operands, std::vector> results) : operands_(operands), @@ -94,7 +94,7 @@ tac::tac( void tac::convert( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const std::vector & operands) { check_operands(operation, operands); @@ -109,7 +109,7 @@ tac::convert( void tac::replace( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const 
std::vector & operands) { check_operands(operation, operands); diff --git a/jlm/llvm/ir/tac.hpp b/jlm/llvm/ir/tac.hpp index da0b0c227..463a2233e 100644 --- a/jlm/llvm/ir/tac.hpp +++ b/jlm/llvm/ir/tac.hpp @@ -58,13 +58,13 @@ class tac final inline ~tac() noexcept {} - tac(const jlm::rvsdg::simple_op & operation, const std::vector & operands); + tac(const rvsdg::SimpleOperation & operation, const std::vector & operands); - tac(const jlm::rvsdg::simple_op & operation, + tac(const rvsdg::SimpleOperation & operation, const std::vector & operands, const std::vector & names); - tac(const jlm::rvsdg::simple_op & operation, + tac(const rvsdg::SimpleOperation & operation, const std::vector & operands, std::vector> results); @@ -78,10 +78,10 @@ class tac final tac & operator=(llvm::tac &&) = delete; - inline const jlm::rvsdg::simple_op & + inline const rvsdg::SimpleOperation & operation() const noexcept { - return *static_cast(operation_.get()); + return *static_cast(operation_.get()); } inline size_t @@ -121,23 +121,23 @@ class tac final } void - replace(const jlm::rvsdg::simple_op & operation, const std::vector & operands); + replace(const rvsdg::SimpleOperation & operation, const std::vector & operands); void - convert(const jlm::rvsdg::simple_op & operation, const std::vector & operands); + convert(const rvsdg::SimpleOperation & operation, const std::vector & operands); static std::string ToAscii(const tac & threeAddressCode); static std::unique_ptr - create(const jlm::rvsdg::simple_op & operation, const std::vector & operands) + create(const rvsdg::SimpleOperation & operation, const std::vector & operands) { return std::make_unique(operation, operands); } static std::unique_ptr create( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & operation, const std::vector & operands, const std::vector & names) { @@ -146,7 +146,7 @@ class tac final static std::unique_ptr create( - const jlm::rvsdg::simple_op & operation, + const rvsdg::SimpleOperation & 
operation, const std::vector & operands, std::vector> results) { @@ -155,7 +155,7 @@ class tac final private: void - create_results(const jlm::rvsdg::simple_op & operation, const std::vector & names) + create_results(const rvsdg::SimpleOperation & operation, const std::vector & names) { JLM_ASSERT(names.size() == operation.nresults()); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 01034dbe9..7d5ec54b5 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -202,7 +202,7 @@ class RegisterLocation final : public Location auto node = jlm::rvsdg::output::GetNode(*Output_); auto index = Output_->index(); - if (jlm::rvsdg::is(node)) + if (jlm::rvsdg::is(node)) { auto nodestr = node->operation().debug_string(); auto outputstr = Output_->type().debug_string(); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index c38f48ede..f4e3ed9d4 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -230,7 +230,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); } - if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) + if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) && n1->operation() == n2->operation() && n1->ninputs() == n2->ninputs() && o1->index() == o2->index()) { diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index 677dc7a98..1417890a7 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -115,10 +115,10 @@ class unrollinfo final return cmpnode_; } - inline const jlm::rvsdg::simple_op & + [[nodiscard]] const rvsdg::SimpleOperation & cmpoperation() const noexcept { - return *static_cast(&cmpnode()->operation()); + return *static_cast(&cmpnode()->operation()); } inline jlm::rvsdg::node * @@ -127,10 +127,10 @@ class unrollinfo final return armnode_; } - inline const jlm::rvsdg::simple_op & + [[nodiscard]] const 
rvsdg::SimpleOperation & armoperation() const noexcept { - return *static_cast(&armnode()->operation()); + return *static_cast(&armnode()->operation()); } inline rvsdg::RegionArgument * diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 8d14e83c6..5bb8a3182 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -177,7 +177,7 @@ JlmToMlirConverter::ConvertNode( ::mlir::Operation * JlmToMlirConverter::ConvertBitBinaryNode( - const jlm::rvsdg::simple_op & bitOp, + const rvsdg::SimpleOperation & bitOp, ::llvm::SmallVector<::mlir::Value> inputs) { ::mlir::Operation * MlirOp; @@ -264,7 +264,7 @@ JlmToMlirConverter::ConvertBitBinaryNode( ::mlir::Operation * JlmToMlirConverter::BitCompareNode( - const jlm::rvsdg::simple_op & bitOp, + const rvsdg::SimpleOperation & bitOp, ::llvm::SmallVector<::mlir::Value> inputs) { ::mlir::arith::CmpIPredicate compPredicate; diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index e68e6e18a..b5ff91471 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -121,7 +121,7 @@ class JlmToMlirConverter final */ ::mlir::Operation * ConvertBitBinaryNode( - const jlm::rvsdg::simple_op & bitOp, + const rvsdg::SimpleOperation & bitOp, ::llvm::SmallVector<::mlir::Value> inputs); /** @@ -131,7 +131,7 @@ class JlmToMlirConverter final * \return The converted MLIR RVSDG operation. */ ::mlir::Operation * - BitCompareNode(const jlm::rvsdg::simple_op & bitOp, ::llvm::SmallVector<::mlir::Value> inputs); + BitCompareNode(const rvsdg::SimpleOperation & bitOp, ::llvm::SmallVector<::mlir::Value> inputs); /** * Converts an RVSDG simple_node to an MLIR RVSDG operation. 
diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 79289fd00..09a3d6ca2 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -137,12 +137,12 @@ binary_normal_form::normalize_node(jlm::rvsdg::node * node, const binary_op & op if (changes) { - std::unique_ptr tmp_op; + std::unique_ptr tmp_op; if (new_args.size() > 2) tmp_op.reset(new flattened_binary_op(op, new_args.size())); JLM_ASSERT(new_args.size() >= 2); - const auto & new_op = tmp_op ? *tmp_op : static_cast(op); + const auto & new_op = tmp_op ? *tmp_op : static_cast(op); divert_users(node, simple_node::create_normalized(node->region(), new_op, new_args)); remove(node); return false; @@ -154,7 +154,7 @@ binary_normal_form::normalize_node(jlm::rvsdg::node * node, const binary_op & op std::vector binary_normal_form::normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & base_op, + const SimpleOperation & base_op, const std::vector & args) const { const auto & op = *static_cast(&base_op); @@ -187,14 +187,14 @@ binary_normal_form::normalized_create( /* FIXME: reorder for commutative operation */ /* FIXME: attempt distributive transform */ - std::unique_ptr tmp_op; + std::unique_ptr tmp_op; if (new_args.size() > 2) { tmp_op.reset(new flattened_binary_op(op, new_args.size())); } region = new_args[0]->region(); - const auto & new_op = tmp_op ? *tmp_op : static_cast(op); + const auto & new_op = tmp_op ? 
*tmp_op : static_cast(op); return simple_normal_form::normalized_create(region, new_op, new_args); } @@ -298,7 +298,7 @@ flattened_binary_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector flattened_binary_normal_form::normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & base_op, + const SimpleOperation & base_op, const std::vector & arguments) const { const auto & op = static_cast(base_op); diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index 7e1ee424b..dd5e66705 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -35,7 +35,7 @@ class binary_normal_form final : public simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const override; virtual void @@ -112,7 +112,7 @@ class flattened_binary_normal_form final : public simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const override; }; @@ -122,7 +122,7 @@ class flattened_binary_normal_form final : public simple_normal_form Operator taking two arguments (with well-defined reduction for more operands if operator is associative). 
*/ -class binary_op : public simple_op +class binary_op : public SimpleOperation { public: enum class flags @@ -137,7 +137,7 @@ class binary_op : public simple_op inline binary_op( const std::vector> operands, std::shared_ptr result) - : simple_op(std::move(operands), { std::move(result) }) + : SimpleOperation(std::move(operands), { std::move(result) }) {} virtual binop_reduction_path_t @@ -167,7 +167,7 @@ class binary_op : public simple_op } }; -class flattened_binary_op final : public simple_op +class flattened_binary_op final : public SimpleOperation { public: enum class reduction @@ -179,14 +179,14 @@ class flattened_binary_op final : public simple_op virtual ~flattened_binary_op() noexcept; inline flattened_binary_op(std::unique_ptr op, size_t narguments) noexcept - : simple_op({ narguments, op->argument(0) }, { op->result(0) }), + : SimpleOperation({ narguments, op->argument(0) }, { op->result(0) }), op_(std::move(op)) { JLM_ASSERT(op_->is_associative()); } inline flattened_binary_op(const binary_op & op, size_t narguments) - : simple_op({ narguments, op.argument(0) }, { op.result(0) }), + : SimpleOperation({ narguments, op.argument(0) }, { op.result(0) }), op_(std::unique_ptr(static_cast(op.copy().release()))) { JLM_ASSERT(op_->is_associative()); diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index c4017bed5..001cf6ff0 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -146,7 +146,7 @@ class concat_normal_form final : public simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const override { std::vector new_args; diff --git a/jlm/rvsdg/nullary.hpp b/jlm/rvsdg/nullary.hpp index c5202a503..235bf6a71 100644 --- a/jlm/rvsdg/nullary.hpp +++ b/jlm/rvsdg/nullary.hpp @@ -20,13 +20,13 @@ class output; /** \brief Nullary operator (operator taking no formal arguments) */ -class 
nullary_op : public simple_op +class nullary_op : public SimpleOperation { public: virtual ~nullary_op() noexcept; inline explicit nullary_op(std::shared_ptr result) - : simple_op({}, { std::move(result) }) + : SimpleOperation({}, { std::move(result) }) {} }; diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index a25021820..d5159f72d 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -22,39 +22,38 @@ operation::normal_form(Graph * graph) noexcept /* simple operation */ -simple_op::~simple_op() -{} +SimpleOperation::~SimpleOperation() noexcept = default; size_t -simple_op::narguments() const noexcept +SimpleOperation::narguments() const noexcept { return operands_.size(); } const std::shared_ptr & -simple_op::argument(size_t index) const noexcept +SimpleOperation::argument(size_t index) const noexcept { JLM_ASSERT(index < narguments()); return operands_[index]; } size_t -simple_op::nresults() const noexcept +SimpleOperation::nresults() const noexcept { return results_.size(); } const std::shared_ptr & -simple_op::result(size_t index) const noexcept +SimpleOperation::result(size_t index) const noexcept { JLM_ASSERT(index < nresults()); return results_[index]; } jlm::rvsdg::simple_normal_form * -simple_op::normal_form(Graph * graph) noexcept +SimpleOperation::normal_form(Graph * graph) noexcept { - return static_cast(graph->node_normal_form(typeid(simple_op))); + return static_cast(graph->node_normal_form(typeid(SimpleOperation))); } /* structural operation */ diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index dec192ac6..ddafe793f 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -61,12 +61,12 @@ is(const jlm::rvsdg::operation & operation) noexcept /* simple operation */ -class simple_op : public operation +class SimpleOperation : public operation { public: - virtual ~simple_op(); + ~SimpleOperation() noexcept override; - simple_op( + SimpleOperation( std::vector> operands, std::vector> results) : 
operands_(std::move(operands)), diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index c31fadff8..e1efedda5 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -25,7 +25,7 @@ namespace jlm::rvsdg class node; class simple_node; -class simple_op; +class SimpleOperation; class StructuralInput; class StructuralNode; class structural_op; diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 0fe1cb826..dcecbb773 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -47,7 +47,7 @@ simple_node::~simple_node() simple_node::simple_node( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & operands) : node(op.copy(), region) { @@ -73,7 +73,7 @@ simple_node::simple_node( jlm::rvsdg::node * simple_node::copy(rvsdg::Region * region, const std::vector & operands) const { - auto node = create(region, *static_cast(&operation()), operands); + auto node = create(region, operation(), operands); graph()->mark_denormalized(); return node; } diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index 40de0070f..dac954122 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -14,7 +14,7 @@ namespace jlm::rvsdg { -class simple_op; +class SimpleOperation; class simple_input; class simple_output; @@ -28,7 +28,7 @@ class simple_node : public node protected: simple_node( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & operands); public: @@ -38,7 +38,7 @@ class simple_node : public node jlm::rvsdg::simple_output * output(size_t index) const noexcept; - const jlm::rvsdg::simple_op & + const SimpleOperation & operation() const noexcept; virtual jlm::rvsdg::node * @@ -50,7 +50,7 @@ class simple_node : public node static inline jlm::rvsdg::simple_node * create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & operands) { return new 
simple_node(region, op, operands); @@ -59,7 +59,7 @@ class simple_node : public node static inline std::vector create_normalized( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & operands) { auto nf = static_cast(region->graph()->node_normal_form(typeid(op))); @@ -122,10 +122,10 @@ simple_node::output(size_t index) const noexcept return static_cast(node::output(index)); } -inline const jlm::rvsdg::simple_op & +inline const SimpleOperation & simple_node::operation() const noexcept { - return *static_cast(&node::operation()); + return *static_cast(&node::operation()); } } diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 7cf958ff7..20a062af3 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -82,7 +82,7 @@ simple_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector simple_normal_form::normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const { jlm::rvsdg::node * node = nullptr; @@ -122,6 +122,6 @@ static void __attribute__((constructor)) register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::simple_op), + typeid(jlm::rvsdg::SimpleOperation), get_default_normal_form); } diff --git a/jlm/rvsdg/simple-normal-form.hpp b/jlm/rvsdg/simple-normal-form.hpp index e61928f6d..18bc3f615 100644 --- a/jlm/rvsdg/simple-normal-form.hpp +++ b/jlm/rvsdg/simple-normal-form.hpp @@ -11,7 +11,7 @@ namespace jlm::rvsdg { -class simple_op; +class SimpleOperation; class simple_normal_form : public node_normal_form { @@ -29,7 +29,7 @@ class simple_normal_form : public node_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const; virtual void diff --git a/jlm/rvsdg/statemux.cpp 
b/jlm/rvsdg/statemux.cpp index a2d13a74e..2509e8b50 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -150,7 +150,7 @@ mux_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector mux_normal_form::normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & operands) const { JLM_ASSERT(dynamic_cast(&op)); diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index 843468a78..5d57a453a 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -32,7 +32,7 @@ class mux_normal_form final : public simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const override; virtual void @@ -60,13 +60,13 @@ class mux_normal_form final : public simple_normal_form /* mux operation */ -class mux_op final : public simple_op +class mux_op final : public SimpleOperation { public: virtual ~mux_op() noexcept; inline mux_op(std::shared_ptr type, size_t narguments, size_t nresults) - : simple_op({ narguments, type }, { nresults, type }) + : SimpleOperation({ narguments, type }, { nresults, type }) {} virtual bool diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 52ec39366..83366af52 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -56,7 +56,7 @@ unary_normal_form::normalize_node(jlm::rvsdg::node * node) const std::vector unary_normal_form::normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & op, + const SimpleOperation & op, const std::vector & arguments) const { JLM_ASSERT(arguments.size() == 1); diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index b5f0f2a13..be813b611 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -33,7 +33,7 @@ class unary_normal_form final : public simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const jlm::rvsdg::simple_op & 
op, + const SimpleOperation & op, const std::vector & arguments) const override; virtual void @@ -54,7 +54,7 @@ class unary_normal_form final : public simple_normal_form Operator taking a single argument. */ -class unary_op : public simple_op +class unary_op : public SimpleOperation { public: virtual ~unary_op() noexcept; @@ -62,7 +62,7 @@ class unary_op : public simple_op inline unary_op( std::shared_ptr operand, std::shared_ptr result) - : simple_op({ std::move(operand) }, { std::move(result) }) + : SimpleOperation({ std::move(operand) }, { std::move(result) }) {} virtual unop_reduction_path_t diff --git a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp index 03a48d629..14fbf1e05 100644 --- a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp @@ -20,7 +20,7 @@ static std::unique_ptr SetupControlFlowGraph( jlm::llvm::ipgraph_module & ipgModule, - const jlm::rvsdg::simple_op & operation) + const jlm::rvsdg::SimpleOperation & operation) { using namespace jlm::llvm; @@ -50,7 +50,7 @@ SetupControlFlowGraph( } static std::unique_ptr -SetupFunctionWithThreeAddressCode(const jlm::rvsdg::simple_op & operation) +SetupFunctionWithThreeAddressCode(const jlm::rvsdg::SimpleOperation & operation) { using namespace jlm::llvm; diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 234b69444..d7bb5d801 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -360,7 +360,7 @@ class StructuralNodeResult final : public rvsdg::RegionResult } }; -class test_op final : public rvsdg::simple_op +class test_op final : public rvsdg::SimpleOperation { public: virtual ~test_op(); @@ -368,7 +368,7 @@ class test_op final : public rvsdg::simple_op inline test_op( std::vector> arguments, std::vector> results) - : simple_op(std::move(arguments), std::move(results)) + : 
SimpleOperation(std::move(arguments), std::move(results)) {} test_op(const test_op &) = default; From adb2076b44ca33c0db9edcbe1c460a7b2c649f4c Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 30 Nov 2024 12:07:03 +0100 Subject: [PATCH 124/170] Rename operation() method of node class (#672) The method name collided with the class name, leading to compiler errors when I tried to perform other refactoring. This PR does the following: 1. Rename operation() method of node class to GetOperation() 2. Make it virtual such that the subclasses can overload it and give back the concrete operation type. --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 167 +++++++++--------- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 5 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 18 +- .../backend/rvsdg2rhls/GammaConversion.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-buffers.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 16 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 14 +- .../rvsdg2rhls/distribute-constants.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 11 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 37 ++-- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 33 ++-- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 6 +- .../rvsdg2rhls/remove-redundant-buf.cpp | 8 +- .../rvsdg2rhls/remove-unused-state.cpp | 9 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp | 24 +-- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 8 +- jlm/hls/opt/cne.cpp | 12 +- jlm/hls/util/view.cpp | 4 +- jlm/llvm/backend/dot/DotWriter.cpp | 2 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 14 +- jlm/llvm/ir/operators/Load.cpp | 14 +- jlm/llvm/ir/operators/Load.hpp | 4 +- jlm/llvm/ir/operators/Phi.cpp | 6 + jlm/llvm/ir/operators/Phi.hpp | 7 +- jlm/llvm/ir/operators/Store.cpp | 18 +- jlm/llvm/ir/operators/Store.hpp | 4 +- 
jlm/llvm/ir/operators/call.hpp | 4 +- jlm/llvm/ir/operators/delta.cpp | 6 + jlm/llvm/ir/operators/delta.hpp | 19 +- jlm/llvm/ir/operators/lambda.cpp | 6 + jlm/llvm/ir/operators/lambda.hpp | 15 +- jlm/llvm/ir/operators/operators.cpp | 2 +- jlm/llvm/ir/operators/sext.cpp | 8 +- jlm/llvm/opt/DeadNodeElimination.cpp | 2 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 4 +- jlm/llvm/opt/alias-analyses/PointsToGraph.cpp | 12 +- .../RegionAwareMemoryNodeProvider.cpp | 10 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 32 ++-- jlm/llvm/opt/cne.cpp | 12 +- jlm/llvm/opt/push.cpp | 2 +- jlm/llvm/opt/unroll.cpp | 2 +- jlm/llvm/opt/unroll.hpp | 12 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 28 +-- jlm/rvsdg/binary.cpp | 14 +- jlm/rvsdg/bitstring/bitoperation-classes.cpp | 10 +- jlm/rvsdg/bitstring/concat.cpp | 20 +-- jlm/rvsdg/bitstring/slice.cpp | 4 +- jlm/rvsdg/control.cpp | 2 +- jlm/rvsdg/gamma.cpp | 12 +- jlm/rvsdg/node.cpp | 2 +- jlm/rvsdg/node.hpp | 6 +- jlm/rvsdg/region.cpp | 4 +- jlm/rvsdg/simple-node.cpp | 21 ++- jlm/rvsdg/simple-node.hpp | 10 +- jlm/rvsdg/simple-normal-form.cpp | 4 +- jlm/rvsdg/statemux.cpp | 8 +- jlm/rvsdg/unary.cpp | 2 +- jlm/rvsdg/view.cpp | 8 +- .../rvsdg2rhls/MemoryConverterTests.cpp | 30 ++-- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 6 +- tests/jlm/llvm/backend/dot/DotWriterTests.cpp | 4 +- tests/jlm/llvm/ir/operators/StoreTests.cpp | 8 +- tests/jlm/mlir/TestJlmToMlirToJlm.cpp | 2 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 2 +- .../mlir/frontend/TestMlirToJlmConverter.cpp | 36 ++-- tests/jlm/rvsdg/bitstring/bitstring.cpp | 152 ++++++++-------- tests/jlm/rvsdg/test-gamma.cpp | 6 +- 71 files changed, 528 insertions(+), 492 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 62c4bfa65..cf5581fb2 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -20,7 +20,7 @@ 
RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Only handles nodes with a single output if (node->noutputs() != 1) { - throw std::logic_error(node->operation().debug_string() + " has more than 1 output"); + throw std::logic_error(node->GetOperation().debug_string() + " has more than 1 output"); } // Create the module and its input/output ports @@ -45,7 +45,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Get the data signal from the bundle auto outData = GetSubfield(body, outBundle, "data"); - if (dynamic_cast(&(node->operation()))) + if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -54,7 +54,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // We drop the carry bit Connect(body, outData, DropMSBs(body, op, 1)); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -63,7 +63,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // We drop the carry bit Connect(body, outData, DropMSBs(body, op, 1)); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -71,7 +71,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -79,7 +79,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) 
// Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -87,7 +87,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (auto bitmulOp = dynamic_cast(&(node->operation()))) + else if (auto bitmulOp = dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -96,7 +96,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Multiplication results are double the input width, so we drop the upper half of the result Connect(body, outData, DropMSBs(body, op, bitmulOp->type().nbits())); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -107,7 +107,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, DropMSBs(body, uIntOp, 1)); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -115,7 +115,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -125,7 +125,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const 
jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, uIntOp); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -136,7 +136,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, slice); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -146,7 +146,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto uIntOp = AddAsUIntOp(body, remOp); Connect(body, outData, uIntOp); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -154,7 +154,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -162,7 +162,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -172,7 +172,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if 
(dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -180,7 +180,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -188,7 +188,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -196,7 +196,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -206,7 +206,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -216,28 +216,28 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) // Connect the op to the output data Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, 
inBundles[0], "data"); Connect(body, outData, input0); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto inData = GetSubfield(body, inBundles[0], "data"); int outSize = JlmSize(&node->output(0)->type()); Connect(body, outData, AddBitsOp(body, inData, outSize - 1, 0)); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto inData = GetSubfield(body, inBundles[0], "data"); Connect(body, outData, inData); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto inData = GetSubfield(body, inBundles[0], "data"); Connect(body, outData, inData); } - else if (auto op = dynamic_cast(&(node->operation()))) + else if (auto op = dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto sintOp = AddAsSIntOp(body, input0); @@ -245,7 +245,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto uintOp = AddAsUIntOp(body, padOp); Connect(body, outData, uintOp); } - else if (auto op = dynamic_cast(&(node->operation()))) + else if (auto op = dynamic_cast(&(node->GetOperation()))) { auto value = op->value(); auto size = value.nbits(); @@ -253,14 +253,14 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto constant = GetConstant(body, size, value.to_uint()); Connect(body, outData, constant); } - else if (auto op = dynamic_cast(&(node->operation()))) + else if (auto op = dynamic_cast(&(node->GetOperation()))) { auto value = op->value().alternative(); auto size = ceil(log2(op->value().nalternatives())); auto constant = GetConstant(body, size, value); Connect(body, outData, constant); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); auto input1 = GetSubfield(body, inBundles[1], "data"); @@ -269,17 +269,17 @@ 
RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto op = AddLtOp(body, sInt0, sInt1); Connect(body, outData, op); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); Connect(body, outData, input0); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto input0 = GetSubfield(body, inBundles[0], "data"); Connect(body, outData, input0); } - else if (auto op = dynamic_cast(&(node->operation()))) + else if (auto op = dynamic_cast(&(node->GetOperation()))) { auto inData = GetSubfield(body, inBundles[0], "data"); auto outData = GetSubfield(body, outBundle, "data"); @@ -313,7 +313,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) Connect(body, outData, result); } } - else if (auto op = dynamic_cast(&(node->operation()))) + else if (auto op = dynamic_cast(&(node->GetOperation()))) { // Start of with base pointer auto input0 = GetSubfield(body, inBundles[0], "data"); @@ -348,13 +348,14 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) auto asUInt = AddAsUIntOp(body, result); Connect(body, outData, AddBitsOp(body, asUInt, GetPointerSizeInBits() - 1, 0)); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { Connect(body, outData, GetConstant(body, 1, 0)); } else { - throw std::logic_error("Simple node " + node->operation().debug_string() + " not implemented!"); + throw std::logic_error( + "Simple node " + node->GetOperation().debug_string() + " not implemented!"); } // Generate the output valid signal @@ -466,7 +467,7 @@ RhlsToFirrtlConverter::MlirGenLoopConstBuffer(const jlm::rvsdg::simple_node * no circt::firrtl::FModuleOp RhlsToFirrtlConverter::MlirGenFork(const jlm::rvsdg::simple_node * node) { - auto op = dynamic_cast(&node->operation()); + auto op = 
dynamic_cast(&node->GetOperation()); bool isConstant = op->IsConstant(); // Create the module and its input/output ports auto module = nodeToModule(node); @@ -744,7 +745,7 @@ RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) // Create the module and its input/output ports auto module = nodeToModule(node, false); auto body = module.getBodyBlock(); - auto op = dynamic_cast(&node->operation()); + auto op = dynamic_cast(&node->GetOperation()); auto loadTypes = op->GetLoadTypes(); ::llvm::SmallVector loadAddrReadys; @@ -907,8 +908,8 @@ RhlsToFirrtlConverter::MlirGenHlsLoad(const jlm::rvsdg::simple_node * node) auto module = nodeToModule(node, false); auto body = module.getBodyBlock(); - auto load = dynamic_cast(&(node->operation())); - auto local_load = dynamic_cast(&(node->operation())); + auto load = dynamic_cast(&(node->GetOperation())); + auto local_load = dynamic_cast(&(node->GetOperation())); JLM_ASSERT(load || local_load); // Input signals @@ -1082,7 +1083,7 @@ RhlsToFirrtlConverter::MlirGenHlsDLoad(const jlm::rvsdg::simple_node * node) auto module = nodeToModule(node, false); auto body = module.getBodyBlock(); - auto load = dynamic_cast(&(node->operation())); + auto load = dynamic_cast(&(node->GetOperation())); JLM_ASSERT(load); // Input signals @@ -1125,13 +1126,13 @@ RhlsToFirrtlConverter::MlirGenHlsDLoad(const jlm::rvsdg::simple_node * node) circt::firrtl::FModuleOp RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node) { - auto lmem_op = dynamic_cast(&(node->operation())); + auto lmem_op = dynamic_cast(&(node->GetOperation())); JLM_ASSERT(lmem_op); auto res_node = rvsdg::input::GetNode(**node->output(0)->begin()); - auto res_op = dynamic_cast(&res_node->operation()); + auto res_op = dynamic_cast(&res_node->GetOperation()); JLM_ASSERT(res_op); auto req_node = rvsdg::input::GetNode(**node->output(1)->begin()); - auto req_op = dynamic_cast(&req_node->operation()); + auto req_op = 
dynamic_cast(&req_node->GetOperation()); JLM_ASSERT(req_op); // Create the module and its input/output ports - we use a non-standard way here // Generate a vector with all inputs and outputs of the module @@ -1338,8 +1339,8 @@ RhlsToFirrtlConverter::MlirGenHlsStore(const jlm::rvsdg::simple_node * node) auto module = nodeToModule(node, false); auto body = module.getBodyBlock(); - auto store = dynamic_cast(&(node->operation())); - auto local_store = dynamic_cast(&(node->operation())); + auto store = dynamic_cast(&(node->GetOperation())); + auto local_store = dynamic_cast(&(node->GetOperation())); JLM_ASSERT(store || local_store); // Input signals @@ -1482,8 +1483,8 @@ RhlsToFirrtlConverter::MlirGenMem(const jlm::rvsdg::simple_node * node) auto module = nodeToModule(node, true); auto body = module.getBodyBlock(); - // Check if it's a load or store operation - bool store = dynamic_cast(&(node->operation())); + // Check if it's a load or store GetOperation + bool store = dynamic_cast(&(node->GetOperation())); InitializeMemReq(module); // Input signals @@ -1745,7 +1746,7 @@ RhlsToFirrtlConverter::MlirGenPrint(const jlm::rvsdg::simple_node * node) auto outBundle = GetOutPort(module, 0); Connect(body, outBundle, inBundle); auto trigger = AddAndOp(body, AddAndOp(body, inReady, inValid), AddNotOp(body, reset)); - auto pn = dynamic_cast(&node->operation()); + auto pn = dynamic_cast(&node->GetOperation()); auto formatString = "print node " + std::to_string(pn->id()) + ": %x\n"; auto name = "print_node_" + std::to_string(pn->id()); auto printValue = AddPadOp(body, inData, 64); @@ -1833,7 +1834,7 @@ RhlsToFirrtlConverter::MlirGenBuffer(const jlm::rvsdg::simple_node * node) auto module = nodeToModule(node); auto body = module.getBodyBlock(); - auto op = dynamic_cast(&(node->operation())); + auto op = dynamic_cast(&(node->GetOperation())); auto capacity = op->capacity; auto clock = GetClockSignal(module); @@ -1973,7 +1974,7 @@ RhlsToFirrtlConverter::MlirGenAddrQueue(const 
jlm::rvsdg::simple_node * node) auto module = nodeToModule(node); auto body = module.getBodyBlock(); - auto op = dynamic_cast(&(node->operation())); + auto op = dynamic_cast(&(node->GetOperation())); auto capacity = op->capacity; auto clock = GetClockSignal(module); @@ -2370,90 +2371,90 @@ RhlsToFirrtlConverter::MlirGenBranch(const jlm::rvsdg::simple_node * node) circt::firrtl::FModuleOp RhlsToFirrtlConverter::MlirGen(const jlm::rvsdg::simple_node * node) { - if (dynamic_cast(&(node->operation()))) + if (dynamic_cast(&(node->GetOperation()))) { return MlirGenSink(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenFork(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenLoopConstBuffer(node); - // } else if (dynamic_cast(&(node->operation()))) { + // } else if (dynamic_cast(&(node->GetOperation()))) { // return MlirGenMem(node); - // } else if (dynamic_cast(&(node->operation()))) { + // } else if (dynamic_cast(&(node->GetOperati()))) { // return MlirGenMem(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenHlsLoad(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenHlsDLoad(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenHlsStore(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { // same as normal load for now, but with index instead of address return MlirGenHlsLoad(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { // same as normal store for now, but with index instead of address return MlirGenHlsStore(node); } - else if (dynamic_cast(&(node->operation()))) + else if 
(dynamic_cast(&(node->GetOperation()))) { return MlirGenHlsLocalMem(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenHlsMemResp(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenHlsMemReq(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenPredicationBuffer(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenBuffer(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenBranch(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenTrigger(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenStateGate(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenPrint(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { return MlirGenAddrQueue(node); } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { // return merge_to_firrtl(n); - throw std::logic_error(node->operation().debug_string() + " not implemented!"); + throw std::logic_error(node->GetOperation().debug_string() + " not implemented!"); } - else if (auto o = dynamic_cast(&(node->operation()))) + else if (auto o = dynamic_cast(&(node->GetOperation()))) { if (o->discarding) { @@ -2615,7 +2616,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB { // Get RVSDG node of the source auto source = o->node(); - if (dynamic_cast(&(source->operation()))) + if (dynamic_cast(&source->GetOperation())) { // Connect directly to mem auto mem_out = 
dynamic_cast(source->input(0)->origin()); @@ -2644,7 +2645,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB } } - if (dynamic_cast(&(rvsdgNode->operation()))) + if (dynamic_cast(&(rvsdgNode->GetOperation()))) { // hook up request port auto requestNode = rvsdg::input::GetNode(**rvsdgNode->output(1)->begin()); @@ -2749,8 +2750,8 @@ RhlsToFirrtlConverter::createInstances( { if (auto sn = dynamic_cast(node)) { - if (dynamic_cast(&(node->operation())) - || dynamic_cast(&(node->operation()))) + if (dynamic_cast(&(node->GetOperation())) + || dynamic_cast(&(node->GetOperation()))) { // these are virtual - connections go to local_mem instead continue; @@ -2769,7 +2770,7 @@ RhlsToFirrtlConverter::createInstances( else { throw util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); + "Unimplemented op (unexpected structural node) : " + node->GetOperation().debug_string()); } } return instances; @@ -2786,7 +2787,7 @@ RhlsToFirrtlConverter::TraceStructuralOutput(rvsdg::StructuralOutput * output) if (!dynamic_cast(node)) { throw std::logic_error( - "Expected a hls::loop_node but found: " + node->operation().debug_string()); + "Expected a hls::loop_node but found: " + node->GetOperation().debug_string()); } JLM_ASSERT(output->results.size() == 1); auto origin = output->results.begin().ptr()->origin(); @@ -3634,7 +3635,7 @@ RhlsToFirrtlConverter::check_module(circt::firrtl::FModuleOp & module) else { user->print(::llvm::outs()); - llvm_unreachable("unexpected operation"); + llvm_unreachable("unexpected GetOperation"); } } } @@ -3966,7 +3967,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) append.append(std::to_string(JlmSize(&node->output(i)->type()))); append.append("W"); } - if (auto op = dynamic_cast(&node->operation())) + if (auto op = dynamic_cast(&node->GetOperation())) { const jlm::rvsdg::Type * pointeeType = &op->GetPointeeType(); for (size_t i = 1; i < 
node->ninputs(); i++) @@ -3989,7 +3990,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) append.append(std::to_string(bytes)); } } - if (auto op = dynamic_cast(&node->operation())) + if (auto op = dynamic_cast(&node->GetOperation())) { auto loadTypes = op->GetLoadTypes(); for (size_t i = 0; i < loadTypes->size(); i++) @@ -4012,7 +4013,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) append.append(std::to_string(bitWidth)); } } - if (auto op = dynamic_cast(&node->operation())) + if (auto op = dynamic_cast(&node->GetOperation())) { append.append("_S"); append.append(std::to_string( @@ -4024,7 +4025,7 @@ RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) size_t stores = (rvsdg::input::GetNode(**node->output(1)->begin())->ninputs() - 1 - loads) / 2; append.append(std::to_string(stores)); } - auto name = jlm::util::strfmt("op_", node->operation().debug_string() + append); + auto name = jlm::util::strfmt("op_", node->GetOperation().debug_string() + append); // Remove characters that are not valid in firrtl module names std::replace_if(name.begin(), name.end(), isForbiddenChar, '_'); return name; diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index f59936e10..e85a1b58e 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -47,7 +47,8 @@ BaseHLS::get_node_name(const jlm::rvsdg::node * node) append.append("_W"); append.append(std::to_string(JlmSize(&node->output(outPorts - 1)->type()))); } - auto name = util::strfmt("op_", node->operation().debug_string(), append, "_", node_map.size()); + auto name = + util::strfmt("op_", node->GetOperation().debug_string(), append, "_", node_map.size()); // remove chars that are not valid in firrtl module names std::replace_if(name.begin(), name.end(), isForbiddenChar, '_'); node_map[node] = name; @@ -152,7 +153,7 @@ BaseHLS::create_node_names(rvsdg::Region * r) else { throw 
util::error( - "Unimplemented op (unexpected structural node) : " + node.operation().debug_string()); + "Unimplemented op (unexpected structural node) : " + node.GetOperation().debug_string()); } } } diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index a0558480f..9d0cde44e 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -68,7 +68,7 @@ DotHLS::node_to_dot(const jlm::rvsdg::node * node) { auto SPACER = " \n"; auto name = get_node_name(node); - auto opname = node->operation().debug_string(); + auto opname = node->GetOperation().debug_string(); std::replace_if(opname.begin(), opname.end(), isForbiddenChar, '_'); std::string inputs; @@ -240,7 +240,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) else { throw jlm::util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); + "Unimplemented op (unexpected structural node) : " + node->GetOperation().debug_string()); } } @@ -248,8 +248,8 @@ DotHLS::loop_to_dot(hls::loop_node * ln) dot << "{rank=same "; for (auto node : jlm::rvsdg::topdown_traverser(sr)) { - auto mx = dynamic_cast(&node->operation()); - auto lc = dynamic_cast(&node->operation()); + auto mx = dynamic_cast(&node->GetOperation()); + auto lc = dynamic_cast(&node->GetOperation()); if ((mx && !mx->discarding && mx->loop) || lc) { dot << get_node_name(node) << " "; @@ -260,7 +260,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) dot << "{rank=same "; for (auto node : jlm::rvsdg::topdown_traverser(sr)) { - auto br = dynamic_cast(&node->operation()); + auto br = dynamic_cast(&node->GetOperation()); if (br && br->loop) { dot << get_node_name(node) << " "; @@ -274,7 +274,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) { if (dynamic_cast(node)) { - auto mx = dynamic_cast(&node->operation()); + auto mx = dynamic_cast(&node->GetOperation()); auto node_name = get_node_name(node); for (size_t i = 0; i < node->ninputs(); ++i) { @@ -286,7 
+286,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) && (/*i==0||*/ i == 2); // back_outputs.count(node->input(i)->origin()); auto origin_out = dynamic_cast(node->input(i)->origin()); if (origin_out - && dynamic_cast(&origin_out->node()->operation())) + && dynamic_cast(&origin_out->node()->GetOperation())) { // back = true; @@ -322,7 +322,7 @@ DotHLS::prepare_loop_out_port(hls::loop_node * ln) else { throw jlm::util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); + "Unimplemented op (unexpected structural node) : " + node->GetOperation().debug_string()); } } for (size_t i = 0; i < sr->narguments(); ++i) @@ -414,7 +414,7 @@ DotHLS::subregion_to_dot(rvsdg::Region * sr) else { throw jlm::util::error( - "Unimplemented op (unexpected structural node) : " + node->operation().debug_string()); + "Unimplemented op (unexpected structural node) : " + node->GetOperation().debug_string()); } } // process results diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 0529a7de4..d2d643128 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -122,7 +122,7 @@ CanGammaNodeBeSpeculative(const rvsdg::GammaNode & gammaNode) } else if (rvsdg::is(&node)) { - throw util::error("Unexpected structural node: " + node.operation().debug_string()); + throw util::error("Unexpected structural node: " + node.GetOperation().debug_string()); } } } diff --git a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp index a139c5a40..b0ac75705 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp @@ -31,7 +31,7 @@ add_buffers(rvsdg::Region * region, bool pass_through) JLM_ASSERT(out->nusers() == 1); if (auto ni = dynamic_cast(*out->begin())) { - auto buf = dynamic_cast(&ni->node()->operation()); + auto buf = dynamic_cast(&ni->node()->GetOperation()); 
if (buf && (buf->pass_through || !pass_through)) { continue; @@ -60,7 +60,7 @@ add_buffers(rvsdg::Region * region, bool pass_through) JLM_ASSERT(out->nusers() == 1); if (auto ni = dynamic_cast(*out->begin())) { - auto buf = dynamic_cast(&ni->node()->operation()); + auto buf = dynamic_cast(&ni->node()->GetOperation()); if (buf && (buf->pass_through || !pass_through)) { continue; diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index d3723485b..a64715d43 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -113,7 +113,7 @@ convert_prints( convert_prints(structnode->subregion(n), printf, functionType); } } - else if (auto po = dynamic_cast(&(node->operation()))) + else if (auto po = dynamic_cast(&(node->GetOperation()))) { auto printf_local = route_to_region(printf, region); // TODO: prevent repetition? auto bc = jlm::rvsdg::create_bitconstant(region, 64, po->id()); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 05c03ccb8..41c757245 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -119,7 +119,7 @@ add_triggers(rvsdg::Region * region) } else { - throw jlm::util::error("Unexpected node type: " + node->operation().debug_string()); + throw jlm::util::error("Unexpected node type: " + node->GetOperation().debug_string()); } } else if (auto sn = dynamic_cast(node)) @@ -138,7 +138,7 @@ add_triggers(rvsdg::Region * region) } else { - throw jlm::util::error("Unexpected node type: " + node->operation().debug_string()); + throw jlm::util::error("Unexpected node type: " + node->GetOperation().debug_string()); } } } diff --git a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp index c3a9947d0..45d08eba9 100644 --- a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp @@ -51,16 
+51,16 @@ class TraceAllocaUses if (auto si = dynamic_cast(user)) { auto simplenode = si->node(); - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { store_nodes.push_back(simplenode); } else if (dynamic_cast( - &simplenode->operation())) + &simplenode->GetOperation())) { load_nodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast(&simplenode->GetOperation())) { // TODO: verify this is the right type of function call throw jlm::util::error("encountered a call for an alloca"); @@ -107,7 +107,7 @@ gep_to_index(jlm::rvsdg::output * o) // TODO: handle geps that are not direct predecessors auto no = dynamic_cast(o); JLM_ASSERT(no); - auto gep = dynamic_cast(&no->node()->operation()); + auto gep = dynamic_cast(&no->node()->GetOperation()); JLM_ASSERT(gep); // pointer to array, i.e. first index is zero // TODO: check @@ -127,14 +127,14 @@ alloca_conv(rvsdg::Region * region) alloca_conv(structnode->subregion(n)); } } - else if (auto po = dynamic_cast(&(node->operation()))) + else if (auto po = dynamic_cast(&(node->GetOperation()))) { // ensure that the size is one JLM_ASSERT(node->ninputs() == 1); auto constant_output = dynamic_cast(node->input(0)->origin()); JLM_ASSERT(constant_output); - auto constant_operation = - dynamic_cast(&constant_output->node()->operation()); + auto constant_operation = dynamic_cast( + &constant_output->node()->GetOperation()); JLM_ASSERT(constant_operation); JLM_ASSERT(constant_operation->value().to_uint() == 1); // ensure that the alloca is an array type @@ -200,7 +200,7 @@ alloca_conv(rvsdg::Region * region) JLM_ASSERT(node->output(1)->nusers() == 1); auto merge_in = *node->output(1)->begin(); auto merge_node = rvsdg::input::GetNode(*merge_in); - if (dynamic_cast(&merge_node->operation())) + if (dynamic_cast(&merge_node->GetOperation())) { // merge after alloca -> remove merge JLM_ASSERT(merge_node->ninputs() == 2); diff --git 
a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 32104e27b..a832fd3fc 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -379,7 +379,7 @@ process_loopnode(loop_node * loopNode) } else if (auto simplenode = dynamic_cast(node)) { - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { // can currently only generate dae one loop deep // find load slice within loop - three slices - complete, data and state-edge @@ -398,14 +398,14 @@ process_loopnode(loop_node * loopNode) break; } else if ( - dynamic_cast(&sn->operation()) - || dynamic_cast(&sn->operation())) + dynamic_cast(&sn->GetOperation()) + || dynamic_cast(&sn->GetOperation())) { // data slice may not contain loads or stores - this includes node can_decouple = false; break; } - else if (dynamic_cast(&sn->operation())) + else if (dynamic_cast(&sn->GetOperation())) { // decoupled load has to be exclusive to load slice - e.g. not needed once load slice is // removed @@ -426,8 +426,8 @@ process_loopnode(loop_node * loopNode) break; } else if ( - dynamic_cast(&sn->operation()) - || dynamic_cast(&sn->operation())) + dynamic_cast(&sn->GetOperation()) + || dynamic_cast(&sn->GetOperation())) { // state slice may not contain loads or stores except for node if (sn != dynamic_cast(simplenode)) @@ -436,7 +436,7 @@ process_loopnode(loop_node * loopNode) break; } } - else if (dynamic_cast(&sn->operation())) + else if (dynamic_cast(&sn->GetOperation())) { // decoupled load has to be exclusive to load slice - e.g. 
not needed once load slice is // removed diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 2bcba83e7..4f8a729bc 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -96,19 +96,19 @@ hls::distribute_constants(rvsdg::Region * region) } else { - throw util::error("Unexpected node type: " + node->operation().debug_string()); + throw util::error("Unexpected node type: " + node->GetOperation().debug_string()); } } else if (auto sn = dynamic_cast(node)) { if (is_constant(node)) { - distribute_constant(sn->operation(), sn->output(0)); + distribute_constant(sn->GetOperation(), sn->output(0)); } } else { - throw util::error("Unexpected node type: " + node->operation().debug_string()); + throw util::error("Unexpected node type: " + node->GetOperation().debug_string()); } } } diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 5f219b7c1..153eec06d 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -172,7 +172,7 @@ instrument_ref( } else if ( auto loadOp = - dynamic_cast(&(node->operation()))) + dynamic_cast(&(node->GetOperation()))) { auto addr = node->input(0)->origin(); JLM_ASSERT(dynamic_cast(&addr->type())); @@ -193,14 +193,14 @@ instrument_ref( // Divert the memory state of the load to the new memstate from the call operation node->input(1)->divert_to(callOp[1]); } - else if (auto ao = dynamic_cast(&(node->operation()))) + else if (auto ao = dynamic_cast(&(node->GetOperation()))) { // ensure that the size is one JLM_ASSERT(node->ninputs() == 1); auto constant_output = dynamic_cast(node->input(0)->origin()); JLM_ASSERT(constant_output); - auto constant_operation = - dynamic_cast(&constant_output->node()->operation()); + auto constant_operation = dynamic_cast( + &constant_output->node()->GetOperation()); 
JLM_ASSERT(constant_operation); JLM_ASSERT(constant_operation->value().to_uint() == 1); jlm::rvsdg::output * addr = node->output(0); @@ -229,7 +229,8 @@ instrument_ref( } } else if ( - auto so = dynamic_cast(&(node->operation()))) + auto so = + dynamic_cast(&(node->GetOperation()))) { auto addr = node->input(0)->origin(); JLM_ASSERT(dynamic_cast(&addr->type())); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 29b875450..114c0d308 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -90,7 +90,7 @@ trace_channel(const jlm::rvsdg::output * dst) } else if (auto so = dynamic_cast(dst)) { - if (auto co = dynamic_cast(&so->node()->operation())) + if (auto co = dynamic_cast(&so->node()->GetOperation())) { return co; } @@ -192,7 +192,7 @@ trace_function_calls( if (auto si = dynamic_cast(user)) { auto simplenode = si->node(); - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { // TODO: verify this is the right type of function call calls.push_back(simplenode); @@ -267,7 +267,7 @@ replace_decouple( jlm::rvsdg::simple_node * decouple_request, jlm::rvsdg::output * resp) { - JLM_ASSERT(dynamic_cast(&decouple_request->operation())); + JLM_ASSERT(dynamic_cast(&decouple_request->GetOperation())); auto channel = decouple_request->input(1)->origin(); auto channel_constant = trace_channel(channel); @@ -343,15 +343,16 @@ gather_mem_nodes( { continue; } - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { storeNodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast( + &simplenode->GetOperation())) { loadNodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast(&simplenode->GetOperation())) { // TODO: verify this is the right type of function call decoupleNodes.push_back(simplenode); @@ -393,15 +394,16 
@@ TracePointer( if (auto si = dynamic_cast(user)) { auto simplenode = si->node(); - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { storeNodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast( + &simplenode->GetOperation())) { loadNodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast(&simplenode->GetOperation())) { // TODO: verify this is the right type of function call decoupleNodes.push_back(simplenode); @@ -475,7 +477,7 @@ IsDecoupledFunctionPointer( if (auto simpleInput = dynamic_cast(user)) { auto simpleNode = simpleInput->node(); - if (dynamic_cast(&simpleNode->operation())) + if (dynamic_cast(&simpleNode->GetOperation())) { if (simpleNode->input(0)->origin() == output) { @@ -765,7 +767,7 @@ jlm::hls::ConnectRequestResponseMemPorts( auto loadOutput = dynamic_cast(smap.lookup(loadNode->output(0))); loadNodes.push_back(loadOutput->node()); auto loadOp = jlm::util::AssertedCast( - &loadOutput->node()->operation()); + &loadOutput->node()->GetOperation()); loadTypes.push_back(loadOp->GetLoadedType()); } std::vector storeNodes; @@ -800,19 +802,19 @@ jlm::hls::ConnectRequestResponseMemPorts( auto address = route_request(lambdaRegion, replacement->output(replacement->noutputs() - 1)); loadAddresses.push_back(address); std::shared_ptr type; - if (auto loadOperation = dynamic_cast(&replacement->operation())) + if (auto loadOperation = dynamic_cast(&replacement->GetOperation())) { type = loadOperation->GetLoadedType(); } else if ( auto loadOperation = - dynamic_cast(&replacement->operation())) + dynamic_cast(&replacement->GetOperation())) { type = loadOperation->GetLoadedType(); } else { - JLM_UNREACHABLE("Unknown load operation"); + JLM_UNREACHABLE("Unknown load GetOperation"); } JLM_ASSERT(type); loadTypes.push_back(type); @@ -829,8 +831,9 @@ jlm::hls::ConnectRequestResponseMemPorts( // TODO: routing is probably not 
necessary auto addr = route_request(lambdaRegion, replacement->output(1)); loadAddresses.push_back(addr); - loadTypes.push_back(dynamic_cast(&replacement->operation()) - ->GetLoadedType()); + loadTypes.push_back( + dynamic_cast(&replacement->GetOperation()) + ->GetLoadedType()); } std::vector storeOperands; for (size_t i = 0; i < storeNodes.size(); ++i) @@ -926,7 +929,7 @@ ReplaceDecouple( auto decoupleRequest = ((jlm::rvsdg::simple_output *)smap.lookup(originalDecoupleRequest->output(0)))->node(); - JLM_ASSERT(dynamic_cast(&decoupleRequest->operation())); + JLM_ASSERT(dynamic_cast(&decoupleRequest->GetOperation())); auto channel = decoupleRequest->input(1)->origin(); auto channelConstant = trace_channel(channel); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index f5e0c8922..7f2b49917 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -57,11 +57,12 @@ find_load_store( if (auto si = dynamic_cast(user)) { auto simplenode = si->node(); - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { store_nodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast( + &simplenode->GetOperation())) { load_nodes.push_back(simplenode); } @@ -102,7 +103,7 @@ find_loop_output(jlm::rvsdg::StructuralInput * sti) JLM_ASSERT(sti_arg->nusers() == 1); auto user = *sti_arg->begin(); auto si = dynamic_cast(user); - JLM_ASSERT(dynamic_cast(&si->node()->operation())); + JLM_ASSERT(dynamic_cast(&si->node()->GetOperation())); for (size_t i = 1; i < 3; ++i) { auto arg = si->node()->input(i)->origin(); @@ -112,11 +113,11 @@ find_loop_output(jlm::rvsdg::StructuralInput * sti) JLM_ASSERT(res); auto buffer_out = dynamic_cast(res->origin()); JLM_ASSERT(buffer_out); - JLM_ASSERT(dynamic_cast(&buffer_out->node()->operation())); + JLM_ASSERT(dynamic_cast(&buffer_out->node()->GetOperation())); auto branch_out = 
dynamic_cast(buffer_out->node()->input(0)->origin()); JLM_ASSERT(branch_out); - JLM_ASSERT(dynamic_cast(&branch_out->node()->operation())); + JLM_ASSERT(dynamic_cast(&branch_out->node()->GetOperation())); // branch for (size_t j = 0; j < 2; ++j) { @@ -138,7 +139,8 @@ get_parent_regions(jlm::rvsdg::Region * region) { std::deque regions; jlm::rvsdg::Region * target_region = region; - while (!dynamic_cast(&target_region->node()->operation())) + while ( + !dynamic_cast(&target_region->node()->GetOperation())) { regions.push_front(target_region); target_region = target_region->node()->region(); @@ -220,7 +222,7 @@ separate_load_edge( JLM_ASSERT(sti_arg->nusers() == 1); auto user = *sti_arg->begin(); auto si = dynamic_cast(user); - JLM_ASSERT(dynamic_cast(&si->node()->operation())); + JLM_ASSERT(dynamic_cast(&si->node()->GetOperation())); JLM_ASSERT(buffer->nusers() == 1); separate_load_edge( si->node()->output(0), @@ -235,7 +237,7 @@ separate_load_edge( else if (auto si = dynamic_cast(user)) { auto sn = si->node(); - auto op = &si->node()->operation(); + auto op = &si->node()->GetOperation(); if (auto br = dynamic_cast(op)) { @@ -272,7 +274,7 @@ separate_load_edge( JLM_ASSERT(mem_edge->nusers() == 1); auto mux_user = dynamic_cast(*mem_edge->begin()); JLM_ASSERT(mux_user); - auto mux_op = dynamic_cast(&mux_user->node()->operation()); + auto mux_op = dynamic_cast(&mux_user->node()->GetOperation()); JLM_ASSERT(mux_op); addr_edge = jlm::hls::mux_op::create( *mux_user->node()->input(0)->origin(), @@ -288,7 +290,7 @@ separate_load_edge( auto load_user_input = dynamic_cast(addr_edge_user); JLM_ASSERT(load_user_input); JLM_ASSERT( - dynamic_cast(&load_user_input->node()->operation())); + dynamic_cast(&load_user_input->node()->GetOperation())); return nullptr; } } @@ -316,10 +318,11 @@ separate_load_edge( user = *mem_edge->begin(); auto ui = dynamic_cast(user); if (ui - && dynamic_cast(&ui->node()->operation())) + && dynamic_cast( + &ui->node()->GetOperation())) { - auto msso = 
- dynamic_cast(&ui->node()->operation()); + auto msso = dynamic_cast( + &ui->node()->GetOperation()); // handle case where output of store is already connected to a MemStateSplit by adding an // output auto store_split = @@ -421,7 +424,7 @@ process_loops(jlm::rvsdg::output * state_edge) else if (auto si = dynamic_cast(user)) { auto sn = si->node(); - auto op = &si->node()->operation(); + auto op = &si->node()->GetOperation(); auto br = dynamic_cast(op); if (br && !br->loop) { @@ -535,7 +538,7 @@ jlm::hls::mem_queue(jlm::rvsdg::Region * region) JLM_ASSERT(entry_input); auto entry_node = entry_input->node(); JLM_ASSERT(dynamic_cast( - &entry_node->operation())); + &entry_node->GetOperation())); // for each state edge: // for each outer loop (theta/loop in lambda region): // split state edge before the loop diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 0ef624bc8..97785a40a 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -78,11 +78,11 @@ gather_mem_nodes(rvsdg::Region * region, std::vector } else if (auto simplenode = dynamic_cast(node)) { - if (dynamic_cast(&simplenode->operation())) + if (dynamic_cast(&simplenode->GetOperation())) { mem_nodes.push_back(simplenode); } - else if (dynamic_cast(&simplenode->operation())) + else if (dynamic_cast(&simplenode->GetOperation())) { mem_nodes.push_back(simplenode); } @@ -223,7 +223,7 @@ trace_edge( else if (auto si = dynamic_cast(user)) { auto sn = si->node(); - auto op = &si->node()->operation(); + auto op = &si->node()->GetOperation(); if (dynamic_cast(op)) { JLM_ASSERT(sn->noutputs() == 1); diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp index d879cfa04..d3d0a7991 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp @@ -36,8 +36,10 @@ memstate_conv(rvsdg::Region * region) } else if (auto simplenode = 
dynamic_cast(node)) { - if (dynamic_cast(&simplenode->operation()) - || dynamic_cast(&simplenode->operation())) + if (dynamic_cast( + &simplenode->GetOperation()) + || dynamic_cast( + &simplenode->GetOperation())) { auto new_outs = hls::fork_op::create(simplenode->noutputs(), *simplenode->input(0)->origin()); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index c0beebcbb..cf0faef79 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -37,7 +37,7 @@ eliminate_gamma_ctl(rvsdg::GammaNode * gamma) auto r = gamma->subregion(j)->result(i); if (auto so = dynamic_cast(r->origin())) { - if (auto ctl = dynamic_cast(&so->node()->operation())) + if (auto ctl = dynamic_cast(&so->node()->GetOperation())) { if (j == ctl->value().alternative()) { @@ -82,7 +82,7 @@ fix_match_inversion(rvsdg::GammaNode * old_gamma) auto r = old_gamma->subregion(j)->result(i); if (auto so = dynamic_cast(r->origin())) { - if (auto ctl = dynamic_cast(&so->node()->operation())) + if (auto ctl = dynamic_cast(&so->node()->GetOperation())) { if (j != ctl->value().alternative()) { @@ -104,7 +104,7 @@ fix_match_inversion(rvsdg::GammaNode * old_gamma) { return false; } - if (auto match = dynamic_cast(&no->node()->operation())) + if (auto match = dynamic_cast(&no->node()->GetOperation())) { if (match->nalternatives() == 2) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index 85f49215f..c7f69d296 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -16,15 +16,15 @@ eliminate_buf(jlm::rvsdg::output * o) if (auto so = dynamic_cast(o)) { auto node = so->node(); - if (dynamic_cast(&node->operation())) + if (dynamic_cast(&node->GetOperation())) { return eliminate_buf(node->input(1)->origin()); } - else if (dynamic_cast(&node->operation())) + else if 
(dynamic_cast(&node->GetOperation())) { return true; } - else if (dynamic_cast(&node->operation())) + else if (dynamic_cast(&node->GetOperation())) { return true; } @@ -46,7 +46,7 @@ remove_redundant_buf(rvsdg::Region * region) } else if (dynamic_cast(node)) { - if (auto buf = dynamic_cast(&node->operation())) + if (auto buf = dynamic_cast(&node->GetOperation())) { if (std::dynamic_pointer_cast(buf->argument(0))) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index 09dbf890b..6fbb4c493 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -46,7 +46,7 @@ remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) { if (auto simplenode = dynamic_cast(node)) { - if (dynamic_cast(&node->operation())) + if (dynamic_cast(&node->GetOperation())) { std::vector nv; for (size_t i = 0; i < simplenode->ninputs(); ++i) @@ -54,7 +54,7 @@ remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) if (auto so = dynamic_cast(simplenode->input(i)->origin())) { if (dynamic_cast( - &so->node()->operation())) + &so->node()->GetOperation())) { // skip things coming from entry continue; @@ -68,7 +68,7 @@ remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) auto entry_node = dynamic_cast(simplenode->input(0)->origin())->node(); JLM_ASSERT(dynamic_cast( - &entry_node->operation())); + &entry_node->GetOperation())); simplenode->output(0)->divert_users(entry_node->input(0)->origin()); remove(simplenode); remove(entry_node); @@ -80,7 +80,8 @@ remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) remove(simplenode); } } - else if (dynamic_cast(&node->operation())) + else if (dynamic_cast( + &node->GetOperation())) { std::vector nv; for (size_t i = 0; i < simplenode->noutputs(); ++i) diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp index 
21d942d93..e2d4ad476 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp @@ -128,7 +128,7 @@ remove_unused_loop_inputs(loop_node * ln) bool dead_spec_gamma(jlm::rvsdg::node * dmux_node) { - auto mux_op = dynamic_cast(&dmux_node->operation()); + auto mux_op = dynamic_cast(&dmux_node->GetOperation()); JLM_ASSERT(mux_op); JLM_ASSERT(mux_op->discarding); // check if all inputs have the same origin @@ -154,7 +154,7 @@ dead_spec_gamma(jlm::rvsdg::node * dmux_node) bool dead_nonspec_gamma(jlm::rvsdg::node * ndmux_node) { - auto mux_op = dynamic_cast(&ndmux_node->operation()); + auto mux_op = dynamic_cast(&ndmux_node->GetOperation()); JLM_ASSERT(mux_op); JLM_ASSERT(!mux_op->discarding); // check if all inputs go to outputs of same branch @@ -164,7 +164,7 @@ dead_nonspec_gamma(jlm::rvsdg::node * ndmux_node) { if (auto no = dynamic_cast(ndmux_node->input(i)->origin())) { - if (dynamic_cast(&no->node()->operation()) && no->nusers() == 1) + if (dynamic_cast(&no->node()->GetOperation()) && no->nusers() == 1) { if (i == 1) { @@ -195,7 +195,7 @@ dead_nonspec_gamma(jlm::rvsdg::node * ndmux_node) bool dead_loop(jlm::rvsdg::node * ndmux_node) { - auto mux_op = dynamic_cast(&ndmux_node->operation()); + auto mux_op = dynamic_cast(&ndmux_node->GetOperation()); JLM_ASSERT(mux_op); JLM_ASSERT(!mux_op->discarding); // origin is a backedege argument @@ -210,7 +210,7 @@ dead_loop(jlm::rvsdg::node * ndmux_node) return false; } auto branch_in = dynamic_cast(*ndmux_node->output(0)->begin()); - if (!branch_in || !dynamic_cast(&branch_in->node()->operation())) + if (!branch_in || !dynamic_cast(&branch_in->node()->GetOperation())) { return false; } @@ -220,7 +220,7 @@ dead_loop(jlm::rvsdg::node * ndmux_node) return false; } auto buf_in = dynamic_cast(*branch_in->node()->output(1)->begin()); - if (!buf_in || !dynamic_cast(&buf_in->node()->operation())) + if (!buf_in || !dynamic_cast(&buf_in->node()->GetOperation())) { return false; } @@ -234,14 
+234,14 @@ dead_loop(jlm::rvsdg::node * ndmux_node) auto branch_cond_origin = branch_in->node()->input(0)->origin(); auto pred_buf_out = dynamic_cast(ndmux_node->input(0)->origin()); if (!pred_buf_out - || !dynamic_cast(&pred_buf_out->node()->operation())) + || !dynamic_cast(&pred_buf_out->node()->GetOperation())) { return false; } auto pred_buf_cond_origin = pred_buf_out->node()->input(0)->origin(); // TODO: remove this once predicate buffers decouple combinatorial loops auto extra_buf_out = dynamic_cast(pred_buf_cond_origin); - if (!extra_buf_out || !dynamic_cast(&extra_buf_out->node()->operation())) + if (!extra_buf_out || !dynamic_cast(&extra_buf_out->node()->GetOperation())) { return false; } @@ -279,16 +279,16 @@ dne(rvsdg::Region * sr) { if (!node->has_users()) { - if (dynamic_cast(&node->operation())) + if (dynamic_cast(&node->GetOperation())) { // TODO: fix this once memory connections are explicit continue; } - else if (dynamic_cast(&node->operation())) + else if (dynamic_cast(&node->GetOperation())) { continue; } - else if (dynamic_cast(&node->operation())) + else if (dynamic_cast(&node->GetOperation())) { // TODO: fix - this scenario has only stores and should just be optimized away completely continue; @@ -304,7 +304,7 @@ dne(rvsdg::Region * sr) changed |= remove_loop_passthrough(ln); changed |= dne(ln->subregion()); } - else if (auto mux = dynamic_cast(&node->operation())) + else if (auto mux = dynamic_cast(&node->GetOperation())) { if (mux->discarding) { diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 532e52209..2f6dd010d 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -145,7 +145,7 @@ inline_calls(rvsdg::Region * region) inline_calls(structnode->subregion(n)); } } - else if (dynamic_cast(&(node->operation()))) + else if (dynamic_cast(&(node->GetOperation()))) { auto traced = jlm::hls::trace_call(node->input(0)); auto so = 
dynamic_cast(traced); @@ -187,7 +187,7 @@ convert_alloca(rvsdg::Region * region) convert_alloca(structnode->subregion(n)); } } - else if (auto po = dynamic_cast(&(node->operation()))) + else if (auto po = dynamic_cast(&(node->GetOperation()))) { auto rr = region->graph()->root(); auto delta_name = jlm::util::strfmt("hls_alloca_", alloca_cnt++); @@ -220,7 +220,7 @@ convert_alloca(rvsdg::Region * region) JLM_ASSERT(node->output(1)->nusers() == 1); auto mux_in = *node->output(1)->begin(); auto mux_node = rvsdg::input::GetNode(*mux_in); - if (dynamic_cast(&mux_node->operation())) + if (dynamic_cast(&mux_node->GetOperation())) { // merge after alloca -> remove merge JLM_ASSERT(mux_node->ninputs() == 2); @@ -379,7 +379,7 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) } else { - throw jlm::util::error("Unsupported node type: " + orig_node->operation().debug_string()); + throw util::error("Unsupported node type: " + orig_node->GetOperation().debug_string()); } } // copy function into rhls diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index c31472c86..91be7cef4 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -247,7 +247,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & } if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) - && n1->operation() == n2->operation() && n1->ninputs() == n2->ninputs() + && n1->GetOperation() == n2->GetOperation() && n1->ninputs() == n2->ninputs() && o1->index() == o2->index()) { for (size_t n = 0; n < n1->ninputs(); n++) @@ -292,7 +292,7 @@ mark(jlm::rvsdg::Region *, cnectx &); static void mark_gamma(const rvsdg::StructuralNode * node, cnectx & ctx) { - JLM_ASSERT(rvsdg::is(node->operation())); + JLM_ASSERT(rvsdg::is(node->GetOperation())); /* mark entry variables */ for (size_t i1 = 1; i1 < node->ninputs(); i1++) @@ -418,7 +418,7 @@ mark(const rvsdg::StructuralNode * node, cnectx & ctx) { typeid(llvm::phi::operation), mark_phi }, { typeid(llvm::delta::operation), 
mark_delta } }); - auto & op = node->operation(); + auto & op = node->GetOperation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); map[typeid(op)](node, ctx); } @@ -430,7 +430,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) { for (const auto & other : node->region()->TopNodes()) { - if (&other != node && node->operation() == other.operation()) + if (&other != node && node->GetOperation() == other.GetOperation()) { ctx.mark(node, &other); break; @@ -446,7 +446,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) { auto ni = dynamic_cast(user); auto other = ni ? ni->node() : nullptr; - if (!other || other == node || other->operation() != node->operation() + if (!other || other == node || other->GetOperation() != node->GetOperation() || other->ninputs() != node->ninputs()) continue; @@ -580,7 +580,7 @@ divert(rvsdg::StructuralNode * node, cnectx & ctx) { typeid(llvm::phi::operation), divert_phi }, { typeid(llvm::delta::operation), divert_delta } }); - auto & op = node->operation(); + auto & op = node->GetOperation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); map[typeid(op)](node, ctx); } diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 87fa97aed..5c1b7a6de 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -168,7 +168,7 @@ structural_node_to_dot(rvsdg::StructuralNode * structuralNode) dot << "subgraph cluster_sn" << hex((intptr_t)structuralNode) << " {\n"; dot << "color=\"#ff8080\"\n"; dot << "penwidth=6\n"; - dot << "label=\"" << structuralNode->operation().debug_string() << "\"\n"; + dot << "label=\"" << structuralNode->GetOperation().debug_string() << "\"\n"; dot << "labeljust=l\n"; // input nodes @@ -240,7 +240,7 @@ simple_node_to_dot(jlm::rvsdg::simple_node * simpleNode) { auto SPACER = " \n"; auto name = get_dot_name(simpleNode); - auto opname = simpleNode->operation().debug_string(); + auto opname = simpleNode->GetOperation().debug_string(); std::replace_if(opname.begin(), opname.end(), 
isForbiddenChar, '_'); std::ostringstream inputs; diff --git a/jlm/llvm/backend/dot/DotWriter.cpp b/jlm/llvm/backend/dot/DotWriter.cpp index 2993ea5dd..8576bcf4f 100644 --- a/jlm/llvm/backend/dot/DotWriter.cpp +++ b/jlm/llvm/backend/dot/DotWriter.cpp @@ -162,7 +162,7 @@ CreateGraphNodes(util::Graph & graph, rvsdg::Region & region, util::Graph * type for (const auto rvsdgNode : traverser) { auto & node = graph.CreateInOutNode(rvsdgNode->ninputs(), rvsdgNode->noutputs()); - node.SetLabel(rvsdgNode->operation().debug_string()); + node.SetLabel(rvsdgNode->GetOperation().debug_string()); node.SetProgramObject(*rvsdgNode); for (size_t i = 0; i < rvsdgNode->ninputs(); i++) diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index af052cc97..7532530a3 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -90,7 +90,7 @@ create_initialization(const delta::node * delta, context & ctx) operands.push_back(ctx.variable(node->input(n)->origin())); /* convert node to tac */ - auto & op = *static_cast(&node->operation()); + auto & op = *static_cast(&node->GetOperation()); tacs.push_back(tac::create(op, operands)); ctx.insert(output, tacs.back()->result(0)); } @@ -161,13 +161,13 @@ create_cfg(const lambda::node & lambda, context & ctx) static inline void convert_simple_node(const rvsdg::node & node, context & ctx) { - JLM_ASSERT(dynamic_cast(&node.operation())); + JLM_ASSERT(dynamic_cast(&node.GetOperation())); std::vector operands; for (size_t n = 0; n < node.ninputs(); n++) operands.push_back(ctx.variable(node.input(n)->origin())); - auto & op = *static_cast(&node.operation()); + auto & op = *static_cast(&node.GetOperation()); ctx.lpbb()->append_last(tac::create(op, operands)); for (size_t n = 0; n < node.noutputs(); n++) @@ -207,7 +207,7 @@ convert_empty_gamma_node(const rvsdg::GammaNode * gamma, context & ctx) auto matchnode = rvsdg::output::GetNode(*predicate); if (is(matchnode)) { - 
auto matchop = static_cast(&matchnode->operation()); + auto matchop = static_cast(&matchnode->GetOperation()); auto d = matchop->default_alternative(); auto c = ctx.variable(matchnode->input(0)->origin()); auto t = d == 0 ? ctx.variable(o1) : ctx.variable(o0); @@ -309,7 +309,7 @@ convert_gamma_node(const rvsdg::node & node, context & ctx) { /* use select instead of phi */ auto matchnode = rvsdg::output::GetNode(*predicate); - auto matchop = static_cast(&matchnode->operation()); + auto matchop = static_cast(&matchnode->GetOperation()); auto d = matchop->default_alternative(); auto c = ctx.variable(matchnode->input(0)->origin()); auto t = d == 0 ? arguments[1].first : arguments[0].first; @@ -526,13 +526,13 @@ convert_node(const rvsdg::node & node, context & ctx) { typeid(phi::operation), convert_phi_node }, { typeid(delta::operation), convert_delta_node } }); - if (dynamic_cast(&node.operation())) + if (dynamic_cast(&node.GetOperation())) { convert_simple_node(node, ctx); return; } - auto & op = node.operation(); + auto & op = node.GetOperation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); map[typeid(op)](node, ctx); } diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 3c0d8819e..7222fa1db 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -11,6 +11,12 @@ namespace jlm::llvm { +const LoadOperation & +LoadNode::GetOperation() const noexcept +{ + return *util::AssertedCast(&simple_node::GetOperation()); +} + LoadNonVolatileOperation::~LoadNonVolatileOperation() noexcept = default; bool @@ -44,7 +50,7 @@ LoadNonVolatileOperation::NumMemoryStates() const noexcept const LoadNonVolatileOperation & LoadNonVolatileNode::GetOperation() const noexcept { - return *util::AssertedCast(&operation()); + return *util::AssertedCast(&simple_node::GetOperation()); } [[nodiscard]] LoadNode::MemoryStateInputRange @@ -120,7 +126,7 @@ LoadVolatileOperation::NumMemoryStates() const noexcept [[nodiscard]] const 
LoadVolatileOperation & LoadVolatileNode::GetOperation() const noexcept { - return *util::AssertedCast(&operation()); + return *util::AssertedCast(&LoadNode::GetOperation()); } [[nodiscard]] LoadNode::MemoryStateInputRange @@ -577,8 +583,8 @@ load_normal_form::load_normal_form( bool load_normal_form::normalize_node(rvsdg::node * node) const { - JLM_ASSERT(is(node->operation())); - auto op = static_cast(&node->operation()); + JLM_ASSERT(is(node->GetOperation())); + auto op = static_cast(&node->GetOperation()); auto operands = rvsdg::operands(node); if (!get_mutable()) diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index 700d8b901..c1f8aaa54 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -303,8 +303,8 @@ class LoadNode : public rvsdg::simple_node using MemoryStateInputRange = util::iterator_range; using MemoryStateOutputRange = util::iterator_range; - [[nodiscard]] virtual const LoadOperation & - GetOperation() const noexcept = 0; + [[nodiscard]] const LoadOperation & + GetOperation() const noexcept override; [[nodiscard]] size_t NumMemoryStates() const noexcept diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 4831fc851..8c5c7e957 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp @@ -35,6 +35,12 @@ operation::copy() const node::~node() {} +[[nodiscard]] const phi::operation & +node::GetOperation() const noexcept +{ + return *static_cast(&StructuralNode::GetOperation()); +} + cvinput * node::input(size_t n) const noexcept { diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index 3d4df7d05..b079113c0 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -381,11 +381,8 @@ class node final : public rvsdg::StructuralNode return StructuralNode::subregion(0); } - const phi::operation & - operation() const noexcept - { - return *static_cast(&jlm::rvsdg::node::operation()); - } + [[nodiscard]] 
const phi::operation & + GetOperation() const noexcept override; cvargument * add_ctxvar(jlm::rvsdg::output * origin); diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 8bea5dc1e..291297717 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -10,6 +10,12 @@ namespace jlm::llvm { +const StoreOperation & +StoreNode::GetOperation() const noexcept +{ + return *util::AssertedCast(&simple_node::GetOperation()); +} + StoreNonVolatileOperation::~StoreNonVolatileOperation() noexcept = default; bool @@ -42,7 +48,7 @@ StoreNonVolatileOperation::NumMemoryStates() const noexcept [[nodiscard]] const StoreNonVolatileOperation & StoreNonVolatileNode::GetOperation() const noexcept { - return *util::AssertedCast(&operation()); + return *util::AssertedCast(&StoreNode::GetOperation()); } [[nodiscard]] StoreNode::MemoryStateInputRange @@ -118,7 +124,7 @@ StoreVolatileOperation::NumMemoryStates() const noexcept [[nodiscard]] const StoreVolatileOperation & StoreVolatileNode::GetOperation() const noexcept { - return *util::AssertedCast(&operation()); + return *util::AssertedCast(&StoreNode::GetOperation()); } [[nodiscard]] StoreNode::MemoryStateInputRange @@ -205,7 +211,7 @@ is_store_store_reducible( return false; } - auto other = static_cast(&storenode->operation()); + auto other = static_cast(&storenode->GetOperation()); JLM_ASSERT(op.GetAlignment() == other->GetAlignment()); return true; } @@ -217,7 +223,7 @@ is_store_alloca_reducible(const std::vector & operands) return false; auto alloca = jlm::rvsdg::output::GetNode(*operands[0]); - if (!alloca || !is(alloca->operation())) + if (!alloca || !is(alloca->GetOperation())) return false; std::unordered_set states( @@ -327,8 +333,8 @@ store_normal_form::store_normal_form( bool store_normal_form::normalize_node(jlm::rvsdg::node * node) const { - JLM_ASSERT(is(node->operation())); - auto op = static_cast(&node->operation()); + JLM_ASSERT(is(node->GetOperation())); + auto op 
= static_cast(&node->GetOperation()); auto operands = jlm::rvsdg::operands(node); if (!get_mutable()) diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index bf23eeeb2..b38b20625 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -260,8 +260,8 @@ class StoreNode : public rvsdg::simple_node using MemoryStateInputRange = util::iterator_range; using MemoryStateOutputRange = util::iterator_range; - [[nodiscard]] virtual const StoreOperation & - GetOperation() const noexcept = 0; + [[nodiscard]] const StoreOperation & + GetOperation() const noexcept override; [[nodiscard]] size_t NumMemoryStates() const noexcept diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index f63554db4..bbfbc454d 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -264,9 +264,9 @@ class CallNode final : public jlm::rvsdg::simple_node public: [[nodiscard]] const CallOperation & - GetOperation() const noexcept + GetOperation() const noexcept override { - return *jlm::util::AssertedCast(&operation()); + return *jlm::util::AssertedCast(&simple_node::GetOperation()); } /** diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index d97756659..5903ff7dc 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -42,6 +42,12 @@ operation::operator==(const jlm::rvsdg::operation & other) const noexcept node::~node() {} +const delta::operation & +node::GetOperation() const noexcept +{ + return *util::AssertedCast(&StructuralNode::GetOperation()); +} + delta::node * node::copy(rvsdg::Region * region, const std::vector & operands) const { diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 077f896fd..6505c6469 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -156,46 +156,43 @@ class node final : public rvsdg::StructuralNode return StructuralNode::subregion(0); } - 
const delta::operation & - operation() const noexcept - { - return *static_cast(&StructuralNode::operation()); - } + [[nodiscard]] const delta::operation & + GetOperation() const noexcept override; [[nodiscard]] const rvsdg::ValueType & type() const noexcept { - return operation().type(); + return GetOperation().type(); } [[nodiscard]] const std::shared_ptr & Type() const noexcept { - return operation().Type(); + return GetOperation().Type(); } const std::string & name() const noexcept { - return operation().name(); + return GetOperation().name(); } [[nodiscard]] const std::string & Section() const noexcept { - return operation().Section(); + return GetOperation().Section(); } const llvm::linkage & linkage() const noexcept { - return operation().linkage(); + return GetOperation().linkage(); } bool constant() const noexcept { - return operation().constant(); + return GetOperation().constant(); } size_t diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 072efd658..c208266ff 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -41,6 +41,12 @@ operation::copy() const node::~node() = default; +const lambda::operation & +node::GetOperation() const noexcept +{ + return *jlm::util::AssertedCast(&StructuralNode::GetOperation()); +} + node::fctargument_range node::fctarguments() { diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 1a043a322..987962302 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -186,39 +186,36 @@ class node final : public rvsdg::StructuralNode } [[nodiscard]] const lambda::operation & - operation() const noexcept - { - return *jlm::util::AssertedCast(&StructuralNode::operation()); - } + GetOperation() const noexcept override; [[nodiscard]] const jlm::llvm::FunctionType & type() const noexcept { - return operation().type(); + return GetOperation().type(); } [[nodiscard]] const std::shared_ptr & Type() const 
noexcept { - return operation().Type(); + return GetOperation().Type(); } [[nodiscard]] const std::string & name() const noexcept { - return operation().name(); + return GetOperation().name(); } [[nodiscard]] const jlm::llvm::linkage & linkage() const noexcept { - return operation().linkage(); + return GetOperation().linkage(); } [[nodiscard]] const jlm::llvm::attributeset & attributes() const noexcept { - return operation().attributes(); + return GetOperation().attributes(); } [[nodiscard]] size_t diff --git a/jlm/llvm/ir/operators/operators.cpp b/jlm/llvm/ir/operators/operators.cpp index 5c341b216..14166f5ea 100644 --- a/jlm/llvm/ir/operators/operators.cpp +++ b/jlm/llvm/ir/operators/operators.cpp @@ -437,7 +437,7 @@ zext_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * opera { if (path == rvsdg::unop_reduction_constant) { - auto c = static_cast(&producer(operand)->operation()); + auto c = static_cast(&producer(operand)->GetOperation()); return create_bitconstant( rvsdg::output::GetNode(*operand)->region(), c->value().zext(ndstbits() - nsrcbits())); diff --git a/jlm/llvm/ir/operators/sext.cpp b/jlm/llvm/ir/operators/sext.cpp index 9c31d9c4f..9e54460c6 100644 --- a/jlm/llvm/ir/operators/sext.cpp +++ b/jlm/llvm/ir/operators/sext.cpp @@ -33,7 +33,7 @@ is_inverse_reducible(const sext_op & op, const rvsdg::output * operand) if (!node) return false; - auto top = dynamic_cast(&node->operation()); + auto top = dynamic_cast(&node->GetOperation()); return top && top->nsrcbits() == op.ndstbits(); } @@ -43,7 +43,7 @@ perform_bitunary_reduction(const sext_op & op, rvsdg::output * operand) JLM_ASSERT(is_bitunary_reducible(operand)); auto unary = rvsdg::output::GetNode(*operand); auto region = operand->region(); - auto uop = static_cast(&unary->operation()); + auto uop = static_cast(&unary->GetOperation()); auto output = sext_op::create(op.ndstbits(), unary->input(0)->origin()); return rvsdg::simple_node::create_normalized(region, 
*uop->create(op.ndstbits()), { output })[0]; @@ -55,7 +55,7 @@ perform_bitbinary_reduction(const sext_op & op, rvsdg::output * operand) JLM_ASSERT(is_bitbinary_reducible(operand)); auto binary = rvsdg::output::GetNode(*operand); auto region = operand->region(); - auto bop = static_cast(&binary->operation()); + auto bop = static_cast(&binary->GetOperation()); JLM_ASSERT(binary->ninputs() == 2); auto op1 = sext_op::create(op.ndstbits(), binary->input(0)->origin()); @@ -119,7 +119,7 @@ sext_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * opera { if (path == rvsdg::unop_reduction_constant) { - auto c = static_cast(&producer(operand)->operation()); + auto c = static_cast(&producer(operand)->GetOperation()); return create_bitconstant(operand->region(), c->value().sext(ndstbits() - nsrcbits())); } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index f369805a6..e06c64b89 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -372,7 +372,7 @@ DeadNodeElimination::SweepStructuralNode(rvsdg::StructuralNode & node) const { typeid(phi::operation), sweepPhi }, { typeid(delta::operation), sweepDelta } }); - auto & op = node.operation(); + auto & op = node.GetOperation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); map[typeid(op)](*this, node); } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 784c05098..b708c0d42 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -610,7 +610,7 @@ class Andersen::Statistics final : public util::Statistics void Andersen::AnalyzeSimpleNode(const rvsdg::simple_node & node) { - const auto & op = node.operation(); + const auto & op = node.GetOperation(); if (is(op)) AnalyzeAlloca(node); @@ -663,7 +663,7 @@ Andersen::AnalyzeSimpleNode(const rvsdg::simple_node & node) void Andersen::AnalyzeAlloca(const rvsdg::simple_node & node) { - const 
auto allocaOp = util::AssertedCast(&node.operation()); + const auto allocaOp = util::AssertedCast(&node.GetOperation()); const auto & outputRegister = *node.output(0); const auto outputRegisterPO = Set_->CreateRegisterPointerObject(outputRegister); diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp index 078476ebb..bde84174f 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.cpp @@ -516,11 +516,11 @@ PointsToGraph::RegisterNode::ToString(const rvsdg::output & output) auto node = jlm::rvsdg::output::GetNode(*&output); if (node != nullptr) - return util::strfmt(node->operation().debug_string(), ":o", output.index()); + return util::strfmt(node->GetOperation().debug_string(), ":o", output.index()); node = output.region()->node(); if (node != nullptr) - return util::strfmt(node->operation().debug_string(), ":a", output.index()); + return util::strfmt(node->GetOperation().debug_string(), ":a", output.index()); if (auto graphImport = dynamic_cast(&output)) { @@ -554,7 +554,7 @@ PointsToGraph::AllocaNode::~AllocaNode() noexcept = default; std::string PointsToGraph::AllocaNode::DebugString() const { - return GetAllocaNode().operation().debug_string(); + return GetAllocaNode().GetOperation().debug_string(); } PointsToGraph::DeltaNode::~DeltaNode() noexcept = default; @@ -562,7 +562,7 @@ PointsToGraph::DeltaNode::~DeltaNode() noexcept = default; std::string PointsToGraph::DeltaNode::DebugString() const { - return GetDeltaNode().operation().debug_string(); + return GetDeltaNode().GetOperation().debug_string(); } PointsToGraph::LambdaNode::~LambdaNode() noexcept = default; @@ -570,7 +570,7 @@ PointsToGraph::LambdaNode::~LambdaNode() noexcept = default; std::string PointsToGraph::LambdaNode::DebugString() const { - return GetLambdaNode().operation().debug_string(); + return GetLambdaNode().GetOperation().debug_string(); } PointsToGraph::MallocNode::~MallocNode() 
noexcept = default; @@ -578,7 +578,7 @@ PointsToGraph::MallocNode::~MallocNode() noexcept = default; std::string PointsToGraph::MallocNode::DebugString() const { - return GetMallocNode().operation().debug_string(); + return GetMallocNode().GetOperation().debug_string(); } PointsToGraph::ImportNode::~ImportNode() noexcept = default; diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index 1ab3198ec..ce6eb5dbf 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -744,7 +744,7 @@ RegionAwareMemoryNodeProvider::AnnotateStore(const StoreNode & storeNode) void RegionAwareMemoryNodeProvider::AnnotateAlloca(const rvsdg::simple_node & allocaNode) { - JLM_ASSERT(is(allocaNode.operation())); + JLM_ASSERT(is(allocaNode.GetOperation())); auto & memoryNode = Provisioning_->GetPointsToGraph().GetAllocaNode(allocaNode); auto & regionSummary = Provisioning_->GetRegionSummary(*allocaNode.region()); @@ -754,7 +754,7 @@ RegionAwareMemoryNodeProvider::AnnotateAlloca(const rvsdg::simple_node & allocaN void RegionAwareMemoryNodeProvider::AnnotateMalloc(const rvsdg::simple_node & mallocNode) { - JLM_ASSERT(is(mallocNode.operation())); + JLM_ASSERT(is(mallocNode.GetOperation())); auto & memoryNode = Provisioning_->GetPointsToGraph().GetMallocNode(mallocNode); auto & regionSummary = Provisioning_->GetRegionSummary(*mallocNode.region()); @@ -764,7 +764,7 @@ RegionAwareMemoryNodeProvider::AnnotateMalloc(const rvsdg::simple_node & mallocN void RegionAwareMemoryNodeProvider::AnnotateFree(const rvsdg::simple_node & freeNode) { - JLM_ASSERT(is(freeNode.operation())); + JLM_ASSERT(is(freeNode.GetOperation())); auto memoryNodes = Provisioning_->GetOutputNodes(*freeNode.input(0)->origin()); auto & regionSummary = Provisioning_->GetRegionSummary(*freeNode.region()); @@ -818,7 +818,7 @@ 
RegionAwareMemoryNodeProvider::AnnotateCall(const CallNode & callNode) void RegionAwareMemoryNodeProvider::AnnotateMemcpy(const rvsdg::simple_node & memcpyNode) { - JLM_ASSERT(is(memcpyNode.operation())); + JLM_ASSERT(is(memcpyNode.GetOperation())); auto & regionSummary = Provisioning_->GetRegionSummary(*memcpyNode.region()); @@ -1048,7 +1048,7 @@ RegionAwareMemoryNodeProvider::ToRegionTree( { if (auto structuralNode = dynamic_cast(&node)) { - subtree += util::strfmt(indent(depth), structuralNode->operation().debug_string(), "\n"); + subtree += util::strfmt(indent(depth), structuralNode->GetOperation().debug_string(), "\n"); for (size_t n = 0; n < structuralNode->nsubregions(); n++) { subtree += toRegionTree(structuralNode->subregion(n), depth + 1); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 7d5ec54b5..26332e561 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -204,50 +204,50 @@ class RegisterLocation final : public Location if (jlm::rvsdg::is(node)) { - auto nodestr = node->operation().debug_string(); + auto nodestr = node->GetOperation().debug_string(); auto outputstr = Output_->type().debug_string(); return jlm::util::strfmt(nodestr, ":", index, "[" + outputstr + "]"); } if (is(Output_)) { - auto dbgstr = Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":cv:", index); } if (is(Output_)) { - auto dbgstr = Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg:", index); } if (is(Output_)) { - auto dbgstr = Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":cv:", index); } if (is(Output_)) { - auto dbgstr 
= Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg", index); } if (is(Output_)) { - auto dbgstr = Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg", index); } if (is(Output_)) { - auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->operation().debug_string(); + auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); } if (is(Output_)) { - auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->operation().debug_string(); + auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); } @@ -258,18 +258,18 @@ class RegisterLocation final : public Location if (is(Output_)) { - auto dbgstr = Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":rvarg", index); } if (is(Output_)) { - auto dbgstr = Output_->region()->node()->operation().debug_string(); + auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":cvarg", index); } return jlm::util::strfmt( - jlm::rvsdg::output::GetNode(*Output_)->operation().debug_string(), + rvsdg::output::GetNode(*Output_)->GetOperation().debug_string(), ":", index); } @@ -330,7 +330,7 @@ class AllocaLocation final : public MemoryLocation [[nodiscard]] std::string DebugString() const noexcept override { - return Node_.operation().debug_string(); + return Node_.GetOperation().debug_string(); } static std::unique_ptr @@ -368,7 +368,7 @@ class MallocLocation final : public MemoryLocation [[nodiscard]] std::string DebugString() const noexcept override { - return Node_.operation().debug_string(); + return 
Node_.GetOperation().debug_string(); } static std::unique_ptr @@ -404,7 +404,7 @@ class LambdaLocation final : public MemoryLocation [[nodiscard]] std::string DebugString() const noexcept override { - return Lambda_.operation().debug_string(); + return Lambda_.GetOperation().debug_string(); } static std::unique_ptr @@ -441,7 +441,7 @@ class DeltaLocation final : public MemoryLocation [[nodiscard]] std::string DebugString() const noexcept override { - return Delta_.operation().debug_string(); + return Delta_.GetOperation().debug_string(); } static std::unique_ptr @@ -1158,7 +1158,7 @@ Steensgaard::AnalyzeCall(const CallNode & callNode) void Steensgaard::AnalyzeDirectCall(const CallNode & callNode, const lambda::node & lambdaNode) { - auto & lambdaFunctionType = lambdaNode.operation().type(); + auto & lambdaFunctionType = lambdaNode.GetOperation().type(); auto & callFunctionType = *callNode.GetOperation().GetFunctionType(); if (callFunctionType != lambdaFunctionType) { diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index f4e3ed9d4..5d8bb071f 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -231,7 +231,7 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & } if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) - && n1->operation() == n2->operation() && n1->ninputs() == n2->ninputs() + && n1->GetOperation() == n2->GetOperation() && n1->ninputs() == n2->ninputs() && o1->index() == o2->index()) { for (size_t n = 0; n < n1->ninputs(); n++) @@ -276,7 +276,7 @@ mark(rvsdg::Region *, cnectx &); static void mark_gamma(const rvsdg::StructuralNode * node, cnectx & ctx) { - JLM_ASSERT(rvsdg::is(node->operation())); + JLM_ASSERT(rvsdg::is(node->GetOperation())); /* mark entry variables */ for (size_t i1 = 1; i1 < node->ninputs(); i1++) @@ -379,7 +379,7 @@ mark(const rvsdg::StructuralNode * node, cnectx & ctx) { typeid(phi::operation), mark_phi }, { typeid(delta::operation), mark_delta } }); - auto & op = node->operation(); + auto & 
op = node->GetOperation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); map[typeid(op)](node, ctx); } @@ -391,7 +391,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) { for (const auto & other : node->region()->TopNodes()) { - if (&other != node && node->operation() == other.operation()) + if (&other != node && node->GetOperation() == other.GetOperation()) { ctx.mark(node, &other); break; @@ -407,7 +407,7 @@ mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) { auto ni = dynamic_cast(user); auto other = ni ? ni->node() : nullptr; - if (!other || other == node || other->operation() != node->operation() + if (!other || other == node || other->GetOperation() != node->GetOperation() || other->ninputs() != node->ninputs()) continue; @@ -532,7 +532,7 @@ divert(rvsdg::StructuralNode * node, cnectx & ctx) { typeid(phi::operation), divert_phi }, { typeid(delta::operation), divert_delta } }); - auto & op = node->operation(); + auto & op = node->GetOperation(); JLM_ASSERT(map.find(typeid(op)) != map.end()); map[typeid(op)](node, ctx); } diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index d39a2b4db..02751a2fc 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -329,7 +329,7 @@ pushout_store(jlm::rvsdg::node * storenode) JLM_ASSERT(is(storenode->region()->node())); JLM_ASSERT(jlm::rvsdg::is(storenode) && is_movable_store(storenode)); auto theta = static_cast(storenode->region()->node()); - auto storeop = static_cast(&storenode->operation()); + auto storeop = static_cast(&storenode->GetOperation()); auto oaddress = static_cast(storenode->input(0)->origin()); auto ovalue = storenode->input(1)->origin(); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 692aa6cc9..88ab3cd67 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -117,7 +117,7 @@ unrollinfo::niterations() const noexcept auto step = is_additive() ? *step_value() : step_value()->neg(); auto end = is_additive() ? 
*end_value() : *init_value(); - if (is_eqcmp(cmpnode()->operation())) + if (is_eqcmp(cmpnode()->GetOperation())) end = end.add({ nbits(), 1 }); auto range = end.sub(start); diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index 1417890a7..8c780e509 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -118,7 +118,7 @@ class unrollinfo final [[nodiscard]] const rvsdg::SimpleOperation & cmpoperation() const noexcept { - return *static_cast(&cmpnode()->operation()); + return *static_cast(&cmpnode()->GetOperation()); } inline jlm::rvsdg::node * @@ -130,7 +130,7 @@ class unrollinfo final [[nodiscard]] const rvsdg::SimpleOperation & armoperation() const noexcept { - return *static_cast(&armnode()->operation()); + return *static_cast(&armnode()->GetOperation()); } inline rvsdg::RegionArgument * @@ -190,8 +190,8 @@ class unrollinfo final inline size_t nbits() const noexcept { - JLM_ASSERT(dynamic_cast(&cmpnode()->operation())); - return static_cast(&cmpnode()->operation())->type().nbits(); + JLM_ASSERT(dynamic_cast(&cmpnode()->GetOperation())); + return static_cast(&cmpnode()->GetOperation())->type().nbits(); } inline jlm::rvsdg::bitvalue_repr @@ -211,7 +211,7 @@ class unrollinfo final if (!p) return false; - auto op = dynamic_cast(&p->operation()); + auto op = dynamic_cast(&p->GetOperation()); return op && op->value().is_known(); } @@ -222,7 +222,7 @@ class unrollinfo final return nullptr; auto p = producer(output); - return &static_cast(&p->operation())->value(); + return &static_cast(&p->GetOperation())->value(); } rvsdg::RegionArgument * end_; diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 5bb8a3182..1b46c697f 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -137,7 +137,7 @@ JlmToMlirConverter::GetConvertedInputs( ": ", node.input(i)->origin()->type().debug_string(), " for node: ", - node.operation().debug_string(), + 
node.GetOperation().debug_string(), " at index: ", i); JLM_UNREACHABLE(message.c_str()); @@ -170,7 +170,8 @@ JlmToMlirConverter::ConvertNode( } else { - auto message = util::strfmt("Unimplemented structural node: ", node.operation().debug_string()); + auto message = + util::strfmt("Unimplemented structural node: ", node.GetOperation().debug_string()); JLM_UNREACHABLE(message.c_str()); } } @@ -306,7 +307,8 @@ JlmToMlirConverter::ConvertSimpleNode( const ::llvm::SmallVector<::mlir::Value> & inputs) { ::mlir::Operation * MlirOp; - if (auto bitOp = dynamic_cast(&(node.operation()))) + auto & operation = node.GetOperation(); + if (auto bitOp = dynamic_cast(&operation)) { auto value = bitOp->value(); MlirOp = Builder_->create<::mlir::arith::ConstantIntOp>( @@ -314,15 +316,15 @@ JlmToMlirConverter::ConvertSimpleNode( value.to_uint(), value.nbits()); } - else if (jlm::rvsdg::is(node.operation())) + else if (jlm::rvsdg::is(operation)) { - MlirOp = ConvertBitBinaryNode(node.operation(), inputs); + MlirOp = ConvertBitBinaryNode(operation, inputs); } - else if (jlm::rvsdg::is(node.operation())) + else if (jlm::rvsdg::is(operation)) { - MlirOp = BitCompareNode(node.operation(), inputs); + MlirOp = BitCompareNode(operation, inputs); } - else if (auto bitOp = dynamic_cast(&(node.operation()))) + else if (auto bitOp = dynamic_cast(&operation)) { MlirOp = Builder_->create<::mlir::arith::ExtUIOp>( Builder_->getUnknownLoc(), @@ -330,20 +332,20 @@ JlmToMlirConverter::ConvertSimpleNode( inputs[0]); } // ** region structural nodes ** - else if (auto ctlOp = dynamic_cast(&node.operation())) + else if (auto ctlOp = dynamic_cast(&operation)) { MlirOp = Builder_->create<::mlir::rvsdg::ConstantCtrl>( Builder_->getUnknownLoc(), ConvertType(node.output(0)->type()), // Control, ouput type ctlOp->value().alternative()); } - else if (auto undefOp = dynamic_cast(&node.operation())) + else if (auto undefOp = dynamic_cast(&operation)) { MlirOp = Builder_->create<::mlir::jlm::Undef>( 
Builder_->getUnknownLoc(), ConvertType(undefOp->GetType())); } - else if (auto matchOp = dynamic_cast(&(node.operation()))) + else if (auto matchOp = dynamic_cast(&operation)) { // ** region Create the MLIR mapping vector ** //! MLIR match operation can match multiple values to one index @@ -375,7 +377,7 @@ JlmToMlirConverter::ConvertSimpleNode( // ** endregion structural nodes ** else { - auto message = util::strfmt("Unimplemented simple node: ", node.operation().debug_string()); + auto message = util::strfmt("Unimplemented simple node: ", operation.debug_string()); JLM_UNREACHABLE(message.c_str()); } @@ -441,7 +443,7 @@ JlmToMlirConverter::ConvertGamma( ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs) { - auto & gammaOp = *util::AssertedCast(&gammaNode.operation()); + auto & gammaOp = *util::AssertedCast(&gammaNode.GetOperation()); ::llvm::SmallVector<::mlir::Type> typeRangeOuput; for (size_t i = 0; i < gammaNode.noutputs(); ++i) diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 09a3d6ca2..1072a2916 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -79,7 +79,7 @@ binary_normal_form::binary_normal_form( bool binary_normal_form::normalize_node(jlm::rvsdg::node * node) const { - const jlm::rvsdg::operation & base_op = node->operation(); + const operation & base_op = node->GetOperation(); const auto & op = *static_cast(&base_op); return normalize_node(node, op); @@ -107,8 +107,8 @@ binary_normal_form::normalize_node(jlm::rvsdg::node * node, const binary_op & op return false; auto node = static_cast(arg)->node(); - auto fb_op = dynamic_cast(&node->operation()); - return node->operation() == op || (fb_op && fb_op->bin_operation() == op); + auto fb_op = dynamic_cast(&node->GetOperation()); + return node->GetOperation() == op || (fb_op && fb_op->bin_operation() == op); }); } else @@ -172,8 +172,8 @@ binary_normal_form::normalized_create( return false; auto node = static_cast(arg)->node(); - auto fb_op = 
dynamic_cast(&node->operation()); - return node->operation() == op || (fb_op && fb_op->bin_operation() == op); + auto fb_op = dynamic_cast(&node->GetOperation()); + return node->GetOperation() == op || (fb_op && fb_op->bin_operation() == op); }); } @@ -288,7 +288,7 @@ flattened_binary_normal_form::flattened_binary_normal_form( bool flattened_binary_normal_form::normalize_node(jlm::rvsdg::node * node) const { - const auto & op = static_cast(node->operation()); + const auto & op = static_cast(node->GetOperation()); const auto & bin_op = op.bin_operation(); auto nf = graph()->node_normal_form(typeid(bin_op)); @@ -424,7 +424,7 @@ flattened_binary_op::reduce( { if (is(node)) { - auto op = static_cast(&node->operation()); + auto op = static_cast(&node->GetOperation()); auto output = op->reduce(reduction, operands(node)); node->output(0)->divert_users(output); remove(node); diff --git a/jlm/rvsdg/bitstring/bitoperation-classes.cpp b/jlm/rvsdg/bitstring/bitoperation-classes.cpp index c6af03362..d5f2f266f 100644 --- a/jlm/rvsdg/bitstring/bitoperation-classes.cpp +++ b/jlm/rvsdg/bitstring/bitoperation-classes.cpp @@ -30,7 +30,7 @@ bitunary_op::reduce_operand(unop_reduction_path_t path, jlm::rvsdg::output * arg if (path == unop_reduction_constant) { auto p = producer(arg); - auto & c = static_cast(p->operation()); + auto & c = static_cast(p->GetOperation()); return create_bitconstant(p->region(), reduce_constant(c.value())); } @@ -61,8 +61,8 @@ bitbinary_op::reduce_operand_pair( { if (path == binop_reduction_constants) { - auto & c1 = static_cast(producer(arg1)->operation()); - auto & c2 = static_cast(producer(arg2)->operation()); + auto & c1 = static_cast(producer(arg1)->GetOperation()); + auto & c2 = static_cast(producer(arg2)->GetOperation()); return create_bitconstant(arg1->region(), reduce_constants(c1.value(), c2.value())); } @@ -82,12 +82,12 @@ bitcompare_op::can_reduce_operand_pair( auto p = producer(arg1); const bitconstant_op * c1_op = nullptr; if (p) - c1_op = 
dynamic_cast(&p->operation()); + c1_op = dynamic_cast(&p->GetOperation()); p = producer(arg2); const bitconstant_op * c2_op = nullptr; if (p) - c2_op = dynamic_cast(&p->operation()); + c2_op = dynamic_cast(&p->GetOperation()); bitvalue_repr arg1_repr = c1_op ? c1_op->value() : bitvalue_repr::repeat(type().nbits(), 'D'); bitvalue_repr arg2_repr = c2_op ? c2_op->value() : bitvalue_repr::repeat(type().nbits(), 'D'); diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 001cf6ff0..38f018993 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -40,8 +40,8 @@ concat_reduce_arg_pair(jlm::rvsdg::output * arg1, jlm::rvsdg::output * arg2) if (!node1 || !node2) return nullptr; - auto arg1_constant = dynamic_cast(&node1->operation()); - auto arg2_constant = dynamic_cast(&node2->operation()); + auto arg1_constant = dynamic_cast(&node1->GetOperation()); + auto arg2_constant = dynamic_cast(&node2->GetOperation()); if (arg1_constant && arg2_constant) { size_t nbits = arg1_constant->value().nbits() + arg2_constant->value().nbits(); @@ -56,8 +56,8 @@ concat_reduce_arg_pair(jlm::rvsdg::output * arg1, jlm::rvsdg::output * arg2) return create_bitconstant(node1->region(), s.c_str()); } - auto arg1_slice = dynamic_cast(&node1->operation()); - auto arg2_slice = dynamic_cast(&node2->operation()); + auto arg1_slice = dynamic_cast(&node1->GetOperation()); + auto arg2_slice = dynamic_cast(&node2->GetOperation()); if (arg1_slice && arg2_slice && arg1_slice->high() == arg2_slice->low() && node1->input(0)->origin() == node2->input(0)->origin()) { @@ -296,8 +296,8 @@ bitconcat_op::can_reduce_operand_pair( return binop_reduction_constants; } - auto arg1_slice = dynamic_cast(&node1->operation()); - auto arg2_slice = dynamic_cast(&node2->operation()); + auto arg1_slice = dynamic_cast(&node1->GetOperation()); + auto arg2_slice = dynamic_cast(&node2->GetOperation()); if (arg1_slice && arg2_slice) { @@ -326,8 +326,8 @@ 
bitconcat_op::reduce_operand_pair( if (path == binop_reduction_constants) { - auto & arg1_constant = static_cast(node1->operation()); - auto & arg2_constant = static_cast(node2->operation()); + auto & arg1_constant = static_cast(node1->GetOperation()); + auto & arg2_constant = static_cast(node2->GetOperation()); size_t nbits = arg1_constant.value().nbits() + arg2_constant.value().nbits(); std::vector bits(nbits); @@ -342,8 +342,8 @@ bitconcat_op::reduce_operand_pair( if (path == binop_reduction_merge) { - auto arg1_slice = static_cast(&node1->operation()); - auto arg2_slice = static_cast(&node2->operation()); + auto arg1_slice = static_cast(&node1->GetOperation()); + auto arg2_slice = static_cast(&node2->GetOperation()); return jlm::rvsdg::bitslice(node1->input(0)->origin(), arg1_slice->low(), arg2_slice->high()); /* FIXME: support sign bit */ diff --git a/jlm/rvsdg/bitstring/slice.cpp b/jlm/rvsdg/bitstring/slice.cpp index 32e7f9529..651cebc46 100644 --- a/jlm/rvsdg/bitstring/slice.cpp +++ b/jlm/rvsdg/bitstring/slice.cpp @@ -61,13 +61,13 @@ bitslice_op::reduce_operand(unop_reduction_path_t path, jlm::rvsdg::output * arg if (path == unop_reduction_narrow) { - auto op = static_cast(node->operation()); + auto op = static_cast(node->GetOperation()); return jlm::rvsdg::bitslice(node->input(0)->origin(), low() + op.low(), high() + op.low()); } if (path == unop_reduction_constant) { - auto op = static_cast(node->operation()); + auto op = static_cast(node->GetOperation()); std::string s(&op.value()[0] + low(), high() - low()); return create_bitconstant(arg->region(), s.c_str()); } diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index 94fe1c720..2c8311150 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -119,7 +119,7 @@ match_op::reduce_operand(unop_reduction_path_t path, jlm::rvsdg::output * arg) c { if (path == unop_reduction_constant) { - auto op = static_cast(producer(arg)->operation()); + auto op = static_cast(producer(arg)->GetOperation()); 
return jlm::rvsdg::control_constant( arg->region(), nalternatives(), diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index e899a1a8b..7e3946d95 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -17,7 +17,7 @@ static bool is_predicate_reducible(const GammaNode * gamma) { auto constant = output::GetNode(*gamma->predicate()->origin()); - return constant && is_ctlconstant_op(constant->operation()); + return constant && is_ctlconstant_op(constant->GetOperation()); } static void @@ -25,7 +25,7 @@ perform_predicate_reduction(GammaNode * gamma) { auto origin = gamma->predicate()->origin(); auto constant = static_cast(origin)->node(); - auto cop = static_cast(&constant->operation()); + auto cop = static_cast(&constant->GetOperation()); auto alternative = cop->value().alternative(); rvsdg::SubstitutionMap smap; @@ -78,7 +78,7 @@ is_control_constant_reducible(GammaNode * gamma) return {}; /* check number of alternatives */ - auto match_op = static_cast(&match->operation()); + auto match_op = static_cast(&match->GetOperation()); std::unordered_set set({ match_op->default_alternative() }); for (const auto & pair : *match_op) set.insert(pair.second); @@ -100,7 +100,7 @@ is_control_constant_reducible(GammaNode * gamma) if (!is(node)) break; - auto op = static_cast(&node->operation()); + auto op = static_cast(&node->GetOperation()); if (op->value().nalternatives() != 2) break; } @@ -117,7 +117,7 @@ perform_control_constant_reduction(std::unordered_set & outp auto gamma = static_cast((*outputs.begin())->node()); auto origin = static_cast(gamma->predicate()->origin()); auto match = origin->node(); - auto & match_op = to_match_op(match->operation()); + auto & match_op = to_match_op(match->GetOperation()); std::unordered_map map; for (const auto & pair : match_op) @@ -134,7 +134,7 @@ perform_control_constant_reduction(std::unordered_set & outp for (size_t n = 0; n < xv->nresults(); n++) { auto origin = static_cast(xv->result(n)->origin()); - auto & value = 
to_ctlconstant_op(origin->node()->operation()).value(); + auto & value = to_ctlconstant_op(origin->node()->GetOperation()).value(); nalternatives = value.nalternatives(); if (map.find(n) != map.end()) new_mapping[map[n]] = value.alternative(); diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 1d0aef836..0efc8a9bf 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -356,7 +356,7 @@ producer(const jlm::rvsdg::output * output) noexcept bool normalize(jlm::rvsdg::node * node) { - const auto & op = node->operation(); + const auto & op = node->GetOperation(); auto nf = node->graph()->node_normal_form(typeid(op)); return nf->normalize_node(node); } diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 85dfd7dca..0950d6e56 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -635,8 +635,8 @@ class node node(std::unique_ptr op, rvsdg::Region * region); - inline const jlm::rvsdg::operation & - operation() const noexcept + [[nodiscard]] virtual const operation & + GetOperation() const noexcept { return *operation_; } @@ -1083,7 +1083,7 @@ is(const jlm::rvsdg::node * node) noexcept if (!node) return false; - return is(node->operation()); + return is(node->GetOperation()); } jlm::rvsdg::node * diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index f451b79ac..2a79aaeaa 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -367,7 +367,7 @@ Region::normalize(bool recursive) structnode->subregion(n)->normalize(recursive); } - const auto & op = node->operation(); + const auto & op = node->GetOperation(); graph()->node_normal_form(typeid(op))->normalize_node(node); } } @@ -440,7 +440,7 @@ Region::ToTree( { if (auto structuralNode = dynamic_cast(&node)) { - auto nodeString = structuralNode->operation().debug_string(); + auto nodeString = structuralNode->GetOperation().debug_string(); auto annotationString = GetAnnotationString( structuralNode, annotationMap, diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 
dcecbb773..5e53ed561 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -51,29 +51,36 @@ simple_node::simple_node( const std::vector & operands) : node(op.copy(), region) { - if (operation().narguments() != operands.size()) + if (simple_node::GetOperation().narguments() != operands.size()) throw jlm::util::error(jlm::util::strfmt( "Argument error - expected ", - operation().narguments(), + simple_node::GetOperation().narguments(), ", received ", operands.size(), " arguments.")); - for (size_t n = 0; n < operation().narguments(); n++) + for (size_t n = 0; n < simple_node::GetOperation().narguments(); n++) { - node::add_input(std::make_unique(this, operands[n], operation().argument(n))); + add_input( + std::make_unique(this, operands[n], simple_node::GetOperation().argument(n))); } - for (size_t n = 0; n < operation().nresults(); n++) - node::add_output(std::make_unique(this, operation().result(n))); + for (size_t n = 0; n < simple_node::GetOperation().nresults(); n++) + add_output(std::make_unique(this, simple_node::GetOperation().result(n))); on_node_create(this); } +const SimpleOperation & +simple_node::GetOperation() const noexcept +{ + return *util::AssertedCast(&node::GetOperation()); +} + jlm::rvsdg::node * simple_node::copy(rvsdg::Region * region, const std::vector & operands) const { - auto node = create(region, operation(), operands); + auto node = create(region, GetOperation(), operands); graph()->mark_denormalized(); return node; } diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index dac954122..c6da042dd 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -38,8 +38,8 @@ class simple_node : public node jlm::rvsdg::simple_output * output(size_t index) const noexcept; - const SimpleOperation & - operation() const noexcept; + [[nodiscard]] const SimpleOperation & + GetOperation() const noexcept override; virtual jlm::rvsdg::node * copy(rvsdg::Region * region, const std::vector & operands) const 
override; @@ -122,12 +122,6 @@ simple_node::output(size_t index) const noexcept return static_cast(node::output(index)); } -inline const SimpleOperation & -simple_node::operation() const noexcept -{ - return *static_cast(&node::operation()); -} - } #endif diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 20a062af3..2990d8d1b 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -14,7 +14,7 @@ node_cse( { auto cse_test = [&](const jlm::rvsdg::node * node) { - return node->operation() == op && arguments == jlm::rvsdg::operands(node); + return node->GetOperation() == op && arguments == operands(node); }; if (!arguments.empty()) @@ -66,7 +66,7 @@ simple_normal_form::normalize_node(jlm::rvsdg::node * node) const if (get_cse()) { - auto new_node = node_cse(node->region(), node->operation(), operands(node)); + auto new_node = node_cse(node->region(), node->GetOperation(), operands(node)); JLM_ASSERT(new_node); if (new_node != node) { diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 2509e8b50..96706d09e 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -45,7 +45,7 @@ is_mux_mux_reducible(const std::vector & ops) for (const auto & operand : operands) { auto node = output::GetNode(*operand); - if (!node || !is_mux_op(node->operation())) + if (!node || !is_mux_op(node->GetOperation())) continue; size_t n; @@ -84,7 +84,7 @@ perform_mux_mux_reduction( const jlm::rvsdg::node * muxnode, const std::vector & old_operands) { - JLM_ASSERT(is_mux_op(muxnode->operation())); + JLM_ASSERT(is_mux_op(muxnode->GetOperation())); bool reduced = false; std::vector new_operands; @@ -123,8 +123,8 @@ mux_normal_form::mux_normal_form( bool mux_normal_form::normalize_node(jlm::rvsdg::node * node) const { - JLM_ASSERT(dynamic_cast(&node->operation())); - auto op = static_cast(&node->operation()); + JLM_ASSERT(dynamic_cast(&node->GetOperation())); + auto op = static_cast(&node->GetOperation()); 
if (!get_mutable()) return true; diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 83366af52..5f21495e7 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -36,7 +36,7 @@ unary_normal_form::normalize_node(jlm::rvsdg::node * node) const return true; } - const auto & op = static_cast(node->operation()); + const auto & op = static_cast(node->GetOperation()); if (get_reducible()) { diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 3a129e8e8..be3e9a8ab 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -47,7 +47,7 @@ node_to_string( s = s + name + " "; } - s += ":= " + node->operation().debug_string() + " "; + s += ":= " + node->GetOperation().debug_string() + " "; for (size_t n = 0; n < node->ninputs(); n++) { @@ -262,10 +262,10 @@ edge_tag(const std::string & srcid, const std::string & dstid) static inline std::string type(const jlm::rvsdg::node * n) { - if (dynamic_cast(&n->operation())) + if (dynamic_cast(&n->GetOperation())) return "gamma"; - if (dynamic_cast(&n->operation())) + if (dynamic_cast(&n->GetOperation())) return "theta"; return ""; @@ -279,7 +279,7 @@ convert_simple_node(const jlm::rvsdg::simple_node * node) { std::string s; - s += node_starttag(id(node), node->operation().debug_string(), ""); + s += node_starttag(id(node), node->GetOperation().debug_string(), ""); for (size_t n = 0; n < node->ninputs(); n++) s += input_tag(id(node->input(n))); for (size_t n = 0; n < node->noutputs(); n++) diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index eb25ffd21..75ea8087a 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -131,22 +131,22 @@ TestLoad() // Load Address auto loadNode = jlm::util::AssertedCast(lambdaRegion->result(0)->origin())->node(); - jlm::util::AssertedCast(&loadNode->operation()); + jlm::util::AssertedCast(&loadNode->GetOperation()); // 
Load Data loadNode = jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); - jlm::util::AssertedCast(&loadNode->operation()); + jlm::util::AssertedCast(&loadNode->GetOperation()); // Request Node auto requestNode = jlm::util::AssertedCast(lambdaRegion->result(2)->origin())->node(); - jlm::util::AssertedCast(&requestNode->operation()); + jlm::util::AssertedCast(&requestNode->GetOperation()); // Response Node auto responseNode = jlm::util::AssertedCast(loadNode->input(2)->origin())->node(); - jlm::util::AssertedCast(&responseNode->operation()); + jlm::util::AssertedCast(&responseNode->GetOperation()); // Response source auto responseSource = responseNode->input(0)->origin(); @@ -218,27 +218,27 @@ TestLoadStore() // Store Node auto storeNode = jlm::util::AssertedCast(lambdaRegion->result(0)->origin())->node(); - jlm::util::AssertedCast(&storeNode->operation()); + jlm::util::AssertedCast(&storeNode->GetOperation()); // Request Node auto firstRequestNode = jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); - jlm::util::AssertedCast(&firstRequestNode->operation()); + jlm::util::AssertedCast(&firstRequestNode->GetOperation()); // Request Node auto secondRequestNode = jlm::util::AssertedCast(lambdaRegion->result(2)->origin())->node(); - jlm::util::AssertedCast(&secondRequestNode->operation()); + jlm::util::AssertedCast(&secondRequestNode->GetOperation()); // Load node auto loadNode = jlm::util::AssertedCast(storeNode->input(0)->origin())->node(); - jlm::util::AssertedCast(&loadNode->operation()); + jlm::util::AssertedCast(&loadNode->GetOperation()); // Response Node auto responseNode = jlm::util::AssertedCast(loadNode->input(2)->origin())->node(); - jlm::util::AssertedCast(&responseNode->operation()); + jlm::util::AssertedCast(&responseNode->GetOperation()); return 0; } @@ -315,11 +315,11 @@ TestThetaLoad() auto * const entryMemoryStateSplitInput = *lambdaRegion->argument(4)->begin(); auto * entryMemoryStateSplitNode = 
jlm::rvsdg::input::GetNode(*entryMemoryStateSplitInput); jlm::util::AssertedCast( - &entryMemoryStateSplitNode->operation()); + &entryMemoryStateSplitNode->GetOperation()); auto exitMemoryStateMergeNode = jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); jlm::util::AssertedCast( - &exitMemoryStateMergeNode->operation()); + &exitMemoryStateMergeNode->GetOperation()); // Act ConvertThetaNodes(*rvsdgModule); @@ -352,20 +352,20 @@ TestThetaLoad() // Request Node auto requestNode = jlm::util::AssertedCast(lambdaRegion->result(2)->origin())->node(); - jlm::util::AssertedCast(&requestNode->operation()); + jlm::util::AssertedCast(&requestNode->GetOperation()); // HLS_LOOP Node auto loopOutput = jlm::util::AssertedCast(requestNode->input(0)->origin()); auto loopNode = jlm::util::AssertedCast(loopOutput->node()); - jlm::util::AssertedCast(&loopNode->operation()); + jlm::util::AssertedCast(&loopNode->GetOperation()); // Loop Result auto & thetaResult = loopOutput->results; assert(thetaResult.size() == 1); // Load Node auto loadNode = jlm::util::AssertedCast(thetaResult.first()->origin())->node(); - jlm::util::AssertedCast(&loadNode->operation()); + jlm::util::AssertedCast(&loadNode->GetOperation()); // Loop Argument auto thetaArgument = jlm::util::AssertedCast(loadNode->input(1)->origin()); @@ -374,7 +374,7 @@ TestThetaLoad() // Response Node auto responseNode = jlm::util::AssertedCast(thetaInput->origin())->node(); - jlm::util::AssertedCast(&responseNode->operation()); + jlm::util::AssertedCast(&responseNode->GetOperation()); // Lambda argument jlm::util::AssertedCast(responseNode->input(0)->origin()); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 2b1997010..6a5752413 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -72,7 +72,7 @@ TestFork() forkNodeOutput = dynamic_cast(loop->subregion()->result(0)->origin())); auto forkNode = 
forkNodeOutput->node(); - auto forkOp = util::AssertedCast(&forkNode->operation()); + auto forkOp = util::AssertedCast(&forkNode->GetOperation()); assert(forkNode->ninputs() == 1); assert(forkNode->noutputs() == 4); assert(forkOp->IsConstant() == false); @@ -142,7 +142,7 @@ TestConstantFork() forkNodeOutput = dynamic_cast(loop->subregion()->result(0)->origin())); auto forkNode = forkNodeOutput->node(); - auto forkOp = util::AssertedCast(&forkNode->operation()); + auto forkOp = util::AssertedCast(&forkNode->GetOperation()); assert(forkNode->ninputs() == 1); assert(forkNode->noutputs() == 2); assert(forkOp->IsConstant() == false); @@ -152,7 +152,7 @@ TestConstantFork() auto bitsUltNode = bitsUltNodeOutput->node(); auto cforkNodeOutput = dynamic_cast(bitsUltNode->input(1)->origin()); auto cforkNode = cforkNodeOutput->node(); - auto cforkOp = util::AssertedCast(&cforkNode->operation()); + auto cforkOp = util::AssertedCast(&cforkNode->GetOperation()); assert(cforkNode->ninputs() == 1); assert(cforkNode->noutputs() == 2); assert(cforkOp->IsConstant() == true); diff --git a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp index 8a664a6da..4d8badbd9 100644 --- a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp +++ b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp @@ -35,7 +35,7 @@ TestWriteGraphs() auto & lambdaNode = *AssertedCast(&rootGraph.GetNode(0)); // The lambda only has one output, and a single subgraph - assert(lambdaNode.GetLabel() == gammaTest.lambda->operation().debug_string()); + assert(lambdaNode.GetLabel() == gammaTest.lambda->GetOperation().debug_string()); assert(lambdaNode.NumInputPorts() == 0); assert(lambdaNode.NumOutputPorts() == 1); assert(lambdaNode.NumSubgraphs() == 1); @@ -48,7 +48,7 @@ TestWriteGraphs() auto & connections = fctBody.GetArgumentNode(1).GetConnections(); assert(connections.size() == 1); auto & gammaNode = *AssertedCast(&connections[0]->GetTo().GetNode()); - assert(gammaNode.GetLabel() == 
gammaTest.gamma->operation().debug_string()); + assert(gammaNode.GetLabel() == gammaTest.gamma->GetOperation().debug_string()); assert(gammaNode.NumInputPorts() == 5); assert(gammaNode.NumOutputPorts() == 2); assert(gammaNode.NumSubgraphs() == 2); diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index 261c41a07..aac44a41c 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -248,9 +248,9 @@ TestStoreMuxReduction() auto n0 = jlm::rvsdg::output::GetNode(*muxnode->input(0)->origin()); auto n1 = jlm::rvsdg::output::GetNode(*muxnode->input(1)->origin()); auto n2 = jlm::rvsdg::output::GetNode(*muxnode->input(2)->origin()); - assert(jlm::rvsdg::is(n0->operation())); - assert(jlm::rvsdg::is(n1->operation())); - assert(jlm::rvsdg::is(n2->operation())); + assert(jlm::rvsdg::is(n0->GetOperation())); + assert(jlm::rvsdg::is(n1->GetOperation())); + assert(jlm::rvsdg::is(n2->GetOperation())); } static void @@ -289,7 +289,7 @@ TestMultipleOriginReduction() // Assert auto node = jlm::rvsdg::output::GetNode(*ex.origin()); - assert(jlm::rvsdg::is(node->operation()) && node->ninputs() == 3); + assert(jlm::rvsdg::is(node->GetOperation()) && node->ninputs() == 3); } static void diff --git a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp index 02495ac5c..e30f32320 100644 --- a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp +++ b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp @@ -57,7 +57,7 @@ TestUndef() // Get the undef op auto convertedUndef = - dynamic_cast(®ion->Nodes().begin()->operation()); + dynamic_cast(®ion->Nodes().begin()->GetOperation()); assert(convertedUndef != nullptr); diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 0c08be6ee..f646f3fe4 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -101,7 +101,7 @@ 
TestLambda() * recursively. For each operation the operand 0 is checked until the definingOperations is empty. * * \param operation The starting operation to check. (the lambda result for example) - * \param succesorOperations The trace of operations to check. The last operation is the direct user + * \param definingOperations The trace of operations to check. The last operation is the direct user * of the given operation operand and the first operation is the last operation that will be checked * on the chain. */ diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 8cb826ca7..5f455411c 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -280,7 +280,7 @@ TestDivOperation() lambdaResultOriginNodeOuput = dynamic_cast( convertedLambda->subregion()->result(0)->origin())); jlm::rvsdg::node * lambdaResultOriginNode = lambdaResultOriginNodeOuput->node(); - assert(is(lambdaResultOriginNode->operation())); + assert(is(lambdaResultOriginNode->GetOperation())); assert(lambdaResultOriginNode->ninputs() == 2); // Check first input @@ -297,9 +297,9 @@ TestDivOperation() DivInput1NodeOuput = dynamic_cast(lambdaResultOriginNode->input(1)->origin())); jlm::rvsdg::node * DivInput1Node = DivInput1NodeOuput->node(); - assert(is(DivInput1Node->operation())); + assert(is(DivInput1Node->GetOperation())); const jlm::rvsdg::bitconstant_op * DivInput1Constant = - dynamic_cast(&DivInput1Node->operation()); + dynamic_cast(&DivInput1Node->GetOperation()); assert(DivInput1Constant->value() == 5); assert(is(DivInput1Constant->result(0))); assert(std::dynamic_pointer_cast(DivInput1Constant->result(0))->nbits() == 32); @@ -459,12 +459,12 @@ TestCompZeroExt() lambdaResultOriginNodeOuput = dynamic_cast( convertedLambda->subregion()->result(0)->origin())); jlm::rvsdg::node * ZExtNode = lambdaResultOriginNodeOuput->node(); - assert(is(ZExtNode->operation())); + 
assert(is(ZExtNode->GetOperation())); assert(ZExtNode->ninputs() == 1); // Check ZExt const jlm::llvm::zext_op * ZExtOp = - dynamic_cast(&ZExtNode->operation()); + dynamic_cast(&ZExtNode->GetOperation()); assert(ZExtOp->nsrcbits() == 1); assert(ZExtOp->ndstbits() == 32); @@ -473,11 +473,11 @@ TestCompZeroExt() jlm::rvsdg::node_output * ZExtInput0; assert(ZExtInput0 = dynamic_cast(ZExtNode->input(0)->origin())); jlm::rvsdg::node * BitEqNode = ZExtInput0->node(); - assert(is(BitEqNode->operation())); + assert(is(BitEqNode->GetOperation())); // Check BitEq assert( - dynamic_cast(&BitEqNode->operation())->type().nbits() + dynamic_cast(&BitEqNode->GetOperation())->type().nbits() == 32); assert(BitEqNode->ninputs() == 2); @@ -485,25 +485,25 @@ TestCompZeroExt() jlm::rvsdg::node_output * AddOuput; assert(AddOuput = dynamic_cast(BitEqNode->input(0)->origin())); jlm::rvsdg::node * AddNode = AddOuput->node(); - assert(is(AddNode->operation())); + assert(is(AddNode->GetOperation())); assert(AddNode->ninputs() == 2); // Check BitEq input 1 jlm::rvsdg::node_output * Const2Ouput; assert(Const2Ouput = dynamic_cast(BitEqNode->input(1)->origin())); jlm::rvsdg::node * Const2Node = Const2Ouput->node(); - assert(is(Const2Node->operation())); + assert(is(Const2Node->GetOperation())); // Check Const2 const jlm::rvsdg::bitconstant_op * Const2Op = - dynamic_cast(&Const2Node->operation()); + dynamic_cast(&Const2Node->GetOperation()); assert(Const2Op->value() == 5); assert(is(Const2Op->result(0))); assert(std::dynamic_pointer_cast(Const2Op->result(0))->nbits() == 32); // Check add op const jlm::rvsdg::bitadd_op * AddOp = - dynamic_cast(&AddNode->operation()); + dynamic_cast(&AddNode->GetOperation()); assert(AddOp->type().nbits() == 32); // Check add input0 @@ -516,11 +516,11 @@ TestCompZeroExt() jlm::rvsdg::node_output * Const1Output; assert(Const1Output = dynamic_cast(AddNode->input(1)->origin())); jlm::rvsdg::node * Const1Node = Const1Output->node(); - assert(is(Const1Node->operation())); 
+ assert(is(Const1Node->GetOperation())); // Check Const1 const jlm::rvsdg::bitconstant_op * Const1Op = - dynamic_cast(&Const1Node->operation()); + dynamic_cast(&Const1Node->GetOperation()); assert(Const1Op->value() == 20); assert(is(Const1Op->result(0))); assert(std::dynamic_pointer_cast(Const1Op->result(0))->nbits() == 32); @@ -668,9 +668,9 @@ TestMatchOp() assert( matchOutput = dynamic_cast(lambdaRegion->result(0)->origin())); jlm::rvsdg::node * matchNode = matchOutput->node(); - assert(is(matchNode->operation())); + assert(is(matchNode->GetOperation())); - auto matchOp = dynamic_cast(&matchNode->operation()); + auto matchOp = dynamic_cast(&matchNode->GetOperation()); assert(matchOp->narguments() == 1); assert(is(matchOp->argument(0))); assert(std::dynamic_pointer_cast(matchOp->argument(0))->nbits() == 32); @@ -844,10 +844,10 @@ TestGammaOp() assert( gammaOutput = dynamic_cast(lambdaRegion->result(0)->origin())); jlm::rvsdg::node * gammaNode = gammaOutput->node(); - assert(is(gammaNode->operation())); + assert(is(gammaNode->GetOperation())); std::cout << "Checking gamma operation" << std::endl; - auto gammaOp = dynamic_cast(&gammaNode->operation()); + auto gammaOp = dynamic_cast(&gammaNode->GetOperation()); assert(gammaNode->ninputs() == 3); assert(gammaOp->nalternatives() == 3); assert(gammaNode->noutputs() == 2); @@ -993,7 +993,7 @@ TestThetaOp() assert( thetaOutput = dynamic_cast(lambdaRegion->result(0)->origin())); jlm::rvsdg::node * node = thetaOutput->node(); - assert(is(node->operation())); + assert(is(node->GetOperation())); auto thetaNode = dynamic_cast(node); std::cout << "Checking theta node" << std::endl; diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 7690de4d6..5a7582dd9 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -36,8 +36,8 @@ types_bitstring_arithmetic_test_bitand(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - 
assert(output::GetNode(*and0)->operation() == bitand_op(32)); - assert(output::GetNode(*and1)->operation() == int_constant_op(32, +1)); + assert(output::GetNode(*and0)->GetOperation() == bitand_op(32)); + assert(output::GetNode(*and1)->GetOperation() == int_constant_op(32, +1)); return 0; } @@ -72,11 +72,11 @@ types_bitstring_arithmetic_test_bitashr(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*ashr0)->operation() == bitashr_op(32)); - assert(output::GetNode(*ashr1)->operation() == int_constant_op(32, 4)); - assert(output::GetNode(*ashr2)->operation() == int_constant_op(32, 0)); - assert(output::GetNode(*ashr3)->operation() == int_constant_op(32, -4)); - assert(output::GetNode(*ashr4)->operation() == int_constant_op(32, -1)); + assert(output::GetNode(*ashr0)->GetOperation() == bitashr_op(32)); + assert(output::GetNode(*ashr1)->GetOperation() == int_constant_op(32, 4)); + assert(output::GetNode(*ashr2)->GetOperation() == int_constant_op(32, 0)); + assert(output::GetNode(*ashr3)->GetOperation() == int_constant_op(32, -4)); + assert(output::GetNode(*ashr4)->GetOperation() == int_constant_op(32, -1)); return 0; } @@ -99,7 +99,7 @@ types_bitstring_arithmetic_test_bitdifference(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*diff)->operation() == bitsub_op(32)); + assert(output::GetNode(*diff)->GetOperation() == bitsub_op(32)); return 0; } @@ -125,9 +125,9 @@ types_bitstring_arithmetic_test_bitnegate(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*neg0)->operation() == bitneg_op(32)); - assert(output::GetNode(*neg1)->operation() == int_constant_op(32, -3)); - assert(output::GetNode(*neg2)->operation() == int_constant_op(32, 3)); + assert(output::GetNode(*neg0)->GetOperation() == bitneg_op(32)); + assert(output::GetNode(*neg1)->GetOperation() == int_constant_op(32, -3)); + assert(output::GetNode(*neg2)->GetOperation() == int_constant_op(32, 3)); return 0; 
} @@ -153,9 +153,9 @@ types_bitstring_arithmetic_test_bitnot(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*not0)->operation() == bitnot_op(32)); - assert(output::GetNode(*not1)->operation() == int_constant_op(32, -4)); - assert(output::GetNode(*not2)->operation() == int_constant_op(32, 3)); + assert(output::GetNode(*not0)->GetOperation() == bitnot_op(32)); + assert(output::GetNode(*not1)->GetOperation() == int_constant_op(32, -4)); + assert(output::GetNode(*not2)->GetOperation() == int_constant_op(32, 3)); return 0; } @@ -182,8 +182,8 @@ types_bitstring_arithmetic_test_bitor(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*or0)->operation() == bitor_op(32)); - assert(output::GetNode(*or1)->operation() == uint_constant_op(32, 7)); + assert(output::GetNode(*or0)->GetOperation() == bitor_op(32)); + assert(output::GetNode(*or1)->GetOperation() == uint_constant_op(32, 7)); return 0; } @@ -211,8 +211,8 @@ types_bitstring_arithmetic_test_bitproduct(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*product0)->operation() == bitmul_op(32)); - assert(output::GetNode(*product1)->operation() == uint_constant_op(32, 15)); + assert(output::GetNode(*product0)->GetOperation() == bitmul_op(32)); + assert(output::GetNode(*product1)->GetOperation() == uint_constant_op(32, 15)); return 0; } @@ -235,7 +235,7 @@ types_bitstring_arithmetic_test_bitshiproduct(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*shiproduct)->operation() == bitsmulh_op(32)); + assert(output::GetNode(*shiproduct)->GetOperation() == bitsmulh_op(32)); return 0; } @@ -265,9 +265,9 @@ types_bitstring_arithmetic_test_bitshl(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*shl0)->operation() == bitshl_op(32)); - assert(output::GetNode(*shl1)->operation() == uint_constant_op(32, 64)); - assert(output::GetNode(*shl2)->operation() 
== uint_constant_op(32, 0)); + assert(output::GetNode(*shl0)->GetOperation() == bitshl_op(32)); + assert(output::GetNode(*shl1)->GetOperation() == uint_constant_op(32, 64)); + assert(output::GetNode(*shl2)->GetOperation() == uint_constant_op(32, 0)); return 0; } @@ -297,9 +297,9 @@ types_bitstring_arithmetic_test_bitshr(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*shr0)->operation() == bitshr_op(32)); - assert(output::GetNode(*shr1)->operation() == uint_constant_op(32, 4)); - assert(output::GetNode(*shr2)->operation() == uint_constant_op(32, 0)); + assert(output::GetNode(*shr0)->GetOperation() == bitshr_op(32)); + assert(output::GetNode(*shr1)->GetOperation() == uint_constant_op(32, 4)); + assert(output::GetNode(*shr2)->GetOperation() == uint_constant_op(32, 0)); return 0; } @@ -327,8 +327,8 @@ types_bitstring_arithmetic_test_bitsmod(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*smod0)->operation() == bitsmod_op(32)); - assert(output::GetNode(*smod1)->operation() == int_constant_op(32, -1)); + assert(output::GetNode(*smod0)->GetOperation() == bitsmod_op(32)); + assert(output::GetNode(*smod1)->GetOperation() == int_constant_op(32, -1)); return 0; } @@ -356,8 +356,8 @@ types_bitstring_arithmetic_test_bitsquotient(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*squot0)->operation() == bitsdiv_op(32)); - assert(output::GetNode(*squot1)->operation() == int_constant_op(32, -2)); + assert(output::GetNode(*squot0)->GetOperation() == bitsdiv_op(32)); + assert(output::GetNode(*squot1)->GetOperation() == int_constant_op(32, -2)); return 0; } @@ -385,8 +385,8 @@ types_bitstring_arithmetic_test_bitsum(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*sum0)->operation() == bitadd_op(32)); - assert(output::GetNode(*sum1)->operation() == int_constant_op(32, 8)); + assert(output::GetNode(*sum0)->GetOperation() == 
bitadd_op(32)); + assert(output::GetNode(*sum1)->GetOperation() == int_constant_op(32, 8)); return 0; } @@ -409,7 +409,7 @@ types_bitstring_arithmetic_test_bituhiproduct(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*uhiproduct)->operation() == bitumulh_op(32)); + assert(output::GetNode(*uhiproduct)->GetOperation() == bitumulh_op(32)); return 0; } @@ -437,8 +437,8 @@ types_bitstring_arithmetic_test_bitumod(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*umod0)->operation() == bitumod_op(32)); - assert(output::GetNode(*umod1)->operation() == int_constant_op(32, 1)); + assert(output::GetNode(*umod0)->GetOperation() == bitumod_op(32)); + assert(output::GetNode(*umod1)->GetOperation() == int_constant_op(32, 1)); return 0; } @@ -466,8 +466,8 @@ types_bitstring_arithmetic_test_bituquotient(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*uquot0)->operation() == bitudiv_op(32)); - assert(output::GetNode(*uquot1)->operation() == int_constant_op(32, 2)); + assert(output::GetNode(*uquot0)->GetOperation() == bitudiv_op(32)); + assert(output::GetNode(*uquot1)->GetOperation() == int_constant_op(32, 2)); return 0; } @@ -494,8 +494,8 @@ types_bitstring_arithmetic_test_bitxor(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*xor0)->operation() == bitxor_op(32)); - assert(output::GetNode(*xor1)->operation() == int_constant_op(32, 6)); + assert(output::GetNode(*xor0)->GetOperation() == bitxor_op(32)); + assert(output::GetNode(*xor1)->GetOperation() == int_constant_op(32, 6)); return 0; } @@ -504,7 +504,7 @@ static inline void expect_static_true(jlm::rvsdg::output * port) { auto node = jlm::rvsdg::output::GetNode(*port); - auto op = dynamic_cast(&node->operation()); + auto op = dynamic_cast(&node->GetOperation()); assert(op && op->value().nbits() == 1 && op->value().str() == "1"); } @@ -512,7 +512,7 @@ static inline void 
expect_static_false(jlm::rvsdg::output * port) { auto node = jlm::rvsdg::output::GetNode(*port); - auto op = dynamic_cast(&node->operation()); + auto op = dynamic_cast(&node->GetOperation()); assert(op && op->value().nbits() == 1 && op->value().str() == "0"); } @@ -542,10 +542,10 @@ types_bitstring_comparison_test_bitequal(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*equal0)->operation() == biteq_op(32)); + assert(output::GetNode(*equal0)->GetOperation() == biteq_op(32)); expect_static_true(equal1); expect_static_false(equal2); - assert(output::GetNode(*equal3)->operation() == biteq_op(32)); + assert(output::GetNode(*equal3)->GetOperation() == biteq_op(32)); return 0; } @@ -576,10 +576,10 @@ types_bitstring_comparison_test_bitnotequal(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*nequal0)->operation() == bitne_op(32)); + assert(output::GetNode(*nequal0)->GetOperation() == bitne_op(32)); expect_static_false(nequal1); expect_static_true(nequal2); - assert(output::GetNode(*nequal3)->operation() == bitne_op(32)); + assert(output::GetNode(*nequal3)->GetOperation() == bitne_op(32)); return 0; } @@ -613,7 +613,7 @@ types_bitstring_comparison_test_bitsgreater(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*sgreater0)->operation() == bitsgt_op(32)); + assert(output::GetNode(*sgreater0)->GetOperation() == bitsgt_op(32)); expect_static_false(sgreater1); expect_static_true(sgreater2); expect_static_false(sgreater3); @@ -653,7 +653,7 @@ types_bitstring_comparison_test_bitsgreatereq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*sgreatereq0)->operation() == bitsge_op(32)); + assert(output::GetNode(*sgreatereq0)->GetOperation() == bitsge_op(32)); expect_static_false(sgreatereq1); expect_static_true(sgreatereq2); expect_static_true(sgreatereq3); @@ -692,7 +692,7 @@ types_bitstring_comparison_test_bitsless(void) 
graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*sless0)->operation() == bitslt_op(32)); + assert(output::GetNode(*sless0)->GetOperation() == bitslt_op(32)); expect_static_true(sless1); expect_static_false(sless2); expect_static_false(sless3); @@ -732,7 +732,7 @@ types_bitstring_comparison_test_bitslesseq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*slesseq0)->operation() == bitsle_op(32)); + assert(output::GetNode(*slesseq0)->GetOperation() == bitsle_op(32)); expect_static_true(slesseq1); expect_static_true(slesseq2); expect_static_false(slesseq3); @@ -771,7 +771,7 @@ types_bitstring_comparison_test_bitugreater(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*ugreater0)->operation() == bitugt_op(32)); + assert(output::GetNode(*ugreater0)->GetOperation() == bitugt_op(32)); expect_static_false(ugreater1); expect_static_true(ugreater2); expect_static_false(ugreater3); @@ -811,7 +811,7 @@ types_bitstring_comparison_test_bitugreatereq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*ugreatereq0)->operation() == bituge_op(32)); + assert(output::GetNode(*ugreatereq0)->GetOperation() == bituge_op(32)); expect_static_false(ugreatereq1); expect_static_true(ugreatereq2); expect_static_true(ugreatereq3); @@ -850,7 +850,7 @@ types_bitstring_comparison_test_bituless(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*uless0)->operation() == bitult_op(32)); + assert(output::GetNode(*uless0)->GetOperation() == bitult_op(32)); expect_static_true(uless1); expect_static_false(uless2); expect_static_false(uless3); @@ -890,7 +890,7 @@ types_bitstring_comparison_test_bitulesseq(void) graph.prune(); jlm::rvsdg::view(graph.root(), stdout); - assert(output::GetNode(*ulesseq0)->operation() == bitule_op(32)); + assert(output::GetNode(*ulesseq0)->GetOperation() == bitule_op(32)); expect_static_true(ulesseq1); 
expect_static_true(ulesseq2); expect_static_false(ulesseq3); @@ -940,24 +940,24 @@ types_bitstring_test_constant(void) auto b3 = output::GetNode(*create_bitconstant(graph.root(), 8, 204)); auto b4 = output::GetNode(*create_bitconstant(graph.root(), "001100110")); - assert(b1->operation() == uint_constant_op(8, 204)); - assert(b1->operation() == int_constant_op(8, -52)); + assert(b1->GetOperation() == uint_constant_op(8, 204)); + assert(b1->GetOperation() == int_constant_op(8, -52)); assert(b1 == b2); assert(b1 == b3); - assert(b1->operation() == uint_constant_op(8, 204)); - assert(b1->operation() == int_constant_op(8, -52)); + assert(b1->GetOperation() == uint_constant_op(8, 204)); + assert(b1->GetOperation() == int_constant_op(8, -52)); - assert(b4->operation() == uint_constant_op(9, 204)); - assert(b4->operation() == int_constant_op(9, 204)); + assert(b4->GetOperation() == uint_constant_op(9, 204)); + assert(b4->GetOperation() == int_constant_op(9, 204)); auto plus_one_128 = output::GetNode(*create_bitconstant(graph.root(), ONE_64 ZERO_64)); - assert(plus_one_128->operation() == uint_constant_op(128, 1)); - assert(plus_one_128->operation() == int_constant_op(128, 1)); + assert(plus_one_128->GetOperation() == uint_constant_op(128, 1)); + assert(plus_one_128->GetOperation() == int_constant_op(128, 1)); auto minus_one_128 = output::GetNode(*create_bitconstant(graph.root(), MONE_64 MONE_64)); - assert(minus_one_128->operation() == int_constant_op(128, -1)); + assert(minus_one_128->GetOperation() == int_constant_op(128, -1)); jlm::rvsdg::view(graph.root(), stdout); @@ -982,11 +982,11 @@ types_bitstring_test_normalize(void) sum_nf->set_mutable(false); auto sum0 = output::GetNode(*bitadd_op::create(32, imp, c0)); - assert(sum0->operation() == bitadd_op(32)); + assert(sum0->GetOperation() == bitadd_op(32)); assert(sum0->ninputs() == 2); auto sum1 = output::GetNode(*bitadd_op::create(32, sum0->output(0), c1)); - assert(sum1->operation() == bitadd_op(32)); + 
assert(sum1->GetOperation() == bitadd_op(32)); assert(sum1->ninputs() == 2); auto & exp = jlm::tests::GraphExport::Create(*sum1->output(0), "dummy"); @@ -996,7 +996,7 @@ types_bitstring_test_normalize(void) graph.prune(); auto origin = dynamic_cast(exp.origin()); - assert(origin->node()->operation() == bitadd_op(32)); + assert(origin->node()->GetOperation() == bitadd_op(32)); assert(origin->node()->ninputs() == 2); auto op1 = origin->node()->input(0)->origin(); auto op2 = origin->node()->input(1)->origin(); @@ -1007,7 +1007,7 @@ types_bitstring_test_normalize(void) op2 = tmp; } /* FIXME: the graph traversers are currently broken, that is why it won't normalize */ - assert(output::GetNode(*op1)->operation() == int_constant_op(32, 3 + 4)); + assert(output::GetNode(*op1)->GetOperation() == int_constant_op(32, 3 + 4)); assert(op2 == imp); jlm::rvsdg::view(graph.root(), stdout); @@ -1019,7 +1019,7 @@ static void assert_constant(jlm::rvsdg::output * bitstr, size_t nbits, const char bits[]) { auto node = jlm::rvsdg::output::GetNode(*bitstr); - auto op = dynamic_cast(node->operation()); + auto op = dynamic_cast(node->GetOperation()); assert(op.value() == jlm::rvsdg::bitvalue_repr(std::string(bits, nbits).c_str())); } @@ -1052,15 +1052,15 @@ types_bitstring_test_reduction(void) auto node = output::GetNode(*jlm::rvsdg::bitslice(concat, 8, 24)); auto o0 = dynamic_cast(node->input(0)->origin()); auto o1 = dynamic_cast(node->input(1)->origin()); - assert(dynamic_cast(&node->operation())); + assert(dynamic_cast(&node->GetOperation())); assert(node->ninputs() == 2); - assert(dynamic_cast(&o0->node()->operation())); - assert(dynamic_cast(&o1->node()->operation())); + assert(dynamic_cast(&o0->node()->GetOperation())); + assert(dynamic_cast(&o1->node()->GetOperation())); const bitslice_op * attrs; - attrs = dynamic_cast(&o0->node()->operation()); + attrs = dynamic_cast(&o0->node()->GetOperation()); assert((attrs->low() == 8) && (attrs->high() == 16)); - attrs = 
dynamic_cast(&o1->node()->operation()); + attrs = dynamic_cast(&o1->node()->GetOperation()); assert((attrs->low() == 0) && (attrs->high() == 8)); assert(o0->node()->input(0)->origin() == x); @@ -1095,7 +1095,7 @@ types_bitstring_test_slice_concat(void) /* slice of constant */ auto a = output::GetNode(*jlm::rvsdg::bitslice(base_const1, 2, 6)); - auto & op = dynamic_cast(a->operation()); + auto & op = dynamic_cast(a->GetOperation()); assert(op.value() == bitvalue_repr("1101")); } @@ -1104,9 +1104,9 @@ types_bitstring_test_slice_concat(void) auto a = jlm::rvsdg::bitslice(base_x, 2, 6); auto b = output::GetNode(*jlm::rvsdg::bitslice(a, 1, 3)); - assert(dynamic_cast(&b->operation())); + assert(dynamic_cast(&b->GetOperation())); const bitslice_op * attrs; - attrs = dynamic_cast(&b->operation()); + attrs = dynamic_cast(&b->GetOperation()); assert(attrs->low() == 3 && attrs->high() == 5); } @@ -1132,7 +1132,7 @@ types_bitstring_test_slice_concat(void) auto a = jlm::rvsdg::bitconcat({ base_x, base_y }); auto b = output::GetNode(*jlm::rvsdg::bitconcat({ a, base_z })); - assert(dynamic_cast(&b->operation())); + assert(dynamic_cast(&b->GetOperation())); assert(b->ninputs() == 3); assert(b->input(0)->origin() == base_x); assert(b->input(1)->origin() == base_y); @@ -1159,7 +1159,7 @@ types_bitstring_test_slice_concat(void) /* concat of constants */ auto a = output::GetNode(*jlm::rvsdg::bitconcat({ base_const1, base_const2 })); - auto & op = dynamic_cast(a->operation()); + auto & op = dynamic_cast(a->GetOperation()); assert(op.value() == bitvalue_repr("0011011111001000")); } diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 956aaf5c7..db046df4a 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -33,7 +33,7 @@ test_gamma(void) jlm::tests::GraphExport::Create(*gamma->output(0), "dummy"); - assert(gamma && gamma->operation() == GammaOperation(3)); + assert(gamma && gamma->GetOperation() == GammaOperation(3)); /* test 
gamma copy */ @@ -138,8 +138,8 @@ test_control_constant_reduction() jlm::rvsdg::view(graph.root(), stdout); auto match = output::GetNode(*ex1.origin()); - assert(match && is(match->operation())); - auto & match_op = to_match_op(match->operation()); + assert(match && is(match->GetOperation())); + auto & match_op = to_match_op(match->GetOperation()); assert(match_op.default_alternative() == 0); assert(output::GetNode(*ex2.origin()) == gamma); From 7683a97aa3ddeca7f18a4522d1b9f502ccfd3f7f Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Sun, 1 Dec 2024 11:18:19 +0100 Subject: [PATCH 125/170] lambda: revise API as well as results, arguments and outputs (#658) Remove all auxiliary input/output/argument/result classes for lambda. Provide new API for mapping pieces to context variables. Change all dispatch to be based on checking for "lambda node" instead of subclassed inputs/outputs. This leads to: - making the lambda abstraction a generic rvsdg concept - removing all needs to implement subclasses of argument, result, input and output - dispatching purely based on node kind --- .../rhls2firrtl/verilator-harness-hls.cpp | 4 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 21 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 16 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 20 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 45 +- .../rvsdg2rhls/remove-unused-state.cpp | 22 +- .../rvsdg2rhls/remove-unused-state.hpp | 4 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 21 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 20 +- .../InterProceduralGraphConversion.cpp | 11 +- jlm/llvm/ir/operators/call.cpp | 23 +- jlm/llvm/ir/operators/call.hpp | 43 +- jlm/llvm/ir/operators/lambda.cpp | 256 +++---- jlm/llvm/ir/operators/lambda.hpp | 503 +++---------- jlm/llvm/opt/DeadNodeElimination.cpp | 26 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 31 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 14 
+- .../opt/alias-analyses/PointerObjectSet.cpp | 32 +- .../RegionAwareMemoryNodeProvider.cpp | 13 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 50 +- .../TopDownMemoryNodeEliminator.cpp | 10 +- jlm/llvm/opt/inlining.cpp | 12 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 8 +- scripts/run-hls-test.sh | 4 +- tests/TestRvsdgs.cpp | 682 +++++++++--------- tests/TestRvsdgs.hpp | 6 +- .../rvsdg2rhls/DeadNodeEliminationTests.cpp | 6 +- .../rvsdg2rhls/MemoryConverterTests.cpp | 28 +- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 8 +- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 12 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 6 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 8 +- .../rvsdg2rhls/test-loop-passthrough.cpp | 4 +- .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 23 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 48 +- tests/jlm/llvm/ir/operators/TestLambda.cpp | 116 +-- tests/jlm/llvm/ir/operators/TestPhi.cpp | 12 +- .../opt/InvariantValueRedirectionTests.cpp | 100 +-- tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp | 2 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 29 +- .../llvm/opt/alias-analyses/TestAndersen.cpp | 73 +- .../alias-analyses/TestMemoryStateEncoder.cpp | 369 ++++++---- .../alias-analyses/TestPointerObjectSet.cpp | 8 +- .../opt/alias-analyses/TestSteensgaard.cpp | 95 +-- tests/jlm/llvm/opt/test-cne.cpp | 12 +- tests/jlm/llvm/opt/test-inlining.cpp | 40 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 16 +- 48 files changed, 1404 insertions(+), 1510 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index 765826dae..6f56039de 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -490,10 +490,10 @@ VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) cpp << " top->i_data_" << i << " = (uint64_t) a" << i << ";\n"; register_ix++; } - for (size_t i = 0; i < ln->ncvarguments(); ++i) + 
for (const auto & ctxvar : ln->GetContextVars()) { std::string name; - if (auto graphImport = dynamic_cast(ln->input(i)->origin())) + if (auto graphImport = dynamic_cast(ctxvar.input->origin())) { name = graphImport->Name(); } diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index b4fc8dc12..a7fe6275c 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -14,7 +14,7 @@ namespace jlm::hls { static bool -IsPassthroughArgument(const rvsdg::RegionArgument & argument) +IsPassthroughArgument(const rvsdg::output & argument) { if (argument.nusers() != 1) { @@ -25,7 +25,7 @@ IsPassthroughArgument(const rvsdg::RegionArgument & argument) } static bool -IsPassthroughResult(const rvsdg::RegionResult & result) +IsPassthroughResult(const rvsdg::input & result) { auto argument = dynamic_cast(result.origin()); return argument != nullptr; @@ -71,31 +71,30 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) lambdaNode.attributes()); rvsdg::SubstitutionMap substitutionMap; - for (size_t i = 0; i < lambdaNode.ncvarguments(); ++i) + for (const auto & ctxvar : lambdaNode.GetContextVars()) { - auto oldArgument = lambdaNode.cvargument(i); - auto origin = oldArgument->input()->origin(); + auto oldArgument = ctxvar.inner; + auto origin = ctxvar.input->origin(); - auto newArgument = newLambda->add_ctxvar(origin); + auto newArgument = newLambda->AddContextVar(*origin).inner; substitutionMap.insert(oldArgument, newArgument); } size_t new_i = 0; - for (size_t i = 0; i < lambdaNode.nfctarguments(); ++i) + auto newArgs = newLambda->GetFunctionArguments(); + for (auto argument : lambdaNode.GetFunctionArguments()) { - auto argument = lambdaNode.fctargument(i); if (!IsPassthroughArgument(*argument)) { - substitutionMap.insert(argument, newLambda->fctargument(new_i)); + substitutionMap.insert(argument, newArgs[new_i]); new_i++; } } 
lambdaNode.subregion()->copy(newLambda->subregion(), substitutionMap, false, false); std::vector newResults; - for (size_t i = 0; i < lambdaNode.nfctresults(); ++i) + for (auto result : lambdaNode.GetFunctionResults()) { - auto result = lambdaNode.fctresult(i); if (!IsPassthroughResult(*result)) { newResults.push_back(substitutionMap.lookup(result->origin())); diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index a64715d43..ad0adf254 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -88,7 +88,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) } else if (auto lambda = dynamic_cast(region->node())) { - output = lambda->add_ctxvar(output); + output = lambda->AddContextVar(*output).inner; } else { diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 41c757245..7ee354e2d 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -51,23 +51,25 @@ add_lambda_argument(llvm::lambda::node * ln, std::shared_ptrattributes()); rvsdg::SubstitutionMap smap; - for (size_t i = 0; i < ln->ncvarguments(); ++i) + for (const auto & ctxvar : ln->GetContextVars()) { - // copy over cvarguments - smap.insert(ln->cvargument(i), new_lambda->add_ctxvar(ln->cvargument(i)->input()->origin())); + // copy over context vars + smap.insert(ctxvar.inner, new_lambda->AddContextVar(*ctxvar.input->origin()).inner); } - for (size_t i = 0; i < ln->nfctarguments(); ++i) + auto old_args = ln->GetFunctionArguments(); + auto new_args = new_lambda->GetFunctionArguments(); + for (size_t i = 0; i < old_args.size(); ++i) { - smap.insert(ln->fctargument(i), new_lambda->fctargument(i)); + smap.insert(old_args[i], new_args[i]); } // jlm::rvsdg::view(ln->subregion(), stdout); // jlm::rvsdg::view(new_lambda->subregion(), stdout); ln->subregion()->copy(new_lambda->subregion(), smap, 
false, false); std::vector new_results; - for (size_t i = 0; i < ln->nfctresults(); ++i) + for (auto result : ln->GetFunctionResults()) { - new_results.push_back(smap.lookup(ln->fctresult(i)->origin())); + new_results.push_back(smap.lookup(result->origin())); } auto new_out = new_lambda->finalize(new_results); diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 153eec06d..122a2df7c 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -28,18 +28,20 @@ change_function_name(llvm::lambda::node * ln, const std::string & name) /* add context variables */ rvsdg::SubstitutionMap subregionmap; - for (auto & cv : ln->ctxvars()) + for (const auto & cv : ln->GetContextVars()) { - auto origin = cv.origin(); - auto newcv = lambda->add_ctxvar(origin); - subregionmap.insert(cv.argument(), newcv); + auto origin = cv.input->origin(); + auto newcv = lambda->AddContextVar(*origin); + subregionmap.insert(cv.inner, newcv.inner); } /* collect function arguments */ - for (size_t n = 0; n < ln->nfctarguments(); n++) + auto args = ln->GetFunctionArguments(); + auto new_args = lambda->GetFunctionArguments(); + for (size_t n = 0; n < args.size(); n++) { - lambda->fctargument(n)->set_attributes(ln->fctargument(n)->attributes()); - subregionmap.insert(ln->fctargument(n), lambda->fctargument(n)); + lambda->SetArgumentAttributes(*new_args[n], ln->GetArgumentAttributes(*args[n])); + subregionmap.insert(args[n], new_args[n]); } /* copy subregion */ @@ -47,8 +49,8 @@ change_function_name(llvm::lambda::node * ln, const std::string & name) /* collect function results */ std::vector results; - for (auto & result : ln->fctresults()) - results.push_back(subregionmap.lookup(result.origin())); + for (auto result : ln->GetFunctionResults()) + results.push_back(subregionmap.lookup(result->origin())); /* finalize lambda */ lambda->finalize(results); diff --git 
a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 114c0d308..5e92e73ac 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -235,14 +235,14 @@ find_decouple_response( const jlm::llvm::lambda::node * lambda, const jlm::rvsdg::bitconstant_op * request_constant) { - jlm::rvsdg::RegionArgument * response_function = nullptr; - for (size_t i = 0; i < lambda->ncvarguments(); ++i) + jlm::rvsdg::output * response_function = nullptr; + for (const auto & ctxvar : lambda->GetContextVars()) { - auto ip = lambda->cvargument(i)->input(); + auto ip = ctxvar.input; if (dynamic_cast(ip) && get_impport_function_name(ip) == "decouple_response") { - response_function = lambda->cvargument(i); + response_function = ctxvar.inner; } } JLM_ASSERT(response_function == nullptr); @@ -647,16 +647,20 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) lambda->attributes()); rvsdg::SubstitutionMap smap; - for (size_t i = 0; i < lambda->ncvarguments(); ++i) + for (const auto & ctxvar : lambda->GetContextVars()) { - smap.insert( - lambda->cvargument(i), - newLambda->add_ctxvar(lambda->cvargument(i)->input()->origin())); + smap.insert(ctxvar.inner, newLambda->AddContextVar(*ctxvar.input->origin()).inner); } - for (size_t i = 0; i < lambda->nfctarguments(); ++i) + auto args = lambda->GetFunctionArguments(); + auto newArgs = newLambda->GetFunctionArguments(); + // The new function has more arguments than the old function. + // Substitution of existing arguments is safe, but note + // that this is not an isomorphism. 
+ JLM_ASSERT(args.size() <= newArgs.size()); + for (size_t i = 0; i < args.size(); ++i) { - smap.insert(lambda->fctargument(i), newLambda->fctargument(i)); + smap.insert(args[i], newArgs[i]); } lambda->subregion()->copy(newLambda->subregion(), smap, false, false); @@ -669,7 +673,7 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) std::vector newResults; // The new arguments are placed directly after the original arguments so we create an index that // points to the first new argument - auto newArgumentsIndex = lambda->nfctarguments(); + auto newArgumentsIndex = args.size(); for (auto & portNode : portNodes) { auto loadNodes = std::get<0>(portNode); @@ -695,9 +699,9 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) } std::vector originalResults; - for (auto & result : lambda->fctresults()) + for (auto result : lambda->GetFunctionResults()) { - originalResults.push_back(smap.lookup(result.origin())); + originalResults.push_back(smap.lookup(result->origin())); } originalResults.insert(originalResults.end(), newResults.begin(), newResults.end()); auto newOut = newLambda->finalize(originalResults); @@ -724,19 +728,20 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) newLambda = jlm::util::AssertedCast(root->Nodes().begin().ptr()); // Go through in reverse since we are removing things - for (int i = newLambda->ncvarguments() - 1; i >= 0; --i) + auto ctxvars = newLambda->GetContextVars(); + for (size_t n = ctxvars.size(); n > 0; --n) { - auto cvarg = newLambda->cvargument(i); - if (dynamic_cast(&cvarg->type())) + const auto & ctxvar = ctxvars[n - 1]; + if (dynamic_cast(&ctxvar.input->type())) { // The only functions at this time is decoupled loads that are encoded as functions by the // user auto visited = std::unordered_set(); - if (IsDecoupledFunctionPointer(cvarg->input()->origin(), visited)) + if (IsDecoupledFunctionPointer(ctxvar.input->origin(), visited)) { - JLM_ASSERT(cvarg->nusers() == 0); - auto cvip = cvarg->input(); - 
newLambda->subregion()->RemoveArgument(cvarg->index()); + JLM_ASSERT(ctxvar.inner->nusers() == 0); + auto cvip = ctxvar.input; + newLambda->subregion()->RemoveArgument(ctxvar.inner->index()); // TODO: work around const newLambda->RemoveInput(cvip->index()); auto graphImport = util::AssertedCast(cvip->origin()); diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index 6fbb4c493..4f626bd7b 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -206,28 +206,30 @@ remove_lambda_passthrough(llvm::lambda::node * ln) ln->attributes()); rvsdg::SubstitutionMap smap; - for (size_t i = 0; i < ln->ncvarguments(); ++i) + for (const auto & ctxvar : ln->GetContextVars()) { - // copy over cvarguments - smap.insert(ln->cvargument(i), new_lambda->add_ctxvar(ln->cvargument(i)->input()->origin())); + // copy over context vars + smap.insert(ctxvar.inner, new_lambda->AddContextVar(*ctxvar.input->origin()).inner); } size_t new_i = 0; - for (size_t i = 0; i < ln->nfctarguments(); ++i) + auto args = ln->GetFunctionArguments(); + auto new_args = new_lambda->GetFunctionArguments(); + JLM_ASSERT(args.size() >= new_args.size()); + for (size_t i = 0; i < args.size(); ++i) { - auto arg = ln->fctargument(i); + auto arg = args[i]; if (!is_passthrough(arg)) { - smap.insert(arg, new_lambda->fctargument(new_i)); + smap.insert(arg, new_args[new_i]); new_i++; } } ln->subregion()->copy(new_lambda->subregion(), smap, false, false); std::vector new_results; - for (size_t i = 0; i < ln->type().NumResults(); ++i) + for (auto res : ln->GetFunctionResults()) { - auto res = ln->fctresult(i); if (!is_passthrough(res)) { new_results.push_back(smap.lookup(res->origin())); @@ -263,7 +265,7 @@ remove_region_passthrough(const rvsdg::RegionArgument * arg) } bool -is_passthrough(const rvsdg::RegionResult * res) +is_passthrough(const rvsdg::input * res) { auto arg = 
dynamic_cast(res->origin()); if (arg) @@ -274,7 +276,7 @@ is_passthrough(const rvsdg::RegionResult * res) } bool -is_passthrough(const rvsdg::RegionArgument * arg) +is_passthrough(const rvsdg::output * arg) { if (arg->nusers() == 1) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp index 6a1d721db..f41f55ea0 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.hpp @@ -15,10 +15,10 @@ namespace jlm::hls { bool -is_passthrough(const rvsdg::RegionArgument * arg); +is_passthrough(const rvsdg::output * arg); bool -is_passthrough(const rvsdg::RegionResult * res); +is_passthrough(const rvsdg::input * res); llvm::lambda::node * remove_lambda_passthrough(llvm::lambda::node * ln); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 2f6dd010d..20e9534d2 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -287,18 +287,21 @@ change_linkage(llvm::lambda::node * ln, llvm::linkage link) /* add context variables */ rvsdg::SubstitutionMap subregionmap; - for (auto & cv : ln->ctxvars()) + for (const auto & cv : ln->GetContextVars()) { - auto origin = cv.origin(); - auto newcv = lambda->add_ctxvar(origin); - subregionmap.insert(cv.argument(), newcv); + auto origin = cv.input->origin(); + auto newcv = lambda->AddContextVar(*origin); + subregionmap.insert(cv.inner, newcv.inner); } /* collect function arguments */ - for (size_t n = 0; n < ln->nfctarguments(); n++) + auto args = ln->GetFunctionArguments(); + auto newArgs = lambda->GetFunctionArguments(); + JLM_ASSERT(args.size() == newArgs.size()); + for (size_t n = 0; n < args.size(); n++) { - lambda->fctargument(n)->set_attributes(ln->fctargument(n)->attributes()); - subregionmap.insert(ln->fctargument(n), lambda->fctargument(n)); + lambda->SetArgumentAttributes(*newArgs[n], 
ln->GetArgumentAttributes(*args[n])); + subregionmap.insert(args[n], newArgs[n]); } /* copy subregion */ @@ -306,8 +309,8 @@ change_linkage(llvm::lambda::node * ln, llvm::linkage link) /* collect function results */ std::vector results; - for (auto & result : ln->fctresults()) - results.push_back(subregionmap.lookup(result.origin())); + for (auto result : ln->GetFunctionResults()) + results.push_back(subregionmap.lookup(result->origin())); /* finalize lambda */ lambda->finalize(results); diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 7532530a3..88de496d3 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -127,27 +127,27 @@ create_cfg(const lambda::node & lambda, context & ctx) ctx.set_cfg(cfg.get()); /* add arguments */ - size_t n = 0; - for (auto & fctarg : lambda.fctarguments()) + for (auto fctarg : lambda.GetFunctionArguments()) { - auto name = util::strfmt("_a", n++, "_"); - auto argument = llvm::argument::create(name, fctarg.Type(), fctarg.attributes()); + auto name = util::strfmt("_a", fctarg->index(), "_"); + auto argument = + llvm::argument::create(name, fctarg->Type(), lambda.GetArgumentAttributes(*fctarg)); auto v = cfg->entry()->append_argument(std::move(argument)); - ctx.insert(&fctarg, v); + ctx.insert(fctarg, v); } /* add context variables */ - for (auto & cv : lambda.ctxvars()) + for (const auto & cv : lambda.GetContextVars()) { - auto v = ctx.variable(cv.origin()); - ctx.insert(cv.argument(), v); + auto v = ctx.variable(cv.input->origin()); + ctx.insert(cv.inner, v); } convert_region(*lambda.subregion(), ctx); /* add results */ - for (auto & result : lambda.fctresults()) - cfg->exit()->append_result(ctx.variable(result.origin())); + for (auto result : lambda.GetFunctionResults()) + cfg->exit()->append_result(ctx.variable(result->origin())); ctx.lpbb()->add_outedge(cfg->exit()); ctx.set_lpbb(nullptr); diff --git 
a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 4af4abd23..9c2ecc0bd 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -614,14 +614,15 @@ Convert( /* * Add arguments */ - JLM_ASSERT(entryAggregationNode.narguments() == lambdaNode.nfctarguments()); + JLM_ASSERT(entryAggregationNode.narguments() == lambdaNode.GetFunctionArguments().size()); + auto lambdaArgs = lambdaNode.GetFunctionArguments(); for (size_t n = 0; n < entryAggregationNode.narguments(); n++) { auto functionNodeArgument = entryAggregationNode.argument(n); - auto lambdaNodeArgument = lambdaNode.fctargument(n); + auto lambdaNodeArgument = lambdaArgs[n]; topVariableMap.insert(functionNodeArgument, lambdaNodeArgument); - lambdaNodeArgument->set_attributes(functionNodeArgument->attributes()); + lambdaNode.SetArgumentAttributes(*lambdaNodeArgument, functionNodeArgument->attributes()); } /* @@ -631,7 +632,7 @@ Convert( { if (outerVariableMap.contains(&v)) { - topVariableMap.insert(&v, lambdaNode.add_ctxvar(outerVariableMap.lookup(&v))); + topVariableMap.insert(&v, lambdaNode.AddContextVar(*outerVariableMap.lookup(&v)).inner); } else { @@ -912,7 +913,7 @@ AnnotateAggregationTree( return demandMap; } -static lambda::output * +static rvsdg::output * ConvertAggregationTreeToLambda( const aggnode & aggregationTreeRoot, const AnnotationMap & demandMap, diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index de2ed69d9..c5480a3a8 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -157,10 +157,7 @@ CallNode::TraceFunctionInput(const CallNode & callNode) while (true) { - if (is(origin)) - return origin; - - if (is(origin)) + if (rvsdg::TryGetOwnerNode(*origin)) return origin; if (is(origin)) @@ -174,11 +171,17 @@ CallNode::TraceFunctionInput(const CallNode & callNode) return origin; } - if (is(origin)) + if (auto 
lambda = rvsdg::TryGetRegionParentNode(*origin)) { - auto argument = util::AssertedCast(origin); - origin = argument->input()->origin(); - continue; + if (auto ctxvar = lambda->MapBinderContextVar(*origin)) + { + origin = ctxvar->input->origin(); + continue; + } + else + { + return origin; + } } if (auto gammaOutput = dynamic_cast(origin)) @@ -241,9 +244,9 @@ CallNode::ClassifyCall(const CallNode & callNode) { auto output = CallNode::TraceFunctionInput(callNode); - if (auto lambdaOutput = dynamic_cast(output)) + if (rvsdg::TryGetOwnerNode(*output)) { - return CallTypeClassifier::CreateNonRecursiveDirectCallClassifier(*lambdaOutput); + return CallTypeClassifier::CreateNonRecursiveDirectCallClassifier(*output); } if (auto argument = dynamic_cast(output)) diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index bbfbc454d..f08a658d3 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -172,12 +172,12 @@ class CallTypeClassifier final * * @return The called function. */ - [[nodiscard]] lambda::output & + [[nodiscard]] rvsdg::output & GetLambdaOutput() const noexcept { if (GetCallType() == CallType::NonRecursiveDirectCall) { - return *jlm::util::AssertedCast(Output_); + return *Output_; } JLM_ASSERT(GetCallType() == CallType::RecursiveDirectCall); @@ -187,8 +187,7 @@ class CallTypeClassifier final * would be better if we did not use the index for retrieving the result, but instead * explicitly encoded it in an phi_argument. */ - return *jlm::util::AssertedCast( - argument->region()->result(argument->index())->origin()); + return *argument->region()->result(argument->index())->origin(); } /** \brief Returns the imported function. @@ -218,12 +217,31 @@ class CallTypeClassifier final return *Output_; } + /** + \brief Classify callee as non-recursive. + + \param output + Output representing the function called (must be a lambda). + + \pre + The given output must belong to a lambda node. 
+ */ static std::unique_ptr - CreateNonRecursiveDirectCallClassifier(lambda::output & output) + CreateNonRecursiveDirectCallClassifier(rvsdg::output & output) { + rvsdg::AssertGetOwnerNode(output); return std::make_unique(CallType::NonRecursiveDirectCall, output); } + /** + \brief Classify callee as recursive. + + \param output + Output representing the function called (must be a phi argument). + + \pre + The given output must belong to a phi node. + */ static std::unique_ptr CreateRecursiveDirectCallClassifier(rvsdg::RegionArgument & output) { @@ -231,6 +249,15 @@ class CallTypeClassifier final return std::make_unique(CallType::RecursiveDirectCall, output); } + /** + \brief Classify callee as external. + + \param argument + Output representing the function called (must be a graph argument). + + \pre + The given output must be an argument to the root region of the graph. + */ static std::unique_ptr CreateExternalCallClassifier(rvsdg::RegionArgument & argument) { @@ -238,6 +265,12 @@ class CallTypeClassifier final return std::make_unique(CallType::ExternalCall, argument); } + /** + \brief Classify callee as indirect. + + \param output + Output representing the function called (supposed to be a pointer). 
+ */ static std::unique_ptr CreateIndirectCallClassifier(jlm::rvsdg::output & output) { diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index c208266ff..ad32f7609 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -41,133 +41,97 @@ operation::copy() const node::~node() = default; -const lambda::operation & -node::GetOperation() const noexcept -{ - return *jlm::util::AssertedCast(&StructuralNode::GetOperation()); -} - -node::fctargument_range -node::fctarguments() -{ - fctargiterator end(nullptr); - - if (nfctarguments() == 0) - return { end, end }; - - fctargiterator begin(fctargument(0)); - return { begin, end }; -} - -node::fctargument_constrange -node::fctarguments() const +node::node(rvsdg::Region & parent, lambda::operation op) + : StructuralNode(std::move(op), &parent, 1) { - fctargconstiterator end(nullptr); - - if (nfctarguments() == 0) - return { end, end }; - - fctargconstiterator begin(fctargument(0)); - return { begin, end }; -} - -node::ctxvar_range -node::ctxvars() -{ - cviterator end(nullptr); - - if (ncvarguments() == 0) - return { end, end }; - - cviterator begin(input(0)); - return { begin, end }; + ArgumentAttributes_.resize(GetOperation().Type()->NumArguments()); } -node::ctxvar_constrange -node::ctxvars() const -{ - cvconstiterator end(nullptr); - - if (ncvarguments() == 0) - return { end, end }; - - cvconstiterator begin(input(0)); - return { begin, end }; -} - -node::fctresult_range -node::fctresults() -{ - fctresiterator end(nullptr); - - if (nfctresults() == 0) - return { end, end }; - - fctresiterator begin(fctresult(0)); - return { begin, end }; -} - -node::fctresult_constrange -node::fctresults() const +const lambda::operation & +node::GetOperation() const noexcept { - fctresconstiterator end(nullptr); - - if (nfctresults() == 0) - return { end, end }; - - fctresconstiterator begin(fctresult(0)); - return { begin, end }; + return 
*jlm::util::AssertedCast(&StructuralNode::GetOperation()); } -cvinput * -node::input(size_t n) const noexcept +[[nodiscard]] std::vector +node::GetFunctionArguments() const { - return util::AssertedCast(StructuralNode::input(n)); + std::vector arguments; + const auto & type = GetOperation().Type(); + for (std::size_t n = 0; n < type->Arguments().size(); ++n) + { + arguments.push_back(subregion()->argument(n)); + } + return arguments; } -lambda::output * -node::output() const noexcept +[[nodiscard]] std::vector +node::GetFunctionResults() const { - return util::AssertedCast(StructuralNode::output(0)); + std::vector results; + for (std::size_t n = 0; n < subregion()->nresults(); ++n) + { + results.push_back(subregion()->result(n)); + } + return results; } -lambda::fctargument * -node::fctargument(size_t n) const noexcept +[[nodiscard]] node::ContextVar +node::MapInputContextVar(const rvsdg::input & input) const noexcept { - return util::AssertedCast(subregion()->argument(n)); + JLM_ASSERT(rvsdg::TryGetOwnerNode(input) == this); + return ContextVar{ const_cast(&input), + subregion()->argument(GetOperation().Type()->NumArguments() + input.index()) }; } -lambda::cvargument * -node::cvargument(size_t n) const noexcept +[[nodiscard]] std::optional +node::MapBinderContextVar(const rvsdg::output & output) const noexcept { - return input(n)->argument(); + JLM_ASSERT(rvsdg::TryGetOwnerRegion(output) == subregion()); + auto numArguments = GetOperation().Type()->NumArguments(); + if (output.index() >= numArguments) + { + return ContextVar{ input(output.index() - GetOperation().Type()->NumArguments()), + const_cast(&output) }; + } + else + { + return std::nullopt; + } } -lambda::result * -node::fctresult(size_t n) const noexcept +[[nodiscard]] std::vector +node::GetContextVars() const noexcept { - return util::AssertedCast(subregion()->result(n)); + std::vector vars; + for (size_t n = 0; n < ninputs(); ++n) + { + vars.push_back( + ContextVar{ input(n), subregion()->argument(n + 
GetOperation().Type()->NumArguments()) }); + } + return vars; } -cvargument * -node::add_ctxvar(jlm::rvsdg::output * origin) +node::ContextVar +node::AddContextVar(jlm::rvsdg::output & origin) { - auto input = cvinput::create(this, origin); - return cvargument::create(subregion(), input); + auto input = rvsdg::StructuralInput::create(this, &origin, origin.Type()); + auto argument = &rvsdg::RegionArgument::Create(*subregion(), input, origin.Type()); + return ContextVar{ input, argument }; } -rvsdg::RegionArgument & +rvsdg::output & node::GetMemoryStateRegionArgument() const noexcept { - auto argument = fctargument(nfctarguments() - 1); + auto argument = GetFunctionArguments().back(); JLM_ASSERT(is(argument->type())); return *argument; } -rvsdg::RegionResult & +rvsdg::input & node::GetMemoryStateRegionResult() const noexcept { - auto result = fctresult(nfctresults() - 1); + auto result = GetFunctionResults().back(); JLM_ASSERT(is(result->type())); return *result; } @@ -206,15 +170,15 @@ node::create( const attributeset & attributes) { lambda::operation op(type, name, linkage, attributes); - auto node = new lambda::node(parent, std::move(op)); + auto node = new lambda::node(*parent, std::move(op)); for (auto & argumentType : type->Arguments()) - lambda::fctargument::create(node->subregion(), argumentType); + rvsdg::RegionArgument::Create(*node->subregion(), nullptr, argumentType); return node; } -lambda::output * +rvsdg::output * node::finalize(const std::vector & results) { /* check if finalized was already called */ @@ -239,9 +203,15 @@ node::finalize(const std::vector & results) } for (const auto & origin : results) - lambda::result::create(origin); + rvsdg::RegionResult::Create(*origin->region(), *origin, nullptr, origin->Type()); - return output::create(this, PointerType::Create()); + return append_output(std::make_unique(this, PointerType::Create())); +} + +rvsdg::output * +node::output() const noexcept +{ + return StructuralNode::output(0); } lambda::node * @@ 
-257,18 +227,20 @@ node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const /* add context variables */ rvsdg::SubstitutionMap subregionmap; - for (auto & cv : ctxvars()) + for (const auto & cv : GetContextVars()) { - auto origin = smap.lookup(cv.origin()); - auto newcv = lambda->add_ctxvar(origin); - subregionmap.insert(cv.argument(), newcv); + auto origin = smap.lookup(cv.input->origin()); + subregionmap.insert(cv.inner, lambda->AddContextVar(*origin).inner); } /* collect function arguments */ - for (size_t n = 0; n < nfctarguments(); n++) + auto args = GetFunctionArguments(); + auto newArgs = lambda->GetFunctionArguments(); + JLM_ASSERT(args.size() == newArgs.size()); + for (size_t n = 0; n < args.size(); n++) { - lambda->fctargument(n)->set_attributes(fctargument(n)->attributes()); - subregionmap.insert(fctargument(n), lambda->fctargument(n)); + lambda->SetArgumentAttributes(*newArgs[n], GetArgumentAttributes(*args[n])); + subregionmap.insert(args[n], newArgs[n]); } /* copy subregion */ @@ -276,13 +248,15 @@ node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const /* collect function results */ std::vector results; - for (auto & result : fctresults()) - results.push_back(subregionmap.lookup(result.origin())); + for (auto result : GetFunctionResults()) + results.push_back(subregionmap.lookup(result->origin())); /* finalize lambda */ auto o = lambda->finalize(results); smap.insert(output(), o); + lambda->ArgumentAttributes_ = ArgumentAttributes_; + return lambda; } @@ -301,16 +275,18 @@ node::ComputeCallSummary() const auto input = worklist.front(); worklist.pop_front(); - if (auto cvinput = dynamic_cast(input)) + auto inputNode = rvsdg::input::GetNode(*input); + + if (auto lambdaNode = rvsdg::TryGetOwnerNode(*input)) { - auto argument = cvinput->argument(); - worklist.insert(worklist.end(), argument->begin(), argument->end()); + auto & argument = *lambdaNode->MapInputContextVar(*input).inner; + worklist.insert(worklist.end(), 
argument.begin(), argument.end()); continue; } - if (auto lambdaResult = dynamic_cast(input)) + if (rvsdg::TryGetRegionParentNode(*input)) { - otherUsers.emplace_back(lambdaResult); + otherUsers.emplace_back(input); continue; } @@ -372,7 +348,6 @@ node::ComputeCallSummary() const continue; } - auto inputNode = rvsdg::input::GetNode(*input); if (is(inputNode) && input == inputNode->input(0)) { directCalls.emplace_back(util::AssertedCast(inputNode)); @@ -385,8 +360,7 @@ node::ComputeCallSummary() const continue; } - auto simpleInput = dynamic_cast(input); - if (simpleInput != nullptr) + if (auto simpleInput = dynamic_cast(input)) { otherUsers.emplace_back(simpleInput); continue; @@ -405,51 +379,17 @@ node::IsExported(const lambda::node & lambdaNode) return callSummary->IsExported(); } -/* lambda context variable input class */ - -cvinput::~cvinput() = default; - -cvargument * -cvinput::argument() const noexcept +[[nodiscard]] const jlm::llvm::attributeset & +node::GetArgumentAttributes(const rvsdg::output & argument) const noexcept { - return util::AssertedCast(arguments.first()); + JLM_ASSERT(argument.index() < ArgumentAttributes_.size()); + return ArgumentAttributes_[argument.index()]; } -/* lambda output class */ - -output::~output() = default; - -/* lambda function argument class */ - -fctargument::~fctargument() = default; - -fctargument & -fctargument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) -{ - JLM_ASSERT(input == nullptr); - return *fctargument::create(®ion, Type()); -} - -/* lambda context variable argument class */ - -cvargument::~cvargument() = default; - -cvargument & -cvargument::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) -{ - auto lambdaInput = util::AssertedCast(input); - return *cvargument::create(®ion, lambdaInput); -} - -/* lambda result class */ - -result::~result() = default; - -result & -result::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) +void +node::SetArgumentAttributes(rvsdg::output & 
argument, const jlm::llvm::attributeset & attributes) { - JLM_ASSERT(output == nullptr); - return *result::create(&origin); + ArgumentAttributes_[argument.index()] = attributes; } } diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index 987962302..df6e4b7f1 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -15,6 +15,7 @@ #include #include +#include #include namespace jlm::llvm @@ -101,27 +102,22 @@ class operation final : public rvsdg::StructuralOperation jlm::llvm::attributeset attributes_; }; -class cvargument; -class cvinput; -class fctargument; -class output; -class result; - /** \brief Lambda node * * A lambda node represents a lambda expression in the RVSDG. Its creation requires the invocation * of two functions: \ref create() and \ref finalize(). First, a node with only the function * arguments is created by invoking \ref create(). The free variables of the lambda expression can - * then be added to the lambda node using the \ref add_ctxvar() method, and the body of the lambda - * node can be created. Finally, the lambda node can be finalized by invoking \ref finalize(). + * then be added to the lambda node using the \ref AddContextVar() method, and the body of the + * lambda node can be created. Finally, the lambda node can be finalized by invoking \ref + * finalize(). * * The following snippet illustrates the creation of lambda nodes: * * \code{.cpp} * auto lambda = lambda::node::create(...); * ... - * auto cv1 = lambda->add_ctxvar(...); - * auto cv2 = lambda->add_ctxvar(...); + * auto cv1 = lambda->AddContextVar(...); + * auto cv2 = lambda->AddContextVar(...); * ... * // generate lambda body * ... 
@@ -133,51 +129,52 @@ class node final : public rvsdg::StructuralNode public: class CallSummary; -private: - class cviterator; - class cvconstiterator; - - class fctargiterator; - class fctargconstiterator; - - class fctresiterator; - class fctresconstiterator; - - using fctargument_range = jlm::util::iterator_range; - using fctargument_constrange = jlm::util::iterator_range; - - using ctxvar_range = jlm::util::iterator_range; - using ctxvar_constrange = jlm::util::iterator_range; - - using fctresult_range = jlm::util::iterator_range; - using fctresult_constrange = jlm::util::iterator_range; - -public: ~node() override; private: - node(rvsdg::Region * parent, lambda::operation && op) - : StructuralNode(op, parent, 1) - {} + node(rvsdg::Region & parent, lambda::operation op); public: - [[nodiscard]] fctargument_range - fctarguments(); - - [[nodiscard]] fctargument_constrange - fctarguments() const; - - ctxvar_range - ctxvars(); - - [[nodiscard]] ctxvar_constrange - ctxvars() const; + /** + * \brief Bound context variable + * + * Context variables may be bound at the point of creation of a + * lambda abstraction. These are represented as inputs to the + * lambda node itself, and made accessible to the body of the + * lambda in the form of an initial argument to the subregion. + */ + struct ContextVar + { + /** + * \brief Input variable bound into lambda node + * + * The input port into the lambda node that supplies the value + * of the context variable bound into the lambda at the + * time the lambda abstraction is built. + */ + rvsdg::input * input; + + /** + * \brief Access to bound object in subregion. + * + * Supplies access to the value bound into the lambda abstraction + * from inside the region contained in the lambda node. This + * evaluates to the value bound into the lambda. 
+ */ + rvsdg::output * inner; + }; + + [[nodiscard]] std::vector + GetFunctionArguments() const; + + [[nodiscard]] std::vector + GetFunctionResults() const; - fctresult_range - fctresults(); + [[nodiscard]] const jlm::llvm::attributeset & + GetArgumentAttributes(const rvsdg::output & argument) const noexcept; - [[nodiscard]] fctresult_constrange - fctresults() const; + void + SetArgumentAttributes(rvsdg::output & argument, const jlm::llvm::attributeset & attributes); [[nodiscard]] rvsdg::Region * subregion() const noexcept @@ -218,43 +215,80 @@ class node final : public rvsdg::StructuralNode return GetOperation().attributes(); } - [[nodiscard]] size_t - ncvarguments() const noexcept - { - return ninputs(); - } + /** + * \brief Adds a context/free variable to the lambda node. + * + * \param origin + * The value to be bound into the lambda node. + * + * \pre + * \p origin must be from the same region as the lambda node. + * + * \return The context variable argument of the lambda abstraction. + */ + ContextVar + AddContextVar(jlm::rvsdg::output & origin); - [[nodiscard]] size_t - nfctarguments() const noexcept - { - return subregion()->narguments() - ninputs(); - } + /** + * \brief Maps input to context variable. + * + * \param input + * Input to the lambda node. + * + * \returns + * The context variable description corresponding to the input. + * + * \pre + * \p input must be input to this node. + * + * Returns the context variable description corresponding + * to this input of the lambda node. All inputs to the lambda + * node are by definition bound context variables that are + * accessible in the subregion through the corresponding + * argument. 
+ */ + [[nodiscard]] ContextVar + MapInputContextVar(const rvsdg::input & input) const noexcept; - [[nodiscard]] size_t - nfctresults() const noexcept - { - return subregion()->nresults(); - } + /** + * \brief Maps bound variable reference to context variable + * + * \param output + * Region argument to lambda subregion + * + * \returns + * The context variable description corresponding to the argument + * + * \pre + * \p output must be an argument to the subregion of this node + * + * Returns the context variable description corresponding + * to this bound variable reference in the lambda node region. + * Note that some arguments of the region are formal call arguments + * and do not have an associated context variable description. + */ + [[nodiscard]] std::optional + MapBinderContextVar(const rvsdg::output & output) const noexcept; /** - * Adds a context/free variable to the lambda node. The \p origin must be from the same region - * as the lambda node. + * \brief Gets all bound context variables * - * \return The context variable argument from the lambda region. + * \returns + * The context variable descriptions. + * + * Returns all context variable descriptions. */ - lambda::cvargument * - add_ctxvar(jlm::rvsdg::output * origin); + [[nodiscard]] std::vector + GetContextVars() const noexcept; /** * Remove lambda inputs and their respective arguments. * * An input must match the condition specified by \p match and its argument must be dead. * - * @tparam F A type that supports the function call operator: bool operator(const cvinput&) + * @tparam F A type that supports the function call operator: bool operator(const rvsdg::input&) * @param match Defines the condition of the elements to remove. * @return The number of removed inputs. 
- * - * \see cvargument#IsDead() */ template size_t @@ -270,7 +304,7 @@ class node final : public rvsdg::StructuralNode size_t PruneLambdaInputs() { - auto match = [](const cvinput &) + auto match = [](const rvsdg::input &) { return true; }; @@ -278,21 +312,9 @@ class node final : public rvsdg::StructuralNode return RemoveLambdaInputsWhere(match); } - [[nodiscard]] cvinput * - input(size_t n) const noexcept; - - [[nodiscard]] lambda::output * + [[nodiscard]] rvsdg::output * output() const noexcept; - [[nodiscard]] lambda::fctargument * - fctargument(size_t n) const noexcept; - - [[nodiscard]] lambda::cvargument * - cvargument(size_t n) const noexcept; - - [[nodiscard]] lambda::result * - fctresult(size_t n) const noexcept; - lambda::node * copy(rvsdg::Region * region, const std::vector & operands) const override; @@ -302,13 +324,13 @@ class node final : public rvsdg::StructuralNode /** * @return The memory state argument of the lambda subregion. */ - [[nodiscard]] rvsdg::RegionArgument & + [[nodiscard]] rvsdg::output & GetMemoryStateRegionArgument() const noexcept; /** * @return The memory state result of the lambda subregion. */ - [[nodiscard]] rvsdg::RegionResult & + [[nodiscard]] rvsdg::input & GetMemoryStateRegionResult() const noexcept; /** @@ -338,8 +360,8 @@ class node final : public rvsdg::StructuralNode /** * Creates a lambda node in the region \p parent with the function type \p type and name \p name. * After the invocation of \ref create(), the lambda node only features the function arguments. - * Free variables can be added to the function node using \ref add_ctxvar(). The generation of the - * node can be finished using the \ref finalize() method. + * Free variables can be added to the function node using \ref AddContextVar(). The generation of + * the node can be finished using the \ref finalize() method. * * \param parent The region where the lambda node is created. * \param type The lambda node's type. 
@@ -377,7 +399,7 @@ class node final : public rvsdg::StructuralNode * * \return The output of the lambda node. */ - lambda::output * + rvsdg::output * finalize(const std::vector & results); /** @@ -398,302 +420,9 @@ class node final : public rvsdg::StructuralNode */ [[nodiscard]] static bool IsExported(const lambda::node & lambdaNode); -}; - -/** \brief Lambda context variable input - */ -class cvinput final : public rvsdg::StructuralInput -{ - friend ::jlm::llvm::lambda::node; - -public: - ~cvinput() override; - -private: - cvinput(lambda::node * node, jlm::rvsdg::output * origin) - : StructuralInput(node, origin, origin->Type()) - {} - - static cvinput * - create(lambda::node * node, jlm::rvsdg::output * origin) - { - auto input = std::unique_ptr(new cvinput(node, origin)); - return jlm::util::AssertedCast(node->append_input(std::move(input))); - } - -public: - [[nodiscard]] cvargument * - argument() const noexcept; - - [[nodiscard]] lambda::node * - node() const noexcept - { - return jlm::util::AssertedCast(StructuralInput::node()); - } -}; - -/** \brief Lambda context variable iterator - */ -class node::cviterator final : public jlm::rvsdg::input::iterator -{ - friend ::jlm::llvm::lambda::node; - - constexpr explicit cviterator(cvinput * input) - : jlm::rvsdg::input::iterator(input) - {} - - [[nodiscard]] cvinput * - next() const override - { - auto node = value()->node(); - auto index = value()->index(); - - return node->ninputs() > index + 1 ? node->input(index + 1) : nullptr; - } -}; - -/** \brief Lambda context variable const iterator - */ -class node::cvconstiterator final : public jlm::rvsdg::input::constiterator -{ - friend ::jlm::llvm::lambda::node; - - constexpr explicit cvconstiterator(const cvinput * input) - : jlm::rvsdg::input::constiterator(input) - {} - - [[nodiscard]] const cvinput * - next() const override - { - auto node = value()->node(); - auto index = value()->index(); - - return node->ninputs() > index + 1 ? 
node->input(index + 1) : nullptr; - } -}; - -/** \brief Lambda output - */ -class output final : public rvsdg::StructuralOutput -{ - friend ::jlm::llvm::lambda::node; - -public: - ~output() override; - - output(lambda::node * node, std::shared_ptr type) - : StructuralOutput(node, std::move(type)) - {} - -private: - static output * - create(lambda::node * node, std::shared_ptr type) - { - auto output = std::make_unique(node, std::move(type)); - return jlm::util::AssertedCast(node->append_output(std::move(output))); - } - -public: - lambda::node * - node() const noexcept - { - return jlm::util::AssertedCast(StructuralOutput::node()); - } -}; - -/** \brief Lambda function argument - */ -class fctargument final : public rvsdg::RegionArgument -{ - friend ::jlm::llvm::lambda::node; - -public: - ~fctargument() override; - - const jlm::llvm::attributeset & - attributes() const noexcept - { - return attributes_; - } - - void - set_attributes(const jlm::llvm::attributeset & attributes) - { - attributes_ = attributes; - } - - fctargument & - Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; private: - fctargument(rvsdg::Region * region, std::shared_ptr type) - : rvsdg::RegionArgument(region, nullptr, std::move(type)) - {} - - static fctargument * - create(rvsdg::Region * region, std::shared_ptr type) - { - auto argument = new fctargument(region, std::move(type)); - region->append_argument(argument); - return argument; - } - - jlm::llvm::attributeset attributes_; -}; - -/** \brief Lambda function argument iterator - */ -class node::fctargiterator final : public jlm::rvsdg::output::iterator -{ - friend ::jlm::llvm::lambda::node; - - constexpr explicit fctargiterator(lambda::fctargument * argument) - : jlm::rvsdg::output::iterator(argument) - {} - - [[nodiscard]] lambda::fctargument * - next() const override - { - auto index = value()->index(); - auto lambda = jlm::util::AssertedCast(value()->region()->node()); - - /* - This assumes that all function 
arguments were added to the lambda region - before any context variable was added. - */ - return lambda->nfctarguments() > index + 1 ? lambda->fctargument(index + 1) : nullptr; - } -}; - -/** \brief Lambda function argument const iterator - */ -class node::fctargconstiterator final - : public jlm::rvsdg::output::constiterator -{ - friend ::jlm::llvm::lambda::node; - - constexpr explicit fctargconstiterator(const lambda::fctargument * argument) - : jlm::rvsdg::output::constiterator(argument) - {} - - [[nodiscard]] const lambda::fctargument * - next() const override - { - auto index = value()->index(); - auto lambda = jlm::util::AssertedCast(value()->region()->node()); - - /* - This assumes that all function arguments were added to the lambda region - before any context variable was added. - */ - return lambda->nfctarguments() > index + 1 ? lambda->fctargument(index + 1) : nullptr; - } -}; - -/** \brief Lambda context variable argument - */ -class cvargument final : public rvsdg::RegionArgument -{ - friend ::jlm::llvm::lambda::node; - -public: - ~cvargument() override; - - cvargument & - Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) override; - -private: - cvargument(rvsdg::Region * region, cvinput * input) - : rvsdg::RegionArgument(region, input, input->Type()) - {} - - static cvargument * - create(rvsdg::Region * region, lambda::cvinput * input) - { - auto argument = new cvargument(region, input); - region->append_argument(argument); - return argument; - } - -public: - cvinput * - input() const noexcept - { - return jlm::util::AssertedCast(rvsdg::RegionArgument::input()); - } -}; - -/** \brief Lambda result - */ -class result final : public rvsdg::RegionResult -{ - friend ::jlm::llvm::lambda::node; - -public: - ~result() override; - - result & - Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) override; - -private: - explicit result(jlm::rvsdg::output * origin) - : rvsdg::RegionResult(origin->region(), origin, nullptr, origin->Type()) - 
{} - - static result * - create(jlm::rvsdg::output * origin) - { - auto result = new lambda::result(origin); - origin->region()->append_result(result); - return result; - } - -public: - lambda::output * - output() const noexcept - { - return jlm::util::AssertedCast(rvsdg::RegionResult::output()); - } -}; - -/** \brief Lambda result iterator - */ -class node::fctresiterator final : public jlm::rvsdg::input::iterator -{ - friend ::jlm::llvm::lambda::node; - - constexpr explicit fctresiterator(lambda::result * result) - : jlm::rvsdg::input::iterator(result) - {} - - [[nodiscard]] lambda::result * - next() const override - { - auto index = value()->index(); - auto lambda = jlm::util::AssertedCast(value()->region()->node()); - - return lambda->nfctresults() > index + 1 ? lambda->fctresult(index + 1) : nullptr; - } -}; - -/** \brief Lambda result const iterator - */ -class node::fctresconstiterator final : public jlm::rvsdg::input::constiterator -{ - friend ::jlm::llvm::lambda::node; - - constexpr explicit fctresconstiterator(const lambda::result * result) - : jlm::rvsdg::input::constiterator(result) - {} - - [[nodiscard]] const lambda::result * - next() const override - { - auto index = value()->index(); - auto lambda = jlm::util::AssertedCast(value()->region()->node()); - - return lambda->nfctresults() > index + 1 ? 
lambda->fctresult(index + 1) : nullptr; - } + std::vector ArgumentAttributes_; }; /** @@ -879,10 +608,10 @@ lambda::node::RemoveLambdaInputsWhere(const F & match) // iterate backwards to avoid the invalidation of 'n' by RemoveInput() for (size_t n = ninputs() - 1; n != static_cast(-1); n--) { - auto & lambdaInput = *input(n); - auto & argument = *lambdaInput.argument(); + auto lambdaInput = input(n); + auto & argument = *MapInputContextVar(*lambdaInput).inner; - if (argument.IsDead() && match(lambdaInput)) + if (argument.IsDead() && match(*lambdaInput)) { subregion()->RemoveArgument(argument.index()); RemoveInput(n); diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index e06c64b89..21299c67d 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -230,24 +230,28 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto o = dynamic_cast(&output)) + if (auto lambda = rvsdg::TryGetOwnerNode(output)) { - for (auto & result : o->node()->fctresults()) + for (auto & result : lambda->GetFunctionResults()) { - MarkOutput(*result.origin()); + MarkOutput(*result->origin()); } return; } - if (is(&output)) - { - return; - } - - if (auto cv = dynamic_cast(&output)) + if (auto lambda = rvsdg::TryGetRegionParentNode(output)) { - MarkOutput(*cv->input()->origin()); - return; + if (auto ctxvar = lambda->MapBinderContextVar(output)) + { + // Bound context variable. + MarkOutput(*ctxvar->input->origin()); + return; + } + else + { + // Function argument. 
+ return; + } } if (auto phiOutput = dynamic_cast(&output)) diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index ccc3bf5a0..203d85511 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -179,21 +179,23 @@ InvariantValueRedirection::RedirectCallOutputs(CallNode & callNode) if (callType != CallTypeClassifier::CallType::NonRecursiveDirectCall) return; - auto & lambdaNode = *callTypeClassifier->GetLambdaOutput().node(); + auto & lambdaNode = + rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()); // LLVM permits code where it can happen that the number and type of arguments handed in to the // call node do not agree with the number and type of lambda parameters, even though it is a // direct call. See jlm::tests::LambdaCallArgumentMismatch for an example. In this case, we cannot // redirect the call outputs to the call operand as the types would not align, resulting in type // errors. 
- if (callNode.NumArguments() != lambdaNode.nfctarguments()) + if (callNode.NumArguments() != lambdaNode.GetFunctionArguments().size()) return; auto memoryStateOutput = callNode.GetMemoryStateOutput(); auto callExitSplit = CallNode::GetMemoryStateExitSplit(callNode); auto hasCallExitSplit = callExitSplit != nullptr; - JLM_ASSERT(callNode.noutputs() == lambdaNode.nfctresults()); + auto results = lambdaNode.GetFunctionResults(); + JLM_ASSERT(callNode.noutputs() == results.size()); for (size_t n = 0; n < callNode.noutputs(); n++) { auto callOutput = callNode.output(n); @@ -228,16 +230,21 @@ InvariantValueRedirection::RedirectCallOutputs(CallNode & callNode) } else { - auto & lambdaResult = *lambdaNode.fctresult(n); - if (auto lambdaFunctionArgument = dynamic_cast(lambdaResult.origin())) + auto & lambdaResult = *results[n]; + auto origin = lambdaResult.origin(); + if (rvsdg::TryGetRegionParentNode(*origin) == &lambdaNode) { - auto callOperand = callNode.Argument(lambdaFunctionArgument->index())->origin(); - callOutput->divert_users(callOperand); - } - else if (dynamic_cast(lambdaResult.origin())) - { - // FIXME: We would like to get this case working as well, but we need to route the origin of - // the respective lambda input to the subregion of the call node. + if (auto ctxvar = lambdaNode.MapBinderContextVar(*origin)) + { + // This is a bound context variable. + // FIXME: We would like to get this case working as well, but we need to route the origin + // of the respective lambda input to the subregion of the call node. 
+ } + else + { + auto callOperand = callNode.Argument(origin->index())->origin(); + callOutput->divert_users(callOperand); + } } } } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index b708c0d42..15544f514 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -970,22 +970,22 @@ void Andersen::AnalyzeLambda(const lambda::node & lambda) { // Handle context variables - for (auto & cv : lambda.ctxvars()) + for (const auto & cv : lambda.GetContextVars()) { - if (!IsOrContainsPointerType(cv.type())) + if (!IsOrContainsPointerType(cv.input->type())) continue; - auto & inputRegister = *cv.origin(); - auto & argumentRegister = *cv.argument(); + auto & inputRegister = *cv.input->origin(); + auto & argumentRegister = *cv.inner; const auto inputRegisterPO = Set_->GetRegisterPointerObject(inputRegister); Set_->MapRegisterToExistingPointerObject(argumentRegister, inputRegisterPO); } // Create Register PointerObjects for each argument of pointing type in the function - for (auto & argument : lambda.fctarguments()) + for (auto argument : lambda.GetFunctionArguments()) { - if (IsOrContainsPointerType(argument.type())) - (void)Set_->CreateRegisterPointerObject(argument); + if (IsOrContainsPointerType(argument->type())) + (void)Set_->CreateRegisterPointerObject(*argument); } AnalyzeRegion(*lambda.subregion()); diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index e07a2f749..b0bf15b0a 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -672,10 +672,11 @@ HandleLambdaCallParameters( const lambda::node & lambdaNode, MakeSupersetFunctor & makeSuperset) { - for (size_t n = 0; n < callNode.NumArguments() && n < lambdaNode.nfctarguments(); n++) + auto lambdaArgs = lambdaNode.GetFunctionArguments(); + for (size_t n = 0; n < callNode.NumArguments() && n < 
lambdaArgs.size(); n++) { const auto & inputRegister = *callNode.Argument(n)->origin(); - const auto & argumentRegister = *lambdaNode.fctargument(n); + const auto & argumentRegister = *lambdaArgs[n]; const auto inputRegisterPO = set.TryGetRegisterPointerObject(inputRegister); const auto argumentRegisterPO = set.TryGetRegisterPointerObject(argumentRegister); @@ -698,10 +699,11 @@ HandleLambdaCallReturnValues( const lambda::node & lambdaNode, MakeSupersetFunctor & makeSuperset) { - for (size_t n = 0; n < callNode.NumResults() && n < lambdaNode.nfctresults(); n++) + auto lambdaResults = lambdaNode.GetFunctionResults(); + for (size_t n = 0; n < callNode.NumResults() && n < lambdaResults.size(); n++) { const auto & outputRegister = *callNode.Result(n); - const auto & resultRegister = *lambdaNode.fctresult(n)->origin(); + const auto & resultRegister = *lambdaResults[n]->origin(); const auto outputRegisterPO = set.TryGetRegisterPointerObject(outputRegister); const auto resultRegisterPO = set.TryGetRegisterPointerObject(resultRegister); @@ -873,10 +875,10 @@ HandleEscapedFunction( auto & lambdaNode = set.GetLambdaNodeFromFunctionMemoryObject(lambda); // All the function's arguments need to be flagged as PointsToExternal - for (auto & argument : lambdaNode.fctarguments()) + for (auto argument : lambdaNode.GetFunctionArguments()) { // Argument registers that are mapped to a register pointer object should point to external - const auto argumentPO = set.TryGetRegisterPointerObject(argument); + const auto argumentPO = set.TryGetRegisterPointerObject(*argument); if (!argumentPO) continue; @@ -888,9 +890,9 @@ HandleEscapedFunction( } // All results of pointer type need to be flagged as pointees escaping - for (auto & result : lambdaNode.fctresults()) + for (auto result : lambdaNode.GetFunctionResults()) { - const auto resultPO = set.TryGetRegisterPointerObject(*result.origin()); + const auto resultPO = set.TryGetRegisterPointerObject(*result->origin()); if (!resultPO) continue; 
@@ -1207,17 +1209,19 @@ LabelFunctionsArgumentsAndReturnValues(PointerObjectSet & set, util::Graph & gra graph.GetNode(pointerObject).AppendToLabel(util::strfmt("function", functionIndex)); // Add labels to registers corresponding to arguments and results of the function - for (size_t i = 0; i < function->nfctarguments(); i++) + auto args = function->GetFunctionArguments(); + for (size_t i = 0; i < args.size(); i++) { - if (auto argumentRegister = set.TryGetRegisterPointerObject(*function->fctargument(i))) + if (auto argumentRegister = set.TryGetRegisterPointerObject(*args[i])) { const auto label = util::strfmt("function", functionIndex, " arg", i); graph.GetNode(*argumentRegister).AppendToLabel(label); } } - for (size_t i = 0; i < function->nfctresults(); i++) + auto results = function->GetFunctionResults(); + for (size_t i = 0; i < results.size(); i++) { - if (auto resultRegister = set.TryGetRegisterPointerObject(*function->fctresult(i)->origin())) + if (auto resultRegister = set.TryGetRegisterPointerObject(*results[i]->origin())) { const auto label = util::strfmt("function", functionIndex, " res", i); graph.GetNode(*resultRegister).AppendToLabel(label); @@ -1257,9 +1261,9 @@ PointerObjectConstraintSet::CreateOvsSubsetGraph() // Mark all function argument register nodes as not direct for (auto [lambda, _] : Set_.GetFunctionMap()) { - for (size_t n = 0; n < lambda->nfctarguments(); n++) + for (auto arg : lambda->GetFunctionArguments()) { - if (auto argumentPO = Set_.TryGetRegisterPointerObject(*lambda->fctargument(n))) + if (auto argumentPO = Set_.TryGetRegisterPointerObject(*arg)) isDirectNode[*argumentPO] = false; } } diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index ce6eb5dbf..50c510693 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -377,7 +377,8 @@ class 
RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning if (callTypeClassifier->IsNonRecursiveDirectCall() || callTypeClassifier->IsRecursiveDirectCall()) { - auto & lambdaNode = *callTypeClassifier->GetLambdaOutput().node(); + auto & lambdaNode = + rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()); return GetLambdaEntryNodes(lambdaNode); } else if (callTypeClassifier->IsExternalCall()) @@ -401,7 +402,8 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning if (callTypeClassifier->IsNonRecursiveDirectCall() || callTypeClassifier->IsRecursiveDirectCall()) { - auto & lambdaNode = *callTypeClassifier->GetLambdaOutput().node(); + auto & lambdaNode = + rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()); return GetLambdaExitNodes(lambdaNode); } else if (callTypeClassifier->IsExternalCall()) @@ -562,7 +564,9 @@ class RegionAwareMemoryNodeProvisioning final : public MemoryNodeProvisioning auto & regionUnknownMemoryNodeReferences = regionSummary.GetUnknownMemoryNodeReferences(); auto callTypeClassifier = CallNode::ClassifyCall(callNode); - auto & lambdaRegion = *callTypeClassifier->GetLambdaOutput().node()->subregion(); + auto & lambdaRegion = + *rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()) + .subregion(); auto & lambdaRegionSummary = provisioning.GetRegionSummary(lambdaRegion); auto & lambdaRegionMemoryNodes = lambdaRegionSummary.GetMemoryNodes(); auto & lambdaRegionUnknownMemoryNodeReferences = @@ -947,7 +951,8 @@ RegionAwareMemoryNodeProvider::PropagateRegion(const rvsdg::Region & region) for (auto & callNode : regionSummary.GetNonRecursiveCalls().Items()) { auto callTypeClassifier = CallNode::ClassifyCall(*callNode); - auto & lambdaRegion = *callTypeClassifier->GetLambdaOutput().node()->subregion(); + auto & lambdaRegion = + *rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()).subregion(); auto & lambdaRegionSummary = Provisioning_->GetRegionSummary(lambdaRegion); 
RegionSummary::Propagate(regionSummary, lambdaRegionSummary); diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 26332e561..cdfc22ba9 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -209,16 +209,19 @@ class RegisterLocation final : public Location return jlm::util::strfmt(nodestr, ":", index, "[" + outputstr + "]"); } - if (is(Output_)) + if (auto node = rvsdg::TryGetRegionParentNode(*Output_)) { - auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); - return jlm::util::strfmt(dbgstr, ":cv:", index); - } - - if (is(Output_)) - { - auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); - return jlm::util::strfmt(dbgstr, ":arg:", index); + auto dbgstr = node->GetOperation().debug_string(); + if (auto ctxvar = node->MapBinderContextVar(*Output_)) + { + // Bound context variable. + return jlm::util::strfmt(dbgstr, ":cv:", index); + } + else + { + // Formal function argument. + return jlm::util::strfmt(dbgstr, ":arg:", index); + } } if (is(Output_)) @@ -1142,7 +1145,9 @@ Steensgaard::AnalyzeCall(const CallNode & callNode) { case CallTypeClassifier::CallType::NonRecursiveDirectCall: case CallTypeClassifier::CallType::RecursiveDirectCall: - AnalyzeDirectCall(callNode, *callTypeClassifier->GetLambdaOutput().node()); + AnalyzeDirectCall( + callNode, + rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput())); break; case CallTypeClassifier::CallType::ExternalCall: AnalyzeExternalCall(callNode); @@ -1174,10 +1179,11 @@ Steensgaard::AnalyzeDirectCall(const CallNode & callNode, const lambda::node & l // Handle call node operands // // Variadic arguments are taken care of in AnalyzeVaList(). 
+ auto arguments = lambdaNode.GetFunctionArguments(); for (size_t n = 1; n < callNode.ninputs(); n++) { auto & callArgument = *callNode.input(n)->origin(); - auto & lambdaArgument = *lambdaNode.fctargument(n - 1); + auto & lambdaArgument = *arguments[n - 1]; if (HasOrContainsPointerType(callArgument)) { @@ -1472,14 +1478,14 @@ void Steensgaard::AnalyzeLambda(const lambda::node & lambda) { // Handle context variables - for (auto & cv : lambda.ctxvars()) + for (const auto & cv : lambda.GetContextVars()) { - auto & origin = *cv.origin(); + auto & origin = *cv.input->origin(); if (HasOrContainsPointerType(origin)) { auto & originLocation = Context_->GetLocation(origin); - auto & argumentLocation = Context_->GetOrInsertRegisterLocation(*cv.argument()); + auto & argumentLocation = Context_->GetOrInsertRegisterLocation(*cv.inner); Context_->Join(originLocation, argumentLocation); } } @@ -1488,22 +1494,22 @@ Steensgaard::AnalyzeLambda(const lambda::node & lambda) auto callSummary = lambda.ComputeCallSummary(); if (callSummary->HasOnlyDirectCalls()) { - for (auto & argument : lambda.fctarguments()) + for (auto & argument : lambda.GetFunctionArguments()) { - if (HasOrContainsPointerType(argument)) + if (HasOrContainsPointerType(*argument)) { - Context_->GetOrInsertRegisterLocation(argument); + Context_->GetOrInsertRegisterLocation(*argument); } } } else { // FIXME: We also end up in this case when the lambda has only direct calls, but is exported. 
- for (auto & argument : lambda.fctarguments()) + for (auto argument : lambda.GetFunctionArguments()) { - if (HasOrContainsPointerType(argument)) + if (HasOrContainsPointerType(*argument)) { - auto & argumentLocation = Context_->GetOrInsertRegisterLocation(argument); + auto & argumentLocation = Context_->GetOrInsertRegisterLocation(*argument); argumentLocation.SetPointsToFlags( argumentLocation.GetPointsToFlags() | PointsToFlags::PointsToExternalMemory | PointsToFlags::PointsToEscapedMemory); @@ -1516,9 +1522,9 @@ Steensgaard::AnalyzeLambda(const lambda::node & lambda) // Handle function results if (lambda::node::IsExported(lambda)) { - for (auto & result : lambda.fctresults()) + for (auto result : lambda.GetFunctionResults()) { - auto & operand = *result.origin(); + auto & operand = *result->origin(); if (HasOrContainsPointerType(operand)) { diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 7a2cc2b80..0c4c118ed 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -95,7 +95,8 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi if (callTypeClassifier->IsNonRecursiveDirectCall() || callTypeClassifier->IsRecursiveDirectCall()) { - auto & lambdaNode = *callTypeClassifier->GetLambdaOutput().node(); + auto & lambdaNode = + rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()); return GetLambdaEntryNodes(lambdaNode); } else if (callTypeClassifier->IsExternalCall()) @@ -118,7 +119,8 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi if (callTypeClassifier->IsNonRecursiveDirectCall() || callTypeClassifier->IsRecursiveDirectCall()) { - auto & lambdaNode = *callTypeClassifier->GetLambdaOutput().node(); + auto & lambdaNode = + rvsdg::AssertGetOwnerNode(callTypeClassifier->GetLambdaOutput()); return 
GetLambdaExitNodes(lambdaNode); } else if (callTypeClassifier->IsExternalCall()) @@ -804,7 +806,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownNonRecursiveDirectCall( JLM_ASSERT(callTypeClassifier.IsNonRecursiveDirectCall()); auto & liveNodes = Context_->GetLiveNodes(*callNode.region()); - auto & lambdaNode = *callTypeClassifier.GetLambdaOutput().node(); + auto & lambdaNode = rvsdg::AssertGetOwnerNode(callTypeClassifier.GetLambdaOutput()); Context_->AddLiveNodes(*lambdaNode.subregion(), liveNodes); Context_->AddLiveNodesAnnotatedLambda(lambdaNode); @@ -818,7 +820,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownRecursiveDirectCall( JLM_ASSERT(callTypeClassifier.IsRecursiveDirectCall()); auto & liveNodes = Context_->GetLiveNodes(*callNode.region()); - auto & lambdaNode = *callTypeClassifier.GetLambdaOutput().node(); + auto & lambdaNode = rvsdg::AssertGetOwnerNode(callTypeClassifier.GetLambdaOutput()); Context_->AddLiveNodes(*lambdaNode.subregion(), liveNodes); Context_->AddLiveNodesAnnotatedLambda(lambdaNode); diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index eba80257e..1616ca675 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -82,7 +82,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) } else if (auto lambda = dynamic_cast(region->node())) { - output = lambda->add_ctxvar(output); + output = lambda->AddContextVar(*output).inner; } else if (auto phi = dynamic_cast(region->node())) { @@ -119,16 +119,18 @@ inlineCall(jlm::rvsdg::simple_node * call, const lambda::node * lambda) JLM_ASSERT(is(call)); auto deps = route_dependencies(lambda, call); - JLM_ASSERT(lambda->ncvarguments() == deps.size()); + auto ctxvars = lambda->GetContextVars(); + JLM_ASSERT(ctxvars.size() == deps.size()); rvsdg::SubstitutionMap smap; + auto args = lambda->GetFunctionArguments(); for (size_t n = 1; n < call->ninputs(); n++) { - auto argument = lambda->fctargument(n - 1); + auto argument = args[n - 1]; 
smap.insert(argument, call->input(n)->origin()); } - for (size_t n = 0; n < lambda->ncvarguments(); n++) - smap.insert(lambda->cvargument(n), deps[n]); + for (size_t n = 0; n < ctxvars.size(); n++) + smap.insert(ctxvars[n].inner, deps[n]); lambda->subregion()->copy(call->region(), smap, false, false); diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 1b46c697f..2c7104549 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -389,15 +389,15 @@ ::mlir::Operation * JlmToMlirConverter::ConvertLambda(const llvm::lambda::node & lambdaNode, ::mlir::Block & block) { ::llvm::SmallVector<::mlir::Type> arguments; - for (size_t i = 0; i < lambdaNode.nfctarguments(); ++i) + for (auto arg : lambdaNode.GetFunctionArguments()) { - arguments.push_back(ConvertType(lambdaNode.fctargument(i)->type())); + arguments.push_back(ConvertType(arg->type())); } ::llvm::SmallVector<::mlir::Type> results; - for (size_t i = 0; i < lambdaNode.nfctresults(); ++i) + for (auto res : lambdaNode.GetFunctionResults()) { - results.push_back(ConvertType(lambdaNode.fctresult(i)->type())); + results.push_back(ConvertType(res->type())); } ::llvm::SmallVector<::mlir::Type> lambdaRef; diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index f9278f2c5..6ec1ba514 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=d0bb58feb2432aefbc65364e10fded264c024fd8 +GIT_COMMIT=99d309be2a9aa8d565c2ece493dc33a447ad166d # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" @@ -60,7 +60,7 @@ done # Check if verilator exists if ! 
command -v verilator &> /dev/null then - echo "No verilator in ${PATH}" + echo "No verilator in ${PATH}" echo "Consider installing the verilator package for your Linux distro." exit 1 fi diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index db533ac47..094f39a88 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -31,7 +31,8 @@ StoreTest1::SetupRvsdg() auto b = alloca_op::create(pointerType, csize, 4); auto a = alloca_op::create(pointerType, csize, 4); - auto merge_d = MemoryStateMergeOperation::Create({ d[1], fct->fctargument(0) }); + auto merge_d = MemoryStateMergeOperation::Create( + std::vector{ d[1], fct->GetFunctionArguments()[0] }); auto merge_c = MemoryStateMergeOperation::Create(std::vector({ c[1], merge_d })); auto merge_b = @@ -85,7 +86,8 @@ StoreTest2::SetupRvsdg() auto y = alloca_op::create(pointerType, csize, 4); auto p = alloca_op::create(pointerType, csize, 4); - auto merge_a = MemoryStateMergeOperation::Create({ a[1], fct->fctargument(0) }); + auto merge_a = MemoryStateMergeOperation::Create( + std::vector{ a[1], fct->GetFunctionArguments()[0] }); auto merge_b = MemoryStateMergeOperation::Create(std::vector({ b[1], merge_a })); auto merge_x = @@ -138,8 +140,11 @@ LoadTest1::SetupRvsdg() auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); - auto ld1 = - LoadNonVolatileNode::Create(fct->fctargument(0), { fct->fctargument(1) }, pointerType, 4); + auto ld1 = LoadNonVolatileNode::Create( + fct->GetFunctionArguments()[0], + { fct->GetFunctionArguments()[1] }, + pointerType, + 4); auto ld2 = LoadNonVolatileNode::Create(ld1[0], { ld1[1] }, jlm::rvsdg::bittype::Create(32), 4); fct->finalize(ld2); @@ -181,7 +186,8 @@ LoadTest2::SetupRvsdg() auto y = alloca_op::create(pointerType, csize, 4); auto p = alloca_op::create(pointerType, csize, 4); - auto merge_a = MemoryStateMergeOperation::Create({ a[1], fct->fctargument(0) }); + auto merge_a = MemoryStateMergeOperation::Create( + std::vector{ a[1], 
fct->GetFunctionArguments()[0] }); auto merge_b = MemoryStateMergeOperation::Create(std::vector({ b[1], merge_a })); auto merge_x = @@ -243,7 +249,7 @@ LoadFromUndefTest::SetupRvsdg() auto undefValue = UndefValueOperation::Create(*Lambda_->subregion(), pointerType); auto loadResults = LoadNonVolatileNode::Create( undefValue, - { Lambda_->fctargument(0) }, + { Lambda_->GetFunctionArguments()[0] }, jlm::rvsdg::bittype::Create(32), 4); @@ -284,16 +290,22 @@ GetElementPtrTest::SetupRvsdg() auto zero = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 0); auto one = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 1); - auto gepx = - GetElementPtrOperation::Create(fct->fctargument(0), { zero, zero }, structType, pointerType); + auto gepx = GetElementPtrOperation::Create( + fct->GetFunctionArguments()[0], + { zero, zero }, + structType, + pointerType); auto ldx = LoadNonVolatileNode::Create( gepx, - { fct->fctargument(1) }, + { fct->GetFunctionArguments()[1] }, jlm::rvsdg::bittype::Create(32), 4); - auto gepy = - GetElementPtrOperation::Create(fct->fctargument(0), { zero, one }, structType, pointerType); + auto gepy = GetElementPtrOperation::Create( + fct->GetFunctionArguments()[0], + { zero, one }, + structType, + pointerType); auto ldy = LoadNonVolatileNode::Create(gepy, { ldx[1] }, jlm::rvsdg::bittype::Create(32), 4); auto sum = jlm::rvsdg::bitadd_op::create(32, ldx[0], ldy[0]); @@ -329,7 +341,7 @@ BitCastTest::SetupRvsdg() auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); - auto cast = bitcast_op::create(fct->fctargument(0), pointerType); + auto cast = bitcast_op::create(fct->GetFunctionArguments()[0], pointerType); fct->finalize({ cast }); @@ -366,9 +378,9 @@ Bits2PtrTest::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "bit2ptr", linkage::external_linkage); - auto valueArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = 
lambda->fctargument(2); + auto valueArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto cast = bits2ptr_op::create(valueArgument, pt); @@ -377,7 +389,7 @@ Bits2PtrTest::SetupRvsdg() return std::make_tuple(lambda, jlm::rvsdg::output::GetNode(*cast)); }; - auto setupTestFunction = [&](lambda::output * b2p) + auto setupTestFunction = [&](rvsdg::output * b2p) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -387,15 +399,15 @@ Bits2PtrTest::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto valueArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto valueArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto cvbits2ptr = lambda->add_ctxvar(b2p); + auto cvbits2ptr = lambda->AddContextVar(*b2p).inner; auto & call = CallNode::CreateNode( cvbits2ptr, - b2p->node()->Type(), + rvsdg::AssertGetOwnerNode(*b2p).Type(), { valueArgument, iOStateArgument, memoryStateArgument }); lambda->finalize({ call.GetIoStateOutput(), call.GetMemoryStateOutput() }); @@ -440,9 +452,9 @@ ConstantPointerNullTest::SetupRvsdg() auto constantPointerNullResult = ConstantPointerNullOperation::Create(fct->subregion(), pointerType); auto st = StoreNonVolatileNode::Create( - fct->fctargument(0), + fct->GetFunctionArguments()[0], constantPointerNullResult, - { fct->fctargument(1) }, + { fct->GetFunctionArguments()[1] }, 4); fct->finalize({ st[0] }); @@ -482,10 +494,10 @@ CallTest1::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "f", 
linkage::external_linkage); - auto pointerArgument1 = lambda->fctargument(0); - auto pointerArgument2 = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); + auto pointerArgument1 = lambda->GetFunctionArguments()[0]; + auto pointerArgument2 = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; auto ld1 = LoadNonVolatileNode::Create( pointerArgument1, @@ -518,10 +530,10 @@ CallTest1::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "g", linkage::external_linkage); - auto pointerArgument1 = lambda->fctargument(0); - auto pointerArgument2 = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); + auto pointerArgument1 = lambda->GetFunctionArguments()[0]; + auto pointerArgument2 = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; auto ld1 = LoadNonVolatileNode::Create( pointerArgument1, @@ -550,11 +562,11 @@ CallTest1::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "h", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto cvf = lambda->add_ctxvar(f->output()); - auto cvg = lambda->add_ctxvar(g->output()); + auto cvf = lambda->AddContextVar(*f->output()).inner; + auto cvg = lambda->AddContextVar(*g->output()).inner; auto size = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 
4); @@ -636,9 +648,9 @@ CallTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "create", linkage::external_linkage); - auto valueArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto valueArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto four = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto prod = jlm::rvsdg::bitmul_op::create(32, valueArgument, four); @@ -665,9 +677,9 @@ CallTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "destroy", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto cast = bitcast_op::create(pointerArgument, pointerType); auto freeResults = FreeOperation::Create(cast, { memoryStateArgument }, iOStateArgument); @@ -688,11 +700,11 @@ CallTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto create_cv = lambda->add_ctxvar(lambdaCreate->output()); - auto destroy_cv = lambda->add_ctxvar(lambdaDestroy->output()); + auto create_cv = lambda->AddContextVar(*lambdaCreate->output()).inner; + auto destroy_cv = lambda->AddContextVar(*lambdaDestroy->output()).inner; auto six = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 6); auto seven = 
jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 7); @@ -767,8 +779,8 @@ IndirectCallTest1::SetupRvsdg() { auto lambda = lambda::node::create(graph->root(), constantFunctionType, name, linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto constant = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, n); @@ -785,9 +797,9 @@ IndirectCallTest1::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "indcall", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto & call = CallNode::CreateNode( pointerArgument, @@ -800,7 +812,7 @@ IndirectCallTest1::SetupRvsdg() }; auto SetupTestFunction = - [&](lambda::output * fctindcall, lambda::output * fctthree, lambda::output * fctfour) + [&](rvsdg::output * fctindcall, rvsdg::output * fctthree, rvsdg::output * fctfour) { auto functionType = FunctionType::Create( { iostatetype::Create(), MemoryStateType::Create() }, @@ -808,20 +820,20 @@ IndirectCallTest1::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto fctindcall_cv = lambda->add_ctxvar(fctindcall); - auto fctfour_cv = lambda->add_ctxvar(fctfour); - auto fctthree_cv = lambda->add_ctxvar(fctthree); + auto fctindcall_cv = 
lambda->AddContextVar(*fctindcall).inner; + auto fctfour_cv = lambda->AddContextVar(*fctfour).inner; + auto fctthree_cv = lambda->AddContextVar(*fctthree).inner; auto & call_four = CallNode::CreateNode( fctindcall_cv, - fctindcall->node()->Type(), + rvsdg::AssertGetOwnerNode(*fctindcall).Type(), { fctfour_cv, iOStateArgument, memoryStateArgument }); auto & call_three = CallNode::CreateNode( fctindcall_cv, - fctindcall->node()->Type(), + rvsdg::AssertGetOwnerNode(*fctindcall).Type(), { fctthree_cv, call_four.GetIoStateOutput(), call_four.GetMemoryStateOutput() }); auto add = jlm::rvsdg::bitadd_op::create(32, call_four.Result(0), call_three.Result(0)); @@ -842,10 +854,10 @@ IndirectCallTest1::SetupRvsdg() /* * Assign */ - this->LambdaThree_ = fctthree->node(); - this->LambdaFour_ = fctfour->node(); - this->LambdaIndcall_ = fctindcall->node(); - this->LambdaTest_ = fcttest->node(); + this->LambdaThree_ = &rvsdg::AssertGetOwnerNode(*fctthree); + this->LambdaFour_ = &rvsdg::AssertGetOwnerNode(*fctfour); + this->LambdaIndcall_ = &rvsdg::AssertGetOwnerNode(*fctindcall); + this->LambdaTest_ = &rvsdg::AssertGetOwnerNode(*fcttest); this->CallIndcall_ = callIndirectFunction; this->CallThree_ = callFunctionThree; @@ -906,8 +918,8 @@ IndirectCallTest2::SetupRvsdg() { auto lambda = lambda::node::create(graph->root(), constantFunctionType, name, linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto constant = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, n); @@ -923,9 +935,9 @@ IndirectCallTest2::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "i", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = 
lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto & call = CallNode::CreateNode( pointerArgument, @@ -939,8 +951,8 @@ IndirectCallTest2::SetupRvsdg() auto SetupIndirectCallFunction = [&](ssize_t n, const std::string & name, - lambda::output & functionI, - lambda::output & argumentFunction) + rvsdg::output & functionI, + rvsdg::output & argumentFunction) { auto pointerType = PointerType::Create(); @@ -950,12 +962,12 @@ IndirectCallTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, name, linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto functionICv = lambda->add_ctxvar(&functionI); - auto argumentFunctionCv = lambda->add_ctxvar(&argumentFunction); + auto functionICv = lambda->AddContextVar(functionI).inner; + auto argumentFunctionCv = lambda->AddContextVar(argumentFunction).inner; auto five = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, n); auto storeNode = @@ -963,7 +975,7 @@ IndirectCallTest2::SetupRvsdg() auto & call = CallNode::CreateNode( functionICv, - functionI.node()->Type(), + rvsdg::AssertGetOwnerNode(functionI).Type(), { argumentFunctionCv, iOStateArgument, storeNode[0] }); auto lambdaOutput = lambda->finalize(call.Results()); @@ -971,8 +983,8 @@ IndirectCallTest2::SetupRvsdg() return std::make_tuple(lambdaOutput, &call); }; - auto SetupTestFunction = [&](lambda::output & functionX, - lambda::output & functionY, + auto SetupTestFunction = [&](rvsdg::output & functionX, + rvsdg::output & 
functionY, delta::output & globalG1, delta::output & globalG2) { @@ -982,31 +994,32 @@ IndirectCallTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto functionXCv = lambda->add_ctxvar(&functionX); - auto functionYCv = lambda->add_ctxvar(&functionY); - auto globalG1Cv = lambda->add_ctxvar(&globalG1); - auto globalG2Cv = lambda->add_ctxvar(&globalG2); + auto functionXCv = lambda->AddContextVar(functionX).inner; + auto functionYCv = lambda->AddContextVar(functionY).inner; + auto globalG1Cv = lambda->AddContextVar(globalG1).inner; + auto globalG2Cv = lambda->AddContextVar(globalG2).inner; auto constantSize = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto pxAlloca = alloca_op::create(jlm::rvsdg::bittype::Create(32), constantSize, 4); auto pyAlloca = alloca_op::create(jlm::rvsdg::bittype::Create(32), constantSize, 4); - auto pxMerge = MemoryStateMergeOperation::Create({ pxAlloca[1], memoryStateArgument }); + auto pxMerge = MemoryStateMergeOperation::Create( + std::vector{ pxAlloca[1], memoryStateArgument }); auto pyMerge = MemoryStateMergeOperation::Create( std::vector({ pyAlloca[1], pxMerge })); auto & callX = CallNode::CreateNode( functionXCv, - functionX.node()->Type(), + rvsdg::AssertGetOwnerNode(functionX).Type(), { pxAlloca[0], iOStateArgument, pyMerge }); auto & callY = CallNode::CreateNode( functionYCv, - functionY.node()->Type(), + rvsdg::AssertGetOwnerNode(functionY).Type(), { pyAlloca[0], callX.GetIoStateOutput(), callX.GetMemoryStateOutput() }); auto loadG1 = LoadNonVolatileNode::Create( @@ -1034,7 +1047,7 @@ IndirectCallTest2::SetupRvsdg() jlm::rvsdg::output::GetNode(*pyAlloca[0]))); }; - auto SetupTest2Function = [&](lambda::output & functionX) 
+ auto SetupTest2Function = [&](rvsdg::output & functionX) { auto functionType = FunctionType::Create( { iostatetype::Create(), MemoryStateType::Create() }, @@ -1042,19 +1055,20 @@ IndirectCallTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test2", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto constantSize = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto pzAlloca = alloca_op::create(jlm::rvsdg::bittype::Create(32), constantSize, 4); - auto pzMerge = MemoryStateMergeOperation::Create({ pzAlloca[1], memoryStateArgument }); + auto pzMerge = MemoryStateMergeOperation::Create( + std::vector{ pzAlloca[1], memoryStateArgument }); - auto functionXCv = lambda->add_ctxvar(&functionX); + auto functionXCv = lambda->AddContextVar(functionX).inner; auto & callX = CallNode::CreateNode( functionXCv, - functionX.node()->Type(), + rvsdg::AssertGetOwnerNode(functionX).Type(), { pzAlloca[0], iOStateArgument, pzMerge }); auto lambdaOutput = lambda->finalize(callX.Results()); @@ -1083,13 +1097,13 @@ IndirectCallTest2::SetupRvsdg() */ this->DeltaG1_ = deltaG1->node(); this->DeltaG2_ = deltaG2->node(); - this->LambdaThree_ = lambdaThree->node(); - this->LambdaFour_ = lambdaFour->node(); - this->LambdaI_ = lambdaI->node(); - this->LambdaX_ = lambdaX->node(); - this->LambdaY_ = lambdaY->node(); - this->LambdaTest_ = lambdaTest->node(); - this->LambdaTest2_ = lambdaTest2->node(); + this->LambdaThree_ = &rvsdg::AssertGetOwnerNode(*lambdaThree); + this->LambdaFour_ = &rvsdg::AssertGetOwnerNode(*lambdaFour); + this->LambdaI_ = &rvsdg::AssertGetOwnerNode(*lambdaI); + this->LambdaX_ = &rvsdg::AssertGetOwnerNode(*lambdaX); + this->LambdaY_ = &rvsdg::AssertGetOwnerNode(*lambdaY); + this->LambdaTest_ = 
&rvsdg::AssertGetOwnerNode(*lambdaTest); + this->LambdaTest2_ = &rvsdg::AssertGetOwnerNode(*lambdaTest2); this->IndirectCall_ = indirectCall; this->CallIWithThree_ = callIWithThree; @@ -1144,19 +1158,20 @@ ExternalCallTest1::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); - auto pathArgument = lambda->fctargument(0); - auto modeArgument = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); + auto pathArgument = lambda->GetFunctionArguments()[0]; + auto modeArgument = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; - auto functionGCv = lambda->add_ctxvar(functionG); + auto functionGCv = lambda->AddContextVar(*functionG).inner; auto size = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto allocaPath = alloca_op::create(pointerType, size, 4); auto allocaMode = alloca_op::create(pointerType, size, 4); - auto mergePath = MemoryStateMergeOperation::Create({ allocaPath[1], memoryStateArgument }); + auto mergePath = MemoryStateMergeOperation::Create( + std::vector{ allocaPath[1], memoryStateArgument }); auto mergeMode = MemoryStateMergeOperation::Create( std::vector({ allocaMode[1], mergePath })); @@ -1234,16 +1249,17 @@ ExternalCallTest2::SetupRvsdg() // Setup function g() LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); - auto iOStateArgument = LambdaG_->fctargument(0); - auto memoryStateArgument = LambdaG_->fctargument(1); - auto llvmLifetimeStartArgument = LambdaG_->add_ctxvar(llvmLifetimeStart); - auto llvmLifetimeEndArgument = LambdaG_->add_ctxvar(llvmLifetimeEnd); - auto lambdaFArgument = LambdaG_->add_ctxvar(ExternalFArgument_); + auto iOStateArgument = LambdaG_->GetFunctionArguments()[0]; 
+ auto memoryStateArgument = LambdaG_->GetFunctionArguments()[1]; + auto llvmLifetimeStartArgument = LambdaG_->AddContextVar(*llvmLifetimeStart).inner; + auto llvmLifetimeEndArgument = LambdaG_->AddContextVar(*llvmLifetimeEnd).inner; + auto lambdaFArgument = LambdaG_->AddContextVar(*ExternalFArgument_).inner; auto twentyFour = jlm::rvsdg::create_bitconstant(LambdaG_->subregion(), 64, 24); auto allocaResults = alloca_op::create(structType, twentyFour, 16); - auto memoryState = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); + auto memoryState = MemoryStateMergeOperation::Create( + std::vector{ allocaResults[1], memoryStateArgument }); auto & callLLvmLifetimeStart = CallNode::CreateNode( llvmLifetimeStartArgument, @@ -1316,21 +1332,21 @@ GammaTest::SetupRvsdg() auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); auto zero = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 0); - auto biteq = jlm::rvsdg::biteq_op::create(32, fct->fctargument(0), zero); + auto biteq = jlm::rvsdg::biteq_op::create(32, fct->GetFunctionArguments()[0], zero); auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 2, biteq); auto gammanode = jlm::rvsdg::GammaNode::create(predicate, 2); - auto p1ev = gammanode->add_entryvar(fct->fctargument(1)); - auto p2ev = gammanode->add_entryvar(fct->fctargument(2)); - auto p3ev = gammanode->add_entryvar(fct->fctargument(3)); - auto p4ev = gammanode->add_entryvar(fct->fctargument(4)); + auto p1ev = gammanode->add_entryvar(fct->GetFunctionArguments()[1]); + auto p2ev = gammanode->add_entryvar(fct->GetFunctionArguments()[2]); + auto p3ev = gammanode->add_entryvar(fct->GetFunctionArguments()[3]); + auto p4ev = gammanode->add_entryvar(fct->GetFunctionArguments()[4]); auto tmp1 = gammanode->add_exitvar({ p1ev->argument(0), p3ev->argument(1) }); auto tmp2 = gammanode->add_exitvar({ p2ev->argument(0), p4ev->argument(1) }); auto ld1 = LoadNonVolatileNode::Create( tmp1, - { fct->fctargument(5) 
}, + { fct->GetFunctionArguments()[5] }, jlm::rvsdg::bittype::Create(32), 4); auto ld2 = LoadNonVolatileNode::Create(tmp2, { ld1[1] }, jlm::rvsdg::bittype::Create(32), 4); @@ -1417,18 +1433,18 @@ GammaTest2::SetupRvsdg() { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); - auto cArgument = lambda->fctargument(0); - auto xArgument = lambda->fctargument(1); - auto yArgument = lambda->fctargument(2); - auto iOStateArgument = lambda->fctargument(3); - auto memoryStateArgument = lambda->fctargument(4); + auto cArgument = lambda->GetFunctionArguments()[0]; + auto xArgument = lambda->GetFunctionArguments()[1]; + auto yArgument = lambda->GetFunctionArguments()[2]; + auto iOStateArgument = lambda->GetFunctionArguments()[3]; + auto memoryStateArgument = lambda->GetFunctionArguments()[4]; auto size = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto allocaZResults = alloca_op::create(pointerType, size, 4); - auto memoryState = - MemoryStateMergeOperation::Create({ allocaZResults[1], memoryStateArgument }); + auto memoryState = MemoryStateMergeOperation::Create( + std::vector{ allocaZResults[1], memoryStateArgument }); auto nullPointer = ConstantPointerNullOperation::Create(lambda->subregion(), pointerType); auto storeZResults = @@ -1457,7 +1473,7 @@ GammaTest2::SetupRvsdg() rvsdg::output::GetNode(*allocaZResults[0])); }; - auto SetupLambdaGH = [&](lambda::output & lambdaF, + auto SetupLambdaGH = [&](rvsdg::output & lambdaF, int64_t cValue, int64_t xValue, int64_t yValue, @@ -1472,17 +1488,17 @@ GammaTest2::SetupRvsdg() auto lambda = lambda::node::create(rvsdg->root(), functionType, functionName, linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); - auto lambdaFArgument = lambda->add_ctxvar(&lambdaF); + auto iOStateArgument = 
lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; + auto lambdaFArgument = lambda->AddContextVar(lambdaF).inner; auto size = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto allocaXResults = alloca_op::create(rvsdg::bittype::Create(32), size, 4); auto allocaYResults = alloca_op::create(pointerType, size, 4); - auto memoryState = - MemoryStateMergeOperation::Create({ allocaXResults[1], memoryStateArgument }); + auto memoryState = MemoryStateMergeOperation::Create( + std::vector{ allocaXResults[1], memoryStateArgument }); memoryState = MemoryStateMergeOperation::Create( std::vector({ allocaYResults[1], memoryState })); @@ -1498,7 +1514,7 @@ GammaTest2::SetupRvsdg() auto & call = CallNode::CreateNode( lambdaFArgument, - lambdaF.node()->Type(), + rvsdg::AssertGetOwnerNode(lambdaF).Type(), { predicate, allocaXResults[0], allocaYResults[0], iOStateArgument, storeYResults[0] }); lambda->finalize(call.Results()); @@ -1516,9 +1532,9 @@ GammaTest2::SetupRvsdg() auto [lambdaH, callFromH, allocaXFromH, allocaYFromH] = SetupLambdaGH(*lambdaF, 1, 3, 4, "h"); // Assign nodes - this->LambdaF_ = lambdaF->node(); - this->LambdaG_ = lambdaG->node(); - this->LambdaH_ = lambdaH->node(); + this->LambdaF_ = &rvsdg::AssertGetOwnerNode(*lambdaF); + this->LambdaG_ = &rvsdg::AssertGetOwnerNode(*lambdaG); + this->LambdaH_ = &rvsdg::AssertGetOwnerNode(*lambdaH); this->Gamma_ = gammaNode; @@ -1561,10 +1577,10 @@ ThetaTest::SetupRvsdg() auto thetanode = jlm::rvsdg::ThetaNode::create(fct->subregion()); auto n = thetanode->add_loopvar(zero); - auto l = thetanode->add_loopvar(fct->fctargument(0)); - auto a = thetanode->add_loopvar(fct->fctargument(1)); - auto c = thetanode->add_loopvar(fct->fctargument(2)); - auto s = thetanode->add_loopvar(fct->fctargument(3)); + auto l = thetanode->add_loopvar(fct->GetFunctionArguments()[0]); + auto a = thetanode->add_loopvar(fct->GetFunctionArguments()[1]); + auto c = 
thetanode->add_loopvar(fct->GetFunctionArguments()[2]); + auto s = thetanode->add_loopvar(fct->GetFunctionArguments()[3]); auto gepnode = GetElementPtrOperation::Create( a->argument(), @@ -1631,9 +1647,9 @@ DeltaTest1::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "g", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto ld = LoadNonVolatileNode::Create( pointerArgument, @@ -1644,7 +1660,7 @@ DeltaTest1::SetupRvsdg() return lambda->finalize({ ld[0], iOStateArgument, ld[1] }); }; - auto SetupFunctionH = [&](delta::output * f, lambda::output * g) + auto SetupFunctionH = [&](delta::output * f, rvsdg::output * g) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -1653,15 +1669,18 @@ DeltaTest1::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "h", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto cvf = lambda->add_ctxvar(f); - auto cvg = lambda->add_ctxvar(g); + auto cvf = lambda->AddContextVar(*f).inner; + auto cvg = lambda->AddContextVar(*g).inner; auto five = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 5); auto st = StoreNonVolatileNode::Create(cvf, five, { memoryStateArgument }, 4); - auto & callG = CallNode::CreateNode(cvg, g->node()->Type(), { cvf, 
iOStateArgument, st[0] }); + auto & callG = CallNode::CreateNode( + cvg, + rvsdg::AssertGetOwnerNode(*g).Type(), + { cvf, iOStateArgument, st[0] }); auto lambdaOutput = lambda->finalize(callG.Results()); GraphExport::Create(*lambda->output(), "h"); @@ -1676,8 +1695,8 @@ DeltaTest1::SetupRvsdg() /* * Assign nodes */ - this->lambda_g = g->node(); - this->lambda_h = h->node(); + this->lambda_g = &rvsdg::AssertGetOwnerNode(*g); + this->lambda_h = &rvsdg::AssertGetOwnerNode(*h); this->delta_f = f->node(); @@ -1738,17 +1757,17 @@ DeltaTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "f1", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto cvd1 = lambda->add_ctxvar(d1); + auto cvd1 = lambda->AddContextVar(*d1).inner; auto b2 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 2); auto st = StoreNonVolatileNode::Create(cvd1, b2, { memoryStateArgument }, 4); return lambda->finalize({ iOStateArgument, st[0] }); }; - auto SetupF2 = [&](lambda::output * f1, delta::output * d1, delta::output * d2) + auto SetupF2 = [&](rvsdg::output * f1, delta::output * d1, delta::output * d2) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -1758,17 +1777,20 @@ DeltaTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "f2", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto cvd1 = lambda->add_ctxvar(d1); - auto cvd2 = lambda->add_ctxvar(d2); - auto cvf1 = lambda->add_ctxvar(f1); + auto cvd1 = lambda->AddContextVar(*d1).inner; + auto cvd2 = 
lambda->AddContextVar(*d2).inner; + auto cvf1 = lambda->AddContextVar(*f1).inner; auto b5 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 5); auto b42 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 42); auto st = StoreNonVolatileNode::Create(cvd1, b5, { memoryStateArgument }, 4); - auto & call = CallNode::CreateNode(cvf1, f1->node()->Type(), { iOStateArgument, st[0] }); + auto & call = CallNode::CreateNode( + cvf1, + rvsdg::AssertGetOwnerNode(*f1).Type(), + { iOStateArgument, st[0] }); st = StoreNonVolatileNode::Create(cvd2, b42, { call.GetMemoryStateOutput() }, 4); auto lambdaOutput = lambda->finalize(call.Results()); @@ -1783,8 +1805,8 @@ DeltaTest2::SetupRvsdg() auto [f2, callF1] = SetupF2(f1, d1, d2); // Assign nodes - this->lambda_f1 = f1->node(); - this->lambda_f2 = f2->node(); + this->lambda_f1 = &rvsdg::AssertGetOwnerNode(*f1); + this->lambda_f2 = &rvsdg::AssertGetOwnerNode(*f2); this->delta_d1 = d1->node(); this->delta_d2 = d2->node(); @@ -1841,10 +1863,10 @@ DeltaTest3::SetupRvsdg() { jlm::rvsdg::bittype::Create(16), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "f", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); - auto g1CtxVar = lambda->add_ctxvar(&g1); - auto g2CtxVar = lambda->add_ctxvar(&g2); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; + auto g1CtxVar = lambda->AddContextVar(g1).inner; + auto g2CtxVar = lambda->AddContextVar(g2).inner; auto loadResults = LoadNonVolatileNode::Create(g2CtxVar, { memoryStateArgument }, PointerType::Create(), 8); @@ -1858,7 +1880,7 @@ DeltaTest3::SetupRvsdg() return lambda->finalize({ truncResult, iOStateArgument, loadResults[1] }); }; - auto SetupTest = [&](lambda::output & lambdaF) + auto SetupTest = [&](rvsdg::output & lambdaF) { auto iOStateType = 
iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -1868,14 +1890,14 @@ DeltaTest3::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto lambdaFArgument = lambda->add_ctxvar(&lambdaF); + auto lambdaFArgument = lambda->AddContextVar(lambdaF).inner; auto & call = CallNode::CreateNode( lambdaFArgument, - lambdaF.node()->Type(), + rvsdg::AssertGetOwnerNode(lambdaF).Type(), { iOStateArgument, memoryStateArgument }); auto lambdaOutput = lambda->finalize({ call.GetIoStateOutput(), call.GetMemoryStateOutput() }); @@ -1892,8 +1914,8 @@ DeltaTest3::SetupRvsdg() /* * Assign nodes */ - this->LambdaF_ = f->node(); - this->LambdaTest_ = test->node(); + this->LambdaF_ = &rvsdg::AssertGetOwnerNode(*f); + this->LambdaTest_ = &rvsdg::AssertGetOwnerNode(*test); this->DeltaG1_ = g1->node(); this->DeltaG2_ = g2->node(); @@ -1924,10 +1946,10 @@ ImportTest::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "f1", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto cvd1 = lambda->add_ctxvar(d1); + auto cvd1 = lambda->AddContextVar(*d1).inner; auto b5 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 5); auto st = StoreNonVolatileNode::Create(cvd1, b5, { memoryStateArgument }, 4); @@ -1935,7 +1957,7 @@ ImportTest::SetupRvsdg() return lambda->finalize({ iOStateArgument, st[0] }); }; - auto SetupF2 = [&](lambda::output * f1, jlm::rvsdg::output * d1, jlm::rvsdg::output * d2) + auto SetupF2 = [&](rvsdg::output * f1, jlm::rvsdg::output * d1, 
jlm::rvsdg::output * d2) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -1945,16 +1967,19 @@ ImportTest::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "f2", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto cvd1 = lambda->add_ctxvar(d1); - auto cvd2 = lambda->add_ctxvar(d2); - auto cvf1 = lambda->add_ctxvar(f1); + auto cvd1 = lambda->AddContextVar(*d1).inner; + auto cvd2 = lambda->AddContextVar(*d2).inner; + auto cvf1 = lambda->AddContextVar(*f1).inner; auto b2 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 2); auto b21 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 21); auto st = StoreNonVolatileNode::Create(cvd1, b2, { memoryStateArgument }, 4); - auto & call = CallNode::CreateNode(cvf1, f1->node()->Type(), { iOStateArgument, st[0] }); + auto & call = CallNode::CreateNode( + cvf1, + rvsdg::AssertGetOwnerNode(*f1).Type(), + { iOStateArgument, st[0] }); st = StoreNonVolatileNode::Create(cvd2, b21, { call.GetMemoryStateOutput() }, 4); auto lambdaOutput = lambda->finalize(call.Results()); @@ -1978,8 +2003,8 @@ ImportTest::SetupRvsdg() auto [f2, callF1] = SetupF2(f1, d1, d2); // Assign nodes - this->lambda_f1 = f1->node(); - this->lambda_f2 = f2->node(); + this->lambda_f1 = &rvsdg::AssertGetOwnerNode(*f1); + this->lambda_f2 = &rvsdg::AssertGetOwnerNode(*f2); this->CallF1_ = callF1; @@ -2020,11 +2045,11 @@ PhiTest1::SetupRvsdg() auto lambda = lambda::node::create(pb.subregion(), fibFunctionType, "fib", linkage::external_linkage); - auto valueArgument = lambda->fctargument(0); - auto pointerArgument = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); - auto ctxVarFib = 
lambda->add_ctxvar(fibrv->argument()); + auto valueArgument = lambda->GetFunctionArguments()[0]; + auto pointerArgument = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; + auto ctxVarFib = lambda->AddContextVar(*fibrv->argument()).inner; auto two = jlm::rvsdg::create_bitconstant(lambda->subregion(), 64, 2); auto bitult = jlm::rvsdg::bitult_op::create(64, valueArgument, two); @@ -2111,13 +2136,14 @@ PhiTest1::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); - auto fibcv = lambda->add_ctxvar(phiNode->output(0)); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; + auto fibcv = lambda->AddContextVar(*phiNode->output(0)).inner; auto ten = jlm::rvsdg::create_bitconstant(lambda->subregion(), 64, 10); auto allocaResults = alloca_op::create(at, ten, 16); - auto state = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); + auto state = MemoryStateMergeOperation::Create( + std::vector{ allocaResults[1], memoryStateArgument }); auto zero = jlm::rvsdg::create_bitconstant(lambda->subregion(), 64, 0); auto gep = GetElementPtrOperation::Create(allocaResults[0], { zero, zero }, at, pbit64); @@ -2135,8 +2161,8 @@ PhiTest1::SetupRvsdg() auto [testfct, callFib, alloca] = SetupTestFunction(phiNode); // Assign nodes - this->lambda_fib = fibfct->node(); - this->lambda_test = testfct->node(); + this->lambda_fib = &rvsdg::AssertGetOwnerNode(*fibfct); + this->lambda_test = &rvsdg::AssertGetOwnerNode(*testfct); this->gamma = gammaNode; this->phi = phiNode; @@ -2190,8 +2216,8 @@ PhiTest2::SetupRvsdg() constantFunctionType, "eight", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto 
memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto constant = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 8); @@ -2202,9 +2228,9 @@ PhiTest2::SetupRvsdg() { auto lambda = lambda::node::create(graph->root(), functionIType, "i", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto & call = CallNode::CreateNode( pointerArgument, @@ -2220,12 +2246,12 @@ PhiTest2::SetupRvsdg() [&](jlm::rvsdg::Region & region, phi::rvargument & functionB, phi::rvargument & functionD) { auto lambda = lambda::node::create(®ion, recFunctionType, "a", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto functionBCv = lambda->add_ctxvar(&functionB); - auto functionDCv = lambda->add_ctxvar(&functionD); + auto functionBCv = lambda->AddContextVar(functionB).inner; + auto functionDCv = lambda->AddContextVar(functionD).inner; auto one = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 1); auto storeNode = StoreNonVolatileNode::Create(pointerArgument, one, { memoryStateArgument }, 4); @@ -2264,13 +2290,13 @@ PhiTest2::SetupRvsdg() phi::cvargument & functionEight) { auto lambda = lambda::node::create(®ion, recFunctionType, "b", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = 
lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto functionICv = lambda->add_ctxvar(&functionI); - auto functionCCv = lambda->add_ctxvar(&functionC); - auto functionEightCv = lambda->add_ctxvar(&functionEight); + auto functionICv = lambda->AddContextVar(functionI).inner; + auto functionCCv = lambda->AddContextVar(functionC).inner; + auto functionEightCv = lambda->AddContextVar(functionEight).inner; auto two = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 2); auto storeNode = StoreNonVolatileNode::Create(pointerArgument, two, { memoryStateArgument }, 4); @@ -2306,11 +2332,11 @@ PhiTest2::SetupRvsdg() auto SetupC = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) { auto lambda = lambda::node::create(®ion, recFunctionType, "c", linkage::external_linkage); - auto xArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto xArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto functionACv = lambda->add_ctxvar(&functionA); + auto functionACv = lambda->AddContextVar(functionA).inner; auto three = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 3); auto storeNode = StoreNonVolatileNode::Create(xArgument, three, { memoryStateArgument }, 4); @@ -2345,11 +2371,11 @@ PhiTest2::SetupRvsdg() auto SetupD = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) { auto lambda = lambda::node::create(®ion, recFunctionType, "d", linkage::external_linkage); - auto xArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto xArgument = 
lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto functionACv = lambda->add_ctxvar(&functionA); + auto functionACv = lambda->AddContextVar(functionA).inner; auto four = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto storeNode = StoreNonVolatileNode::Create(xArgument, four, { memoryStateArgument }, 4); @@ -2372,7 +2398,7 @@ PhiTest2::SetupRvsdg() jlm::rvsdg::output::GetNode(*pdAlloca[0]))); }; - auto SetupPhi = [&](lambda::output & lambdaEight, lambda::output & lambdaI) + auto SetupPhi = [&](rvsdg::output & lambdaEight, rvsdg::output & lambdaI) { jlm::llvm::phi::builder phiBuilder; phiBuilder.begin(graph->root()); @@ -2429,10 +2455,10 @@ PhiTest2::SetupRvsdg() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto functionACv = lambda->add_ctxvar(&functionA); + auto functionACv = lambda->AddContextVar(functionA).inner; auto four = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto pTestAlloca = alloca_op::create(jlm::rvsdg::bittype::Create(32), four, 4); @@ -2478,17 +2504,13 @@ PhiTest2::SetupRvsdg() /* * Assign nodes */ - this->LambdaEight_ = lambdaEight->node(); - this->LambdaI_ = lambdaI->node(); - this->LambdaA_ = jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*lambdaA->result()->origin())); - this->LambdaB_ = jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*lambdaB->result()->origin())); - this->LambdaC_ = jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*lambdaC->result()->origin())); - this->LambdaD_ = jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*lambdaD->result()->origin())); - this->LambdaTest_ = 
lambdaTest->node(); + this->LambdaEight_ = &rvsdg::AssertGetOwnerNode(*lambdaEight); + this->LambdaI_ = &rvsdg::AssertGetOwnerNode(*lambdaI); + this->LambdaA_ = &rvsdg::AssertGetOwnerNode(*lambdaA->result()->origin()); + this->LambdaB_ = &rvsdg::AssertGetOwnerNode(*lambdaB->result()->origin()); + this->LambdaC_ = &rvsdg::AssertGetOwnerNode(*lambdaC->result()->origin()); + this->LambdaD_ = &rvsdg::AssertGetOwnerNode(*lambdaD->result()->origin()); + this->LambdaTest_ = &rvsdg::AssertGetOwnerNode(*lambdaTest); this->CallAFromTest_ = callAFromTest; this->CallAFromC_ = callAFromC; @@ -2574,9 +2596,9 @@ ExternalMemoryTest::SetupRvsdg() * Setup function f. */ LambdaF = lambda::node::create(graph->root(), ft, "f", linkage::external_linkage); - auto x = LambdaF->fctargument(0); - auto y = LambdaF->fctargument(1); - auto state = LambdaF->fctargument(2); + auto x = LambdaF->GetFunctionArguments()[0]; + auto y = LambdaF->GetFunctionArguments()[1]; + auto state = LambdaF->GetFunctionArguments()[2]; auto one = jlm::rvsdg::create_bitconstant(LambdaF->subregion(), 32, 1); auto two = jlm::rvsdg::create_bitconstant(LambdaF->subregion(), 32, 2); @@ -2669,11 +2691,11 @@ EscapedMemoryTest1::SetupRvsdg() auto lambda = lambda::node::create(rvsdg->root(), functionType, "test", linkage::external_linkage); - auto pointerArgument = lambda->fctargument(0); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto pointerArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; - auto contextVariableB = lambda->add_ctxvar(&deltaB); + auto contextVariableB = lambda->AddContextVar(deltaB).inner; auto loadResults1 = LoadNonVolatileNode::Create(pointerArgument, { memoryStateArgument }, pointerType, 4); @@ -2706,7 +2728,7 @@ EscapedMemoryTest1::SetupRvsdg() /* * Assign nodes */ - this->LambdaTest = lambdaTest->node(); + this->LambdaTest = 
&rvsdg::AssertGetOwnerNode(*lambdaTest); this->DeltaA = deltaA->node(); this->DeltaB = deltaB->node(); @@ -2773,8 +2795,8 @@ EscapedMemoryTest2::SetupRvsdg() functionType, "ReturnAddress", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto eight = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 8); @@ -2802,10 +2824,10 @@ EscapedMemoryTest2::SetupRvsdg() functionType, "CallExternalFunction1", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto externalFunction1 = lambda->add_ctxvar(externalFunction1Argument); + auto externalFunction1 = lambda->AddContextVar(*externalFunction1Argument).inner; auto eight = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 8); @@ -2838,10 +2860,10 @@ EscapedMemoryTest2::SetupRvsdg() functionType, "CallExternalFunction2", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto externalFunction2 = lambda->add_ctxvar(externalFunction2Argument); + auto externalFunction2 = lambda->AddContextVar(*externalFunction2Argument).inner; auto & call = CallNode::CreateNode( externalFunction2, @@ -2877,9 +2899,9 @@ EscapedMemoryTest2::SetupRvsdg() /* * Assign nodes */ - this->ReturnAddressFunction = returnAddressFunction->node(); - this->CallExternalFunction1 = callExternalFunction1->node(); - this->CallExternalFunction2 = callExternalFunction2->node(); + this->ReturnAddressFunction = 
&rvsdg::AssertGetOwnerNode(*returnAddressFunction); + this->CallExternalFunction1 = &rvsdg::AssertGetOwnerNode(*callExternalFunction1); + this->CallExternalFunction2 = &rvsdg::AssertGetOwnerNode(*callExternalFunction2); this->ExternalFunction1Call = externalFunction1Call; this->ExternalFunction2Call = externalFunction2Call; @@ -2951,10 +2973,10 @@ EscapedMemoryTest3::SetupRvsdg() auto lambda = lambda::node::create(rvsdg->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto externalFunction = lambda->add_ctxvar(externalFunctionArgument); + auto externalFunction = lambda->AddContextVar(*externalFunctionArgument).inner; auto & call = CallNode::CreateNode( externalFunction, @@ -2984,7 +3006,7 @@ EscapedMemoryTest3::SetupRvsdg() auto [lambdaTest, callExternalFunction, loadNode] = SetupTestFunction(importExternalFunction); // Assign nodes - this->LambdaTest = lambdaTest->node(); + this->LambdaTest = &rvsdg::AssertGetOwnerNode(*lambdaTest); this->DeltaGlobal = deltaGlobal->node(); this->ImportExternalFunction = importExternalFunction; this->CallExternalFunction = callExternalFunction; @@ -3059,10 +3081,10 @@ MemcpyTest::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto globalArrayArgument = lambda->add_ctxvar(&globalArray); + auto globalArrayArgument = lambda->AddContextVar(globalArray).inner; auto zero = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 0); 
auto two = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 2); @@ -3087,7 +3109,7 @@ MemcpyTest::SetupRvsdg() }; auto SetupFunctionG = - [&](delta::output & localArray, delta::output & globalArray, lambda::output & lambdaF) + [&](delta::output & localArray, delta::output & globalArray, rvsdg::output & lambdaF) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -3096,12 +3118,12 @@ MemcpyTest::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg->root(), functionType, "g", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto localArrayArgument = lambda->add_ctxvar(&localArray); - auto globalArrayArgument = lambda->add_ctxvar(&globalArray); - auto functionFArgument = lambda->add_ctxvar(&lambdaF); + auto localArrayArgument = lambda->AddContextVar(localArray).inner; + auto globalArrayArgument = lambda->AddContextVar(globalArray).inner; + auto functionFArgument = lambda->AddContextVar(lambdaF).inner; auto bcLocalArray = bitcast_op::create(localArrayArgument, PointerType::Create()); auto bcGlobalArray = bitcast_op::create(globalArrayArgument, PointerType::Create()); @@ -3116,7 +3138,7 @@ MemcpyTest::SetupRvsdg() auto & call = CallNode::CreateNode( functionFArgument, - lambdaF.node()->Type(), + rvsdg::AssertGetOwnerNode(lambdaF).Type(), { iOStateArgument, memcpyResults[0] }); auto lambdaOutput = lambda->finalize(call.Results()); @@ -3134,8 +3156,8 @@ MemcpyTest::SetupRvsdg() /* * Assign nodes */ - this->LambdaF_ = lambdaF->node(); - this->LambdaG_ = lambdaG->node(); + this->LambdaF_ = &rvsdg::AssertGetOwnerNode(*lambdaF); + this->LambdaG_ = &rvsdg::AssertGetOwnerNode(*lambdaG); this->LocalArray_ = localArray->node(); 
this->GlobalArray_ = globalArray->node(); this->CallF_ = callF; @@ -3173,10 +3195,10 @@ MemcpyTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg->root(), functionType, "g", linkage::internal_linkage); - auto s1Argument = lambda->fctargument(0); - auto s2Argument = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); + auto s1Argument = lambda->GetFunctionArguments()[0]; + auto s2Argument = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; auto c0 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 0); auto c128 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 64, 128); @@ -3196,7 +3218,7 @@ MemcpyTest2::SetupRvsdg() return std::make_tuple(lambdaOutput, jlm::rvsdg::output::GetNode(*memcpyResults[0])); }; - auto SetupFunctionF = [&](lambda::output & functionF) + auto SetupFunctionF = [&](rvsdg::output & functionF) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -3208,12 +3230,12 @@ MemcpyTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); - auto s1Argument = lambda->fctargument(0); - auto s2Argument = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); + auto s1Argument = lambda->GetFunctionArguments()[0]; + auto s2Argument = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; - auto functionFArgument = lambda->add_ctxvar(&functionF); + auto functionFArgument = lambda->AddContextVar(functionF).inner; auto c0 = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 0); 
@@ -3225,7 +3247,7 @@ MemcpyTest2::SetupRvsdg() auto & call = CallNode::CreateNode( functionFArgument, - functionF.node()->Type(), + rvsdg::AssertGetOwnerNode(functionF).Type(), { ldS1[0], ldS2[0], iOStateArgument, ldS2[1] }); auto lambdaOutput = lambda->finalize(call.Results()); @@ -3238,8 +3260,8 @@ MemcpyTest2::SetupRvsdg() auto [lambdaG, memcpyNode] = SetupFunctionG(); auto [lambdaF, callG] = SetupFunctionF(*lambdaG); - this->LambdaF_ = lambdaF->node(); - this->LambdaG_ = lambdaG->node(); + this->LambdaF_ = &rvsdg::AssertGetOwnerNode(*lambdaF); + this->LambdaG_ = &rvsdg::AssertGetOwnerNode(*lambdaG); this->CallG_ = callG; this->Memcpy_ = memcpyNode; @@ -3269,9 +3291,9 @@ MemcpyTest3::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }); Lambda_ = lambda::node::create(rvsdg->root(), functionType, "f", linkage::internal_linkage); - auto pArgument = Lambda_->fctargument(0); - auto iOStateArgument = Lambda_->fctargument(1); - auto memoryStateArgument = Lambda_->fctargument(2); + auto pArgument = Lambda_->GetFunctionArguments()[0]; + auto iOStateArgument = Lambda_->GetFunctionArguments()[1]; + auto memoryStateArgument = Lambda_->GetFunctionArguments()[2]; auto eight = jlm::rvsdg::create_bitconstant(Lambda_->subregion(), 64, 8); auto zero = jlm::rvsdg::create_bitconstant(Lambda_->subregion(), 32, 0); @@ -3279,7 +3301,8 @@ MemcpyTest3::SetupRvsdg() auto three = jlm::rvsdg::create_bitconstant(Lambda_->subregion(), 64, 3); auto allocaResults = alloca_op::create(structType, eight, 8); - auto memoryState = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); + auto memoryState = MemoryStateMergeOperation::Create( + std::vector{ allocaResults[1], memoryStateArgument }); auto memcpyResults = MemCpyNonVolatileOperation::create(allocaResults[0], pArgument, eight, { memoryState }); @@ -3348,16 +3371,17 @@ LinkedListTest::SetupRvsdg() auto lambda = lambda::node::create(rvsdg.root(), functionType, "next", linkage::external_linkage); - auto 
iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; - auto myListArgument = lambda->add_ctxvar(&myList); + auto myListArgument = lambda->AddContextVar(myList).inner; auto zero = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 0); auto size = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); auto alloca = alloca_op::create(pointerType, size, 4); - auto mergedMemoryState = MemoryStateMergeOperation::Create({ alloca[1], memoryStateArgument }); + auto mergedMemoryState = MemoryStateMergeOperation::Create( + std::vector{ alloca[1], memoryStateArgument }); auto load1 = LoadNonVolatileNode::Create(myListArgument, { mergedMemoryState }, pointerType, 4); auto store1 = StoreNonVolatileNode::Create(alloca[0], load1[0], { load1[1] }, 4); @@ -3383,7 +3407,7 @@ LinkedListTest::SetupRvsdg() * Assign nodes */ this->DeltaMyList_ = deltaMyList->node(); - this->LambdaNext_ = lambdaNext->node(); + this->LambdaNext_ = &rvsdg::AssertGetOwnerNode(*lambdaNext); this->Alloca_ = alloca; return rvsdgModule; @@ -3425,9 +3449,9 @@ AllMemoryNodesTest::SetupRvsdg() // Start of function "f" Lambda_ = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); - auto entryMemoryState = Lambda_->fctargument(0); - auto deltaContextVar = Lambda_->add_ctxvar(Delta_->output()); - auto importContextVar = Lambda_->add_ctxvar(Import_); + auto entryMemoryState = Lambda_->GetFunctionArguments()[0]; + auto deltaContextVar = Lambda_->AddContextVar(*Delta_->output()).inner; + auto importContextVar = Lambda_->AddContextVar(*Import_).inner; // Create alloca node auto allocaSize = jlm::rvsdg::create_bitconstant(Lambda_->subregion(), 32, 1); @@ -3505,7 +3529,7 @@ NAllocaNodesTest::SetupRvsdg() auto allocaSize = jlm::rvsdg::create_bitconstant(Function_->subregion(), 32, 1); - jlm::rvsdg::output * latestMemoryState = 
Function_->fctargument(0); + jlm::rvsdg::output * latestMemoryState = Function_->GetFunctionArguments()[0]; for (size_t i = 0; i < NumAllocaNodes_; i++) { @@ -3561,7 +3585,7 @@ EscapingLocalFunctionTest::SetupRvsdg() "localFunction", linkage::internal_linkage); - LocalFuncParam_ = LocalFunc_->fctargument(0); + LocalFuncParam_ = LocalFunc_->GetFunctionArguments()[0]; const auto allocaSize = rvsdg::create_bitconstant(LocalFunc_->subregion(), 32, 1); const auto allocaOutputs = alloca_op::create(uint32Type, allocaSize, 4); @@ -3569,14 +3593,14 @@ EscapingLocalFunctionTest::SetupRvsdg() // Merge function's input Memory State and alloca node's memory state rvsdg::output * mergedMemoryState = MemoryStateMergeOperation::Create( - std::vector{ LocalFunc_->fctargument(1), allocaOutputs[1] }); + std::vector{ LocalFunc_->GetFunctionArguments()[1], allocaOutputs[1] }); // Store the function parameter into the alloca node auto storeOutputs = StoreNonVolatileNode::Create(allocaOutputs[0], LocalFuncParam_, { mergedMemoryState }, 4); // Bring in deltaOuput as a context variable - const auto deltaOutputCtxVar = LocalFunc_->add_ctxvar(deltaOutput); + const auto deltaOutputCtxVar = LocalFunc_->AddContextVar(*deltaOutput).inner; // Return &global LocalFunc_->finalize({ deltaOutputCtxVar, storeOutputs[0] }); @@ -3589,10 +3613,10 @@ EscapingLocalFunctionTest::SetupRvsdg() "exportedFunc", linkage::external_linkage); - const auto localFuncCtxVar = ExportedFunc_->add_ctxvar(LocalFuncRegister_); + const auto localFuncCtxVar = ExportedFunc_->AddContextVar(*LocalFuncRegister_).inner; // Return &localFunc, pass memory state directly through - ExportedFunc_->finalize({ localFuncCtxVar, ExportedFunc_->fctargument(0) }); + ExportedFunc_->finalize({ localFuncCtxVar, ExportedFunc_->GetFunctionArguments()[0] }); GraphExport::Create(*ExportedFunc_->output(), "exportedFunc"); @@ -3616,8 +3640,8 @@ FreeNullTest::SetupRvsdg() LambdaMain_ = lambda::node::create(graph->root(), functionType, "main", 
linkage::external_linkage); - auto iOStateArgument = LambdaMain_->fctargument(0); - auto memoryStateArgument = LambdaMain_->fctargument(1); + auto iOStateArgument = LambdaMain_->GetFunctionArguments()[0]; + auto memoryStateArgument = LambdaMain_->GetFunctionArguments()[1]; auto constantPointerNullResult = ConstantPointerNullOperation::Create(LambdaMain_->subregion(), PointerType::Create()); @@ -3650,15 +3674,15 @@ LambdaCallArgumentMismatch::SetupRvsdg() { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(rvsdg.root(), functionType, "g", linkage::internal_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto five = rvsdg::create_bitconstant(lambda->subregion(), 32, 5); return lambda->finalize({ five, iOStateArgument, memoryStateArgument }); }; - auto setupLambdaMain = [&](lambda::output & lambdaG) + auto setupLambdaMain = [&](rvsdg::output & lambdaG) { auto pointerType = PointerType::Create(); auto iOStateType = iostatetype::Create(); @@ -3676,9 +3700,9 @@ LambdaCallArgumentMismatch::SetupRvsdg() auto lambda = lambda::node::create(rvsdg.root(), functionTypeMain, "main", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); - auto lambdaGArgument = lambda->add_ctxvar(&lambdaG); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; + auto lambdaGArgument = lambda->AddContextVar(lambdaG).inner; auto one = rvsdg::create_bitconstant(lambda->subregion(), 32, 1); auto six = rvsdg::create_bitconstant(lambda->subregion(), 32, 6); @@ -3707,9 +3731,9 @@ LambdaCallArgumentMismatch::SetupRvsdg() return std::make_tuple(lambdaOutput, &call); }; - LambdaG_ = setupLambdaG()->node(); + 
LambdaG_ = &rvsdg::AssertGetOwnerNode(*setupLambdaG()); auto [lambdaMainOutput, call] = setupLambdaMain(*LambdaG_->output()); - LambdaMain_ = lambdaMainOutput->node(); + LambdaMain_ = &rvsdg::AssertGetOwnerNode(*lambdaMainOutput); Call_ = call; return rvsdgModule; @@ -3747,10 +3771,10 @@ VariadicFunctionTest1::SetupRvsdg() // Setup f() { LambdaF_ = lambda::node::create(rvsdg.root(), lambdaFType, "f", linkage::internal_linkage); - auto iArgument = LambdaF_->fctargument(0); - auto iOStateArgument = LambdaF_->fctargument(1); - auto memoryStateArgument = LambdaF_->fctargument(2); - auto lambdaHArgument = LambdaF_->add_ctxvar(ImportH_); + auto iArgument = LambdaF_->GetFunctionArguments()[0]; + auto iOStateArgument = LambdaF_->GetFunctionArguments()[1]; + auto memoryStateArgument = LambdaF_->GetFunctionArguments()[2]; + auto lambdaHArgument = LambdaF_->AddContextVar(*ImportH_).inner; auto one = jlm::rvsdg::create_bitconstant(LambdaF_->subregion(), 32, 1); auto three = jlm::rvsdg::create_bitconstant(LambdaF_->subregion(), 32, 3); @@ -3774,15 +3798,16 @@ VariadicFunctionTest1::SetupRvsdg() // Setup g() { LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); - auto iOStateArgument = LambdaG_->fctargument(0); - auto memoryStateArgument = LambdaG_->fctargument(1); - auto lambdaFArgument = LambdaG_->add_ctxvar(LambdaF_->output()); + auto iOStateArgument = LambdaG_->GetFunctionArguments()[0]; + auto memoryStateArgument = LambdaG_->GetFunctionArguments()[1]; + auto lambdaFArgument = LambdaG_->AddContextVar(*LambdaF_->output()).inner; auto one = jlm::rvsdg::create_bitconstant(LambdaG_->subregion(), 32, 1); auto five = jlm::rvsdg::create_bitconstant(LambdaG_->subregion(), 32, 5); auto allocaResults = alloca_op::create(jlm::rvsdg::bittype::Create(32), one, 4); - auto merge = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); + auto merge = MemoryStateMergeOperation::Create( + std::vector{ allocaResults[1], 
memoryStateArgument }); AllocaNode_ = rvsdg::output::GetNode(*allocaResults[0]); auto storeResults = StoreNonVolatileNode::Create(allocaResults[0], five, { merge }, 4); @@ -3858,19 +3883,20 @@ VariadicFunctionTest2::SetupRvsdg() { LambdaFst_ = lambda::node::create(rvsdg.root(), lambdaFstType, "fst", linkage::internal_linkage); - auto iOStateArgument = LambdaFst_->fctargument(2); - auto memoryStateArgument = LambdaFst_->fctargument(3); - auto llvmLifetimeStartArgument = LambdaFst_->add_ctxvar(llvmLifetimeStart); - auto llvmLifetimeEndArgument = LambdaFst_->add_ctxvar(llvmLifetimeEnd); - auto llvmVaStartArgument = LambdaFst_->add_ctxvar(llvmVaStart); - auto llvmVaEndArgument = LambdaFst_->add_ctxvar(llvmVaEnd); + auto iOStateArgument = LambdaFst_->GetFunctionArguments()[2]; + auto memoryStateArgument = LambdaFst_->GetFunctionArguments()[3]; + auto llvmLifetimeStartArgument = LambdaFst_->AddContextVar(*llvmLifetimeStart).inner; + auto llvmLifetimeEndArgument = LambdaFst_->AddContextVar(*llvmLifetimeEnd).inner; + auto llvmVaStartArgument = LambdaFst_->AddContextVar(*llvmVaStart).inner; + auto llvmVaEndArgument = LambdaFst_->AddContextVar(*llvmVaEnd).inner; auto one = jlm::rvsdg::create_bitconstant(LambdaFst_->subregion(), 32, 1); auto twentyFour = jlm::rvsdg::create_bitconstant(LambdaFst_->subregion(), 64, 24); auto fortyOne = jlm::rvsdg::create_bitconstant(LambdaFst_->subregion(), 32, 41); auto allocaResults = alloca_op::create(arrayType, one, 16); - auto memoryState = MemoryStateMergeOperation::Create({ allocaResults[1], memoryStateArgument }); + auto memoryState = MemoryStateMergeOperation::Create( + std::vector{ allocaResults[1], memoryStateArgument }); AllocaNode_ = rvsdg::output::GetNode(*allocaResults[0]); auto & callLLvmLifetimeStart = CallNode::CreateNode( @@ -3969,9 +3995,9 @@ VariadicFunctionTest2::SetupRvsdg() // Setup function g() { LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); - auto iOStateArgument = 
LambdaG_->fctargument(0); - auto memoryStateArgument = LambdaG_->fctargument(1); - auto lambdaFstArgument = LambdaG_->add_ctxvar(LambdaFst_->output()); + auto iOStateArgument = LambdaG_->GetFunctionArguments()[0]; + auto memoryStateArgument = LambdaG_->GetFunctionArguments()[1]; + auto lambdaFstArgument = LambdaG_->AddContextVar(*LambdaFst_->output()).inner; auto zero = jlm::rvsdg::create_bitconstant(LambdaG_->subregion(), 32, 0); auto one = jlm::rvsdg::create_bitconstant(LambdaG_->subregion(), 32, 1); diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index 0cc55821a..0bc3339fb 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -2122,7 +2122,7 @@ class AllMemoryNodesTest final : public RvsdgTest return *Lambda_; } - [[nodiscard]] const jlm::llvm::lambda::output & + [[nodiscard]] const rvsdg::output & GetLambdaOutput() const noexcept { JLM_ASSERT(Lambda_); @@ -2279,7 +2279,7 @@ class EscapingLocalFunctionTest final : public RvsdgTest return *LocalFuncRegister_; } - [[nodiscard]] const jlm::rvsdg::RegionArgument & + [[nodiscard]] const jlm::rvsdg::output & GetLocalFunctionParam() const noexcept { JLM_ASSERT(LocalFuncParam_); @@ -2306,7 +2306,7 @@ class EscapingLocalFunctionTest final : public RvsdgTest jlm::llvm::delta::node * Global_ = {}; jlm::llvm::lambda::node * LocalFunc_ = {}; - jlm::rvsdg::RegionArgument * LocalFuncParam_ = {}; + jlm::rvsdg::output * LocalFuncParam_ = {}; jlm::rvsdg::output * LocalFuncRegister_ = {}; jlm::rvsdg::node * LocalFuncParamAllocaNode_ = {}; jlm::llvm::lambda::node * ExportedFunc_ = {}; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp index 93808a2cd..94cbbc141 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp @@ -31,7 +31,7 @@ TestDeadLoopNode() loop_node::create(lambdaNode->subregion()); - lambdaNode->finalize({ 
lambdaNode->fctargument(1) }); + lambdaNode->finalize({ lambdaNode->GetFunctionArguments()[1] }); // Act EliminateDeadNodes(rvsdgModule); @@ -60,8 +60,8 @@ TestDeadLoopNodeOutput() "f", jlm::llvm::linkage::external_linkage); - auto p = lambdaNode->fctargument(0); - auto x = lambdaNode->fctargument(1); + auto p = lambdaNode->GetFunctionArguments()[0]; + auto x = lambdaNode->GetFunctionArguments()[1]; auto loopNode = loop_node::create(lambdaNode->subregion()); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index 75ea8087a..8b90435f3 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -41,16 +41,16 @@ TestTraceArgument() linkage::external_linkage); // Load followed by store - auto loadAddress = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(3); + auto loadAddress = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; auto loadOutput = LoadNonVolatileNode::Create( loadAddress, { memoryStateArgument }, jlm::llvm::PointerType::Create(), 32); - auto storeAddress = lambda->fctargument(1); - auto storeData = lambda->fctargument(2); + auto storeAddress = lambda->GetFunctionArguments()[1]; + auto storeData = lambda->GetFunctionArguments()[2]; auto storeOutput = StoreNonVolatileNode::Create(storeAddress, storeData, { loadOutput[1] }, 32); auto lambdaOutput = lambda->finalize({ storeOutput[0] }); @@ -98,8 +98,8 @@ TestLoad() linkage::external_linkage); // Single load - auto loadAddress = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto loadAddress = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto loadOutput = LoadNonVolatileNode::Create( loadAddress, { memoryStateArgument }, @@ -182,9 +182,9 @@ TestLoadStore() linkage::external_linkage); // Load 
followed by store - auto loadAddress = lambda->fctargument(0); - auto storeData = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto loadAddress = lambda->GetFunctionArguments()[0]; + auto storeData = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto loadOutput = LoadNonVolatileNode::Create( loadAddress, { memoryStateArgument }, @@ -274,9 +274,9 @@ TestThetaLoad() auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); auto thetaRegion = theta->subregion(); // Predicate - auto idv = theta->add_loopvar(lambda->fctargument(0)); - auto lvs = theta->add_loopvar(lambda->fctargument(1)); - auto lve = theta->add_loopvar(lambda->fctargument(2)); + auto idv = theta->add_loopvar(lambda->GetFunctionArguments()[0]); + auto lvs = theta->add_loopvar(lambda->GetFunctionArguments()[1]); + auto lve = theta->add_loopvar(lambda->GetFunctionArguments()[2]); jlm::rvsdg::bitult_op ult(32); jlm::rvsdg::bitsgt_op sgt(32); jlm::rvsdg::bitadd_op add(32); @@ -292,8 +292,8 @@ TestThetaLoad() theta->set_predicate(match); // Load node - auto loadAddress = theta->add_loopvar(lambda->fctargument(3)); - auto memoryStateArgument = theta->add_loopvar(lambda->fctargument(4)); + auto loadAddress = theta->add_loopvar(lambda->GetFunctionArguments()[3]); + auto memoryStateArgument = theta->add_loopvar(lambda->GetFunctionArguments()[4]); auto loadOutput = LoadNonVolatileNode::Create( loadAddress->argument(), { memoryStateArgument->argument() }, diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 6a5752413..23e8a7fad 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -33,11 +33,11 @@ TestFork() auto loop = hls::loop_node::create(lambda->subregion()); auto subregion = loop->subregion(); rvsdg::output * idvBuffer; - loop->add_loopvar(lambda->fctargument(0), &idvBuffer); + 
loop->add_loopvar(lambda->GetFunctionArguments()[0], &idvBuffer); rvsdg::output * lvsBuffer; - loop->add_loopvar(lambda->fctargument(1), &lvsBuffer); + loop->add_loopvar(lambda->GetFunctionArguments()[1], &lvsBuffer); rvsdg::output * lveBuffer; - loop->add_loopvar(lambda->fctargument(2), &lveBuffer); + loop->add_loopvar(lambda->GetFunctionArguments()[2], &lveBuffer); auto arm = rvsdg::simple_node::create_normalized(subregion, add, { idvBuffer, lvsBuffer })[0]; auto cmp = rvsdg::simple_node::create_normalized(subregion, ult, { arm, lveBuffer })[0]; @@ -102,7 +102,7 @@ TestConstantFork() auto loop = hls::loop_node::create(lambdaRegion); auto subregion = loop->subregion(); rvsdg::output * idvBuffer; - loop->add_loopvar(lambda->fctargument(0), &idvBuffer); + loop->add_loopvar(lambda->GetFunctionArguments()[0], &idvBuffer); auto bitConstant1 = rvsdg::create_bitconstant(subregion, 32, 1); auto arm = rvsdg::simple_node::create_normalized(subregion, add, { idvBuffer, bitConstant1 })[0]; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index 0d693d4d4..6207a6969 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -28,10 +28,10 @@ TestWithMatch() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->fctargument(0)); + auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->GetFunctionArguments()[0]); auto gamma = jlm::rvsdg::GammaNode::create(match, 2); - auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); - auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); + auto ev1 = gamma->add_entryvar(lambda->GetFunctionArguments()[1]); + auto ev2 = gamma->add_entryvar(lambda->GetFunctionArguments()[2]); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); auto f = lambda->finalize({ ex }); @@ -65,9 +65,9 @@ TestWithoutMatch() auto 
lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); - auto gamma = jlm::rvsdg::GammaNode::create(lambda->fctargument(0), 2); - auto ev1 = gamma->add_entryvar(lambda->fctargument(1)); - auto ev2 = gamma->add_entryvar(lambda->fctargument(2)); + auto gamma = jlm::rvsdg::GammaNode::create(lambda->GetFunctionArguments()[0], 2); + auto ev1 = gamma->add_entryvar(lambda->GetFunctionArguments()[1]); + auto ev2 = gamma->add_entryvar(lambda->GetFunctionArguments()[2]); auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); auto f = lambda->finalize({ ex }); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index aa7bf06b2..201631fb2 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -32,9 +32,9 @@ TestUnknownBoundaries() auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); auto subregion = theta->subregion(); - auto idv = theta->add_loopvar(lambda->fctargument(0)); - auto lvs = theta->add_loopvar(lambda->fctargument(1)); - auto lve = theta->add_loopvar(lambda->fctargument(2)); + auto idv = theta->add_loopvar(lambda->GetFunctionArguments()[0]); + auto lvs = theta->add_loopvar(lambda->GetFunctionArguments()[1]); + auto lve = theta->add_loopvar(lambda->GetFunctionArguments()[2]); auto arm = jlm::rvsdg::simple_node::create_normalized( subregion, diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index af5602049..86c9a923f 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -138,10 +138,10 @@ TestLambda() auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); - auto argument0 = lambdaNode->fctargument(0); - auto argument1 = lambdaNode->fctargument(1); - auto argument2 = 
lambdaNode->add_ctxvar(x); - auto argument3 = lambdaNode->add_ctxvar(x); + auto argument0 = lambdaNode->GetFunctionArguments()[0]; + auto argument1 = lambdaNode->GetFunctionArguments()[1]; + auto argument2 = lambdaNode->AddContextVar(*x).inner; + auto argument3 = lambdaNode->AddContextVar(*x).inner; auto result1 = jlm::tests::SimpleNode::Create(*lambdaNode->subregion(), { argument1 }, { valueType }) diff --git a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp index 76d0e3b06..d64edd77a 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp @@ -52,7 +52,7 @@ test() auto loop = hls::loop_node::create(lambda->subregion()); - auto loop_out = loop->add_loopvar(lambda->fctargument(1)); + auto loop_out = loop->add_loopvar(lambda->GetFunctionArguments()[1]); auto f = lambda->finalize({ loop_out }); jlm::llvm::GraphExport::Create(*f, ""); @@ -66,7 +66,7 @@ test() stringToFile(dhls2.run(rm), "/tmp/jlm_hls_test_after.dot"); // The whole loop gets eliminated, leading to a direct connection - assert(lambda->fctresult(0)->origin() == lambda->fctargument(1)); + assert(lambda->GetFunctionResults()[0]->origin() == lambda->GetFunctionArguments()[1]); return 0; } diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp index c457f4469..68caecaab 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -38,10 +38,10 @@ GammaWithMatch() "lambdaOutput", linkage::external_linkage); - auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->fctargument(0)); + auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->GetFunctionArguments()[0]); auto gamma = jlm::rvsdg::GammaNode::create(match, 2); - auto gammaInput1 = gamma->add_entryvar(lambdaNode->fctargument(1)); - auto gammaInput2 = 
gamma->add_entryvar(lambdaNode->fctargument(2)); + auto gammaInput1 = gamma->add_entryvar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput2 = gamma->add_entryvar(lambdaNode->GetFunctionArguments()[2]); auto gammaOutput = gamma->add_exitvar({ gammaInput1->argument(0), gammaInput2->argument(1) }); auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); @@ -92,9 +92,9 @@ GammaWithoutMatch() "lambdaOutput", linkage::external_linkage); - auto gammaNode = jlm::rvsdg::GammaNode::create(lambdaNode->fctargument(0), 2); - auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->fctargument(1)); - auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->fctargument(2)); + auto gammaNode = jlm::rvsdg::GammaNode::create(lambdaNode->GetFunctionArguments()[0], 2); + auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[2]); auto gammaOutput = gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput2->argument(1) }); auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); @@ -146,11 +146,12 @@ EmptyGammaWithThreeSubregions() "lambdaOutput", linkage::external_linkage); - auto match = jlm::rvsdg::match(32, { { 0, 0 }, { 1, 1 } }, 2, 3, lambdaNode->fctargument(0)); + auto match = + jlm::rvsdg::match(32, { { 0, 0 }, { 1, 1 } }, 2, 3, lambdaNode->GetFunctionArguments()[0]); auto gammaNode = jlm::rvsdg::GammaNode::create(match, 3); - auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->fctargument(1)); - auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->fctargument(2)); + auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[2]); auto gammaOutput = gammaNode->add_exitvar( { gammaInput1->argument(0), gammaInput1->argument(1), gammaInput2->argument(2) }); @@ -198,9 +199,9 @@ PartialEmptyGamma() "lambdaOutput", linkage::external_linkage); - auto match = 
jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->fctargument(0)); + auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->GetFunctionArguments()[0]); auto gammaNode = jlm::rvsdg::GammaNode::create(match, 2); - auto gammaInput = gammaNode->add_entryvar(lambdaNode->fctargument(1)); + auto gammaInput = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[1]); auto output = jlm::tests::create_testop( gammaNode->subregion(1), { gammaInput->argument(1) }, diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 0f2ea8883..571a5c5b6 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -118,14 +118,18 @@ TestCallTypeClassifierIndirectCall() auto SetupFunction = [&]() { auto lambda = lambda::node::create(graph->root(), fcttype2, "fct", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto one = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 1); auto alloca = alloca_op::create(PointerType::Create(), one, 8); - auto store = StoreNonVolatileNode::Create(alloca[0], lambda->fctargument(0), { alloca[1] }, 8); + auto store = StoreNonVolatileNode::Create( + alloca[0], + lambda->GetFunctionArguments()[0], + { alloca[1] }, + 8); auto load = LoadNonVolatileNode::Create(alloca[0], store, PointerType::Create(), 8); @@ -175,8 +179,8 @@ TestCallTypeClassifierNonRecursiveDirectCall() { auto lambda = lambda::node::create(graph->root(), functionTypeG, "g", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto constant = 
jlm::tests::test_op::create(lambda->subregion(), {}, { vt }); @@ -186,9 +190,9 @@ TestCallTypeClassifierNonRecursiveDirectCall() return lambdaOutput; }; - auto SetupFunctionF = [&](lambda::output * g) + auto SetupFunctionF = [&](jlm::rvsdg::output * g) { - auto SetupOuterTheta = [](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * functionG) + auto SetupOuterTheta = [](jlm::rvsdg::Region * region, jlm::rvsdg::output * functionG) { auto outerTheta = jlm::rvsdg::ThetaNode::create(region); auto otf = outerTheta->add_loopvar(functionG); @@ -216,9 +220,9 @@ TestCallTypeClassifierNonRecursiveDirectCall() { vt, iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "f", linkage::external_linkage); - auto functionGArgument = lambda->add_ctxvar(g); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto functionGArgument = lambda->AddContextVar(*g).inner; + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto functionG = SetupOuterTheta(lambda->subregion(), functionGArgument); @@ -271,18 +275,18 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() { auto lambda = lambda::node::create(graph->root(), functionTypeG, "g", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto c1 = jlm::tests::test_op::create(lambda->subregion(), {}, { vt }); return lambda->finalize({ c1->output(0), iOStateArgument, memoryStateArgument }); }; - auto SetupFunctionF = [&](lambda::output * g) + auto SetupFunctionF = [&](jlm::rvsdg::output * g) { auto SetupOuterTheta = [&](jlm::rvsdg::Region * region, - jlm::rvsdg::RegionArgument * g, + jlm::rvsdg::output * g, jlm::rvsdg::output * value, 
jlm::rvsdg::output * iOState, jlm::rvsdg::output * memoryState) @@ -329,9 +333,9 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() { vt, iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph->root(), functionType, "f", linkage::external_linkage); - auto functionG = lambda->add_ctxvar(g); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto functionG = lambda->AddContextVar(*g).inner; + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto value = jlm::tests::test_op::create(lambda->subregion(), {}, { vt })->output(0); @@ -392,11 +396,11 @@ TestCallTypeClassifierRecursiveDirectCall() auto lambda = lambda::node::create(pb.subregion(), functionType, "fib", linkage::external_linkage); - auto valueArgument = lambda->fctargument(0); - auto pointerArgument = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); - auto ctxVarFib = lambda->add_ctxvar(fibrv->argument()); + auto valueArgument = lambda->GetFunctionArguments()[0]; + auto pointerArgument = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; + auto ctxVarFib = lambda->AddContextVar(*fibrv->argument()).inner; auto two = jlm::rvsdg::create_bitconstant(lambda->subregion(), 64, 2); auto bitult = jlm::rvsdg::bitult_op::create(64, valueArgument, two); diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index 4c0f76a91..05e83b0f3 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -27,13 +27,14 @@ TestArgumentIterators() functionType, "f", linkage::external_linkage); - lambda->finalize({ lambda->fctargument(0) }); + lambda->finalize({ lambda->GetFunctionArguments()[0] 
}); - std::vector functionArguments; - for (auto & argument : lambda->fctarguments()) - functionArguments.push_back(&argument); + std::vector functionArguments; + for (auto argument : lambda->GetFunctionArguments()) + functionArguments.push_back(argument); - assert(functionArguments.size() == 1 && functionArguments[0] == lambda->fctargument(0)); + assert( + functionArguments.size() == 1 && functionArguments[0] == lambda->GetFunctionArguments()[0]); } { @@ -49,7 +50,7 @@ TestArgumentIterators() lambda->finalize({ nullaryNode }); - assert(lambda->nfctarguments() == 0); + assert(lambda->GetFunctionArguments().empty()); } { @@ -63,18 +64,18 @@ TestArgumentIterators() "f", linkage::external_linkage); - auto cv = lambda->add_ctxvar(rvsdgImport); + auto cv = lambda->AddContextVar(*rvsdgImport).inner; - lambda->finalize({ lambda->fctargument(0), cv }); + lambda->finalize({ lambda->GetFunctionArguments()[0], cv }); - std::vector functionArguments; - for (auto & argument : lambda->fctarguments()) - functionArguments.push_back(&argument); + std::vector functionArguments; + for (auto argument : lambda->GetFunctionArguments()) + functionArguments.push_back(argument); assert(functionArguments.size() == 3); - assert(functionArguments[0] == lambda->fctargument(0)); - assert(functionArguments[1] == lambda->fctargument(1)); - assert(functionArguments[2] == lambda->fctargument(2)); + assert(functionArguments[0] == lambda->GetFunctionArguments()[0]); + assert(functionArguments[1] == lambda->GetFunctionArguments()[1]); + assert(functionArguments[2] == lambda->GetFunctionArguments()[2]); } } @@ -126,13 +127,13 @@ TestRemoveLambdaInputsWhere() auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); - auto lambdaInput0 = lambdaNode->add_ctxvar(x)->input(); - auto lambdaInput1 = lambdaNode->add_ctxvar(x)->input(); - lambdaNode->add_ctxvar(x)->input(); + auto lambdaBinder0 = lambdaNode->AddContextVar(*x); + auto lambdaBinder1 = 
lambdaNode->AddContextVar(*x); + lambdaNode->AddContextVar(*x); auto result = jlm::tests::SimpleNode::Create( *lambdaNode->subregion(), - { lambdaInput1->argument() }, + { lambdaBinder1.inner }, { valueType }) .output(0); @@ -141,38 +142,38 @@ TestRemoveLambdaInputsWhere() // Act & Assert // Try to remove lambdaInput1 even though it is used auto numRemovedInputs = lambdaNode->RemoveLambdaInputsWhere( - [&](const lambda::cvinput & input) + [&](const jlm::rvsdg::input & input) { - return input.index() == lambdaInput1->index(); + return input.index() == lambdaBinder1.input->index(); }); assert(numRemovedInputs == 0); assert(lambdaNode->ninputs() == 3); - assert(lambdaNode->ncvarguments() == 3); + assert(lambdaNode->GetContextVars().size() == 3); // Remove lambdaInput2 numRemovedInputs = lambdaNode->RemoveLambdaInputsWhere( - [&](const lambda::cvinput & input) + [&](const jlm::rvsdg::input & input) { return input.index() == 2; }); assert(numRemovedInputs == 1); assert(lambdaNode->ninputs() == 2); - assert(lambdaNode->ncvarguments() == 2); - assert(lambdaNode->input(0) == lambdaInput0); - assert(lambdaNode->input(1) == lambdaInput1); + assert(lambdaNode->GetContextVars().size() == 2); + assert(lambdaNode->input(0) == lambdaBinder0.input); + assert(lambdaNode->input(1) == lambdaBinder1.input); // Remove lambdaInput0 numRemovedInputs = lambdaNode->RemoveLambdaInputsWhere( - [&](const lambda::cvinput & input) + [&](const jlm::rvsdg::input & input) { return input.index() == 0; }); assert(numRemovedInputs == 1); assert(lambdaNode->ninputs() == 1); - assert(lambdaNode->ncvarguments() == 1); - assert(lambdaNode->input(0) == lambdaInput1); - assert(lambdaInput1->index() == 0); - assert(lambdaInput1->argument()->index() == 0); + assert(lambdaNode->GetContextVars().size() == 1); + assert(lambdaNode->input(0) == lambdaBinder1.input); + assert(lambdaBinder1.input->index() == 0); + assert(lambdaBinder1.inner->index() == 0); } /** @@ -195,13 +196,13 @@ TestPruneLambdaInputs() auto 
lambdaNode = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); - lambdaNode->add_ctxvar(x)->input(); - auto lambdaInput1 = lambdaNode->add_ctxvar(x)->input(); - lambdaNode->add_ctxvar(x)->input(); + lambdaNode->AddContextVar(*x); + auto lambdaInput1 = lambdaNode->AddContextVar(*x); + lambdaNode->AddContextVar(*x); auto result = jlm::tests::SimpleNode::Create( *lambdaNode->subregion(), - { lambdaInput1->argument() }, + { lambdaInput1.inner }, { valueType }) .output(0); @@ -213,11 +214,11 @@ TestPruneLambdaInputs() // Assert assert(numRemovedInputs == 2); assert(lambdaNode->ninputs() == 1); - assert(lambdaNode->ncvarguments() == 1); - assert(lambdaNode->input(0) == lambdaInput1); - assert(lambdaNode->cvargument(0) == lambdaInput1->argument()); - assert(lambdaInput1->index() == 0); - assert(lambdaInput1->argument()->index() == 0); + assert(lambdaNode->GetContextVars().size() == 1); + assert(lambdaNode->input(0) == lambdaInput1.input); + assert(lambdaNode->GetContextVars()[0].inner == lambdaInput1.inner); + assert(lambdaInput1.input->index() == 0); + assert(lambdaInput1.inner->index() == 0); } static void @@ -310,24 +311,24 @@ TestCallSummaryComputationDirectCalls() functionType, "x", jlm::llvm::linkage::external_linkage); - auto iOStateArgument = lambdaNode->fctargument(0); - auto memoryStateArgument = lambdaNode->fctargument(1); + auto iOStateArgument = lambdaNode->GetFunctionArguments()[0]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[1]; auto result = tests::create_testop(lambdaNode->subregion(), {}, { vt })[0]; return lambdaNode->finalize({ result, iOStateArgument, memoryStateArgument }); }; - auto SetupLambdaY = [&](jlm::llvm::lambda::output & lambdaX) + auto SetupLambdaY = [&](rvsdg::output & lambdaX) { auto lambdaNode = jlm::llvm::lambda::node::create( rvsdg.root(), functionType, "y", jlm::llvm::linkage::external_linkage); - auto iOStateArgument = lambdaNode->fctargument(0); - auto memoryStateArgument = 
lambdaNode->fctargument(1); - auto lambdaXCv = lambdaNode->add_ctxvar(&lambdaX); + auto iOStateArgument = lambdaNode->GetFunctionArguments()[0]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[1]; + auto lambdaXCv = lambdaNode->AddContextVar(lambdaX).inner; auto callResults = jlm::llvm::CallNode::Create( lambdaXCv, @@ -340,17 +341,17 @@ TestCallSummaryComputationDirectCalls() return lambdaOutput; }; - auto SetupLambdaZ = [&](jlm::llvm::lambda::output & lambdaX, jlm::llvm::lambda::output & lambdaY) + auto SetupLambdaZ = [&](rvsdg::output & lambdaX, rvsdg::output & lambdaY) { auto lambdaNode = jlm::llvm::lambda::node::create( rvsdg.root(), functionType, "y", jlm::llvm::linkage::external_linkage); - auto iOStateArgument = lambdaNode->fctargument(0); - auto memoryStateArgument = lambdaNode->fctargument(1); - auto lambdaXCv = lambdaNode->add_ctxvar(&lambdaX); - auto lambdaYCv = lambdaNode->add_ctxvar(&lambdaY); + auto iOStateArgument = lambdaNode->GetFunctionArguments()[0]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[1]; + auto lambdaXCv = lambdaNode->AddContextVar(lambdaX).inner; + auto lambdaYCv = lambdaNode->AddContextVar(lambdaY).inner; auto callXResults = jlm::llvm::CallNode::Create( lambdaXCv, @@ -375,9 +376,12 @@ TestCallSummaryComputationDirectCalls() auto lambdaZ = SetupLambdaZ(*lambdaX, *lambdaY); // Act - auto lambdaXCallSummary = lambdaX->node()->ComputeCallSummary(); - auto lambdaYCallSummary = lambdaY->node()->ComputeCallSummary(); - auto lambdaZCallSummary = lambdaZ->node()->ComputeCallSummary(); + auto lambdaXCallSummary = + rvsdg::AssertGetOwnerNode(*lambdaX).ComputeCallSummary(); + auto lambdaYCallSummary = + rvsdg::AssertGetOwnerNode(*lambdaY).ComputeCallSummary(); + auto lambdaZCallSummary = + rvsdg::AssertGetOwnerNode(*lambdaZ).ComputeCallSummary(); // Assert assert(lambdaXCallSummary->HasOnlyDirectCalls()); @@ -459,7 +463,7 @@ TestCallSummaryComputationFunctionPointerInDelta() auto lambdaNode = 
lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); - lambdaNode->finalize({ lambdaNode->fctargument(0) }); + lambdaNode->finalize({ lambdaNode->GetFunctionArguments()[0] }); auto deltaNode = delta::node::Create( rvsdg->root(), @@ -499,11 +503,11 @@ TestCallSummaryComputationLambdaResult() auto lambdaNodeG = lambda::node::create(rvsdg.root(), functionTypeG, "g", linkage::external_linkage); - auto lambdaOutputG = lambdaNodeG->finalize({ lambdaNodeG->fctargument(0) }); + auto lambdaOutputG = lambdaNodeG->finalize({ lambdaNodeG->GetFunctionArguments()[0] }); auto lambdaNodeF = lambda::node::create(rvsdg.root(), functionTypeF, "f", linkage::external_linkage); - auto lambdaGArgument = lambdaNodeF->add_ctxvar(lambdaOutputG); + auto lambdaGArgument = lambdaNodeF->AddContextVar(*lambdaOutputG).inner; auto lambdaOutputF = lambdaNodeF->finalize({ lambdaGArgument }); GraphExport::Create(*lambdaOutputF, "f"); diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index 24c790a00..5e334f608 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -32,8 +32,8 @@ TestPhiCreation() auto SetupEmptyLambda = [&](jlm::rvsdg::Region * region, const std::string & name) { auto lambda = lambda::node::create(region, f0type, name, linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; return lambda->finalize({ iOStateArgument, memoryStateArgument }); }; @@ -41,10 +41,10 @@ TestPhiCreation() auto SetupF2 = [&](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * f2) { auto lambda = lambda::node::create(region, f1type, "f2", linkage::external_linkage); - auto ctxVarF2 = lambda->add_ctxvar(f2); - auto valueArgument = lambda->fctargument(0); - auto iOStateArgument = 
lambda->fctargument(1); - auto memoryStateArgument = lambda->fctargument(2); + auto ctxVarF2 = lambda->AddContextVar(*f2).inner; + auto valueArgument = lambda->GetFunctionArguments()[0]; + auto iOStateArgument = lambda->GetFunctionArguments()[1]; + auto memoryStateArgument = lambda->GetFunctionArguments()[2]; auto callResults = CallNode::Create(ctxVarF2, f1type, { valueArgument, iOStateArgument, memoryStateArgument }); diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index 0710dbeae..f6eae7852 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -46,9 +46,9 @@ TestGamma() auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "test", linkage::external_linkage); - auto c = lambdaNode->fctargument(0); - auto x = lambdaNode->fctargument(1); - auto y = lambdaNode->fctargument(2); + auto c = lambdaNode->GetFunctionArguments()[0]; + auto x = lambdaNode->GetFunctionArguments()[1]; + auto y = lambdaNode->GetFunctionArguments()[2]; auto gammaNode1 = jlm::rvsdg::GammaNode::create(c, 2); auto gammaInput1 = gammaNode1->add_entryvar(c); @@ -72,8 +72,8 @@ TestGamma() RunInvariantValueRedirection(*rvsdgModule); // Assert - assert(lambdaNode->fctresult(0)->origin() == x); - assert(lambdaNode->fctresult(1)->origin() == y); + assert(lambdaNode->GetFunctionResults()[0]->origin() == x); + assert(lambdaNode->GetFunctionResults()[1]->origin() == y); return 0; } @@ -99,9 +99,9 @@ TestTheta() auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "test", linkage::external_linkage); - auto c = lambdaNode->fctargument(0); - auto x = lambdaNode->fctargument(1); - auto l = lambdaNode->fctargument(2); + auto c = lambdaNode->GetFunctionArguments()[0]; + auto x = lambdaNode->GetFunctionArguments()[1]; + auto l = lambdaNode->GetFunctionArguments()[2]; auto thetaNode1 = jlm::rvsdg::ThetaNode::create(lambdaNode->subregion()); 
auto thetaOutput1 = thetaNode1->add_loopvar(c); @@ -125,9 +125,9 @@ TestTheta() RunInvariantValueRedirection(*rvsdgModule); // Assert - assert(lambdaNode->fctresult(0)->origin() == c); - assert(lambdaNode->fctresult(1)->origin() == x); - assert(lambdaNode->fctresult(2)->origin() == thetaOutput3); + assert(lambdaNode->GetFunctionResults()[0]->origin() == c); + assert(lambdaNode->GetFunctionResults()[1]->origin() == x); + assert(lambdaNode->GetFunctionResults()[2]->origin() == thetaOutput3); return 0; } @@ -151,16 +151,16 @@ TestCall() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - lambda::output * lambdaOutputTest1; + jlm::rvsdg::output * lambdaOutputTest1; { auto lambdaNode = lambda::node::create(rvsdg.root(), functionTypeTest1, "test1", linkage::external_linkage); - auto controlArgument = lambdaNode->fctargument(0); - auto xArgument = lambdaNode->fctargument(1); - auto yArgument = lambdaNode->fctargument(2); - auto ioStateArgument = lambdaNode->fctargument(3); - auto memoryStateArgument = lambdaNode->fctargument(4); + auto controlArgument = lambdaNode->GetFunctionArguments()[0]; + auto xArgument = lambdaNode->GetFunctionArguments()[1]; + auto yArgument = lambdaNode->GetFunctionArguments()[2]; + auto ioStateArgument = lambdaNode->GetFunctionArguments()[3]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[4]; auto gammaNode = jlm::rvsdg::GammaNode::create(controlArgument, 2); auto gammaInputX = gammaNode->add_entryvar(xArgument); @@ -180,7 +180,7 @@ TestCall() { gammaOutputX, gammaOutputY, gammaOutputIOState, gammaOutputMemoryState }); } - lambda::output * lambdaOutputTest2; + jlm::rvsdg::output * lambdaOutputTest2; { auto functionType = FunctionType::Create( { valueType, valueType, ioStateType, memoryStateType }, @@ -188,11 +188,11 @@ TestCall() auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "test2", linkage::external_linkage); - auto xArgument = 
lambdaNode->fctargument(0); - auto yArgument = lambdaNode->fctargument(1); - auto ioStateArgument = lambdaNode->fctargument(2); - auto memoryStateArgument = lambdaNode->fctargument(3); - auto lambdaArgumentTest1 = lambdaNode->add_ctxvar(lambdaOutputTest1); + auto xArgument = lambdaNode->GetFunctionArguments()[0]; + auto yArgument = lambdaNode->GetFunctionArguments()[1]; + auto ioStateArgument = lambdaNode->GetFunctionArguments()[2]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[3]; + auto lambdaArgumentTest1 = lambdaNode->AddContextVar(*lambdaOutputTest1).inner; auto controlResult = jlm::rvsdg::control_constant(lambdaNode->subregion(), 2, 0); @@ -209,12 +209,12 @@ TestCall() RunInvariantValueRedirection(*rvsdgModule); // Assert - auto lambdaNode = lambdaOutputTest2->node(); - assert(lambdaNode->nfctresults() == 4); - assert(lambdaNode->fctresult(0)->origin() == lambdaNode->fctargument(1)); - assert(lambdaNode->fctresult(1)->origin() == lambdaNode->fctargument(0)); - assert(lambdaNode->fctresult(2)->origin() == lambdaNode->fctargument(2)); - assert(lambdaNode->fctresult(3)->origin() == lambdaNode->fctargument(3)); + auto & lambdaNode = jlm::rvsdg::AssertGetOwnerNode(*lambdaOutputTest2); + assert(lambdaNode.GetFunctionResults().size() == 4); + assert(lambdaNode.GetFunctionResults()[0]->origin() == lambdaNode.GetFunctionArguments()[1]); + assert(lambdaNode.GetFunctionResults()[1]->origin() == lambdaNode.GetFunctionArguments()[0]); + assert(lambdaNode.GetFunctionResults()[2]->origin() == lambdaNode.GetFunctionArguments()[2]); + assert(lambdaNode.GetFunctionResults()[3]->origin() == lambdaNode.GetFunctionArguments()[3]); return 0; } @@ -238,15 +238,15 @@ TestCallWithMemoryStateNodes() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - lambda::output * lambdaOutputTest1; + jlm::rvsdg::output * lambdaOutputTest1; { auto lambdaNode = lambda::node::create(rvsdg.root(), functionTypeTest1, "test1", 
linkage::external_linkage); - auto controlArgument = lambdaNode->fctargument(0); - auto xArgument = lambdaNode->fctargument(1); - auto ioStateArgument = lambdaNode->fctargument(2); - auto memoryStateArgument = lambdaNode->fctargument(3); + auto controlArgument = lambdaNode->GetFunctionArguments()[0]; + auto xArgument = lambdaNode->GetFunctionArguments()[1]; + auto ioStateArgument = lambdaNode->GetFunctionArguments()[2]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[3]; auto lambdaEntrySplitResults = LambdaEntryMemoryStateSplitOperation::Create(*memoryStateArgument, 2); @@ -272,7 +272,7 @@ TestCallWithMemoryStateNodes() lambdaNode->finalize({ gammaOutputX, ioStateArgument, &lambdaExitMergeResult }); } - lambda::output * lambdaOutputTest2; + jlm::rvsdg::output * lambdaOutputTest2; { auto functionType = FunctionType::Create( { valueType, ioStateType, memoryStateType }, @@ -280,10 +280,10 @@ TestCallWithMemoryStateNodes() auto lambdaNode = lambda::node::create(rvsdg.root(), functionType, "test2", linkage::external_linkage); - auto xArgument = lambdaNode->fctargument(0); - auto ioStateArgument = lambdaNode->fctargument(1); - auto memoryStateArgument = lambdaNode->fctargument(2); - auto lambdaArgumentTest1 = lambdaNode->add_ctxvar(lambdaOutputTest1); + auto xArgument = lambdaNode->GetFunctionArguments()[0]; + auto ioStateArgument = lambdaNode->GetFunctionArguments()[1]; + auto memoryStateArgument = lambdaNode->GetFunctionArguments()[2]; + auto lambdaArgumentTest1 = lambdaNode->AddContextVar(*lambdaOutputTest1).inner; auto lambdaEntrySplitResults = LambdaEntryMemoryStateSplitOperation::Create(*memoryStateArgument, 2); @@ -314,13 +314,13 @@ TestCallWithMemoryStateNodes() RunInvariantValueRedirection(*rvsdgModule); // Assert - auto lambdaNode = lambdaOutputTest2->node(); - assert(lambdaNode->nfctresults() == 3); - assert(lambdaNode->fctresult(0)->origin() == lambdaNode->fctargument(0)); - assert(lambdaNode->fctresult(1)->origin() == 
lambdaNode->fctargument(1)); + auto & lambdaNode = jlm::rvsdg::AssertGetOwnerNode(*lambdaOutputTest2); + assert(lambdaNode.GetFunctionResults().size() == 3); + assert(lambdaNode.GetFunctionResults()[0]->origin() == lambdaNode.GetFunctionArguments()[0]); + assert(lambdaNode.GetFunctionResults()[1]->origin() == lambdaNode.GetFunctionArguments()[1]); - auto lambdaEntrySplit = lambda::node::GetMemoryStateEntrySplit(*lambdaNode); - auto lambdaExitMerge = lambda::node::GetMemoryStateExitMerge(*lambdaNode); + auto lambdaEntrySplit = lambda::node::GetMemoryStateEntrySplit(lambdaNode); + auto lambdaExitMerge = lambda::node::GetMemoryStateExitMerge(lambdaNode); assert(lambdaEntrySplit->noutputs() == 2); assert(lambdaExitMerge->ninputs() == 2); @@ -347,11 +347,11 @@ TestLambdaCallArgumentMismatch() auto & callNode = test.GetCall(); auto & lambdaNode = test.GetLambdaMain(); - assert(lambdaNode.nfctresults() == 3); - assert(lambdaNode.nfctresults() == callNode.NumResults()); - assert(lambdaNode.fctresult(0)->origin() == callNode.Result(0)); - assert(lambdaNode.fctresult(1)->origin() == callNode.Result(1)); - assert(lambdaNode.fctresult(2)->origin() == callNode.Result(2)); + assert(lambdaNode.GetFunctionResults().size() == 3); + assert(lambdaNode.GetFunctionResults().size() == callNode.NumResults()); + assert(lambdaNode.GetFunctionResults()[0]->origin() == callNode.Result(0)); + assert(lambdaNode.GetFunctionResults()[1]->origin() == callNode.Result(1)); + assert(lambdaNode.GetFunctionResults()[2]->origin() == callNode.Result(2)); return 0; } diff --git a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp index 87b2d6e01..83e386703 100644 --- a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp +++ b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp @@ -39,7 +39,7 @@ PrintRvsdgTree() functionType, "f", linkage::external_linkage); - auto lambdaOutput = lambda->finalize({ lambda->fctargument(0) }); + auto lambdaOutput = lambda->finalize({ 
lambda->GetFunctionArguments()[0] }); jlm::tests::GraphExport::Create(*lambdaOutput, "f"); auto tempDirectory = std::filesystem::temp_directory_path(); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index bd7d92f8f..2e7062053 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -258,11 +258,14 @@ TestLambda() "f", linkage::external_linkage); - auto cv1 = lambda->add_ctxvar(x); - auto cv2 = lambda->add_ctxvar(y); - jlm::tests::create_testop(lambda->subregion(), { lambda->fctargument(0), cv1 }, { vt }); + auto cv1 = lambda->AddContextVar(*x).inner; + auto cv2 = lambda->AddContextVar(*y).inner; + jlm::tests::create_testop( + lambda->subregion(), + { lambda->GetFunctionArguments()[0], cv1 }, + { vt }); - auto output = lambda->finalize({ lambda->fctargument(0), cv2 }); + auto output = lambda->finalize({ lambda->GetFunctionArguments()[0], cv2 }); GraphExport::Create(*output, "f"); @@ -293,12 +296,12 @@ TestPhi() [&](jlm::rvsdg::Region & region, phi::rvoutput & rv2, jlm::rvsdg::RegionArgument & dx) { auto lambda1 = lambda::node::create(®ion, functionType, "f1", linkage::external_linkage); - auto f2Argument = lambda1->add_ctxvar(rv2.argument()); - auto xArgument = lambda1->add_ctxvar(&dx); + auto f2Argument = lambda1->AddContextVar(*rv2.argument()).inner; + auto xArgument = lambda1->AddContextVar(dx).inner; auto result = jlm::tests::SimpleNode::Create( *lambda1->subregion(), - { lambda1->fctargument(0), f2Argument, xArgument }, + { lambda1->GetFunctionArguments()[0], f2Argument, xArgument }, { valueType }) .output(0); @@ -309,12 +312,12 @@ TestPhi() [&](jlm::rvsdg::Region & region, phi::rvoutput & rv1, jlm::rvsdg::RegionArgument & dy) { auto lambda2 = lambda::node::create(®ion, functionType, "f2", linkage::external_linkage); - auto f1Argument = lambda2->add_ctxvar(rv1.argument()); - lambda2->add_ctxvar(&dy); + auto f1Argument = 
lambda2->AddContextVar(*rv1.argument()).inner; + lambda2->AddContextVar(dy); auto result = jlm::tests::SimpleNode::Create( *lambda2->subregion(), - { lambda2->fctargument(0), f1Argument }, + { lambda2->GetFunctionArguments()[0], f1Argument }, { valueType }) .output(0); @@ -324,11 +327,11 @@ TestPhi() auto setupF3 = [&](jlm::rvsdg::Region & region, jlm::rvsdg::RegionArgument & dz) { auto lambda3 = lambda::node::create(®ion, functionType, "f3", linkage::external_linkage); - auto zArgument = lambda3->add_ctxvar(&dz); + auto zArgument = lambda3->AddContextVar(dz).inner; auto result = jlm::tests::SimpleNode::Create( *lambda3->subregion(), - { lambda3->fctargument(0), zArgument }, + { lambda3->GetFunctionArguments()[0], zArgument }, { valueType }) .output(0); @@ -338,7 +341,7 @@ TestPhi() auto setupF4 = [&](jlm::rvsdg::Region & region) { auto lambda = lambda::node::create(®ion, functionType, "f4", linkage::external_linkage); - return lambda->finalize({ lambda->fctargument(0) }); + return lambda->finalize({ lambda->GetFunctionArguments()[0] }); }; phi::builder phiBuilder; diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index 1980d0381..5957ea797 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -161,7 +161,7 @@ TestLoad1() auto & lambda = ptg->GetLambdaNode(*test.lambda); auto & lambdaOutput = ptg->GetRegisterNode(*test.lambda->output()); - auto & lambdaArgument0 = ptg->GetRegisterNode(*test.lambda->fctargument(0)); + auto & lambdaArgument0 = ptg->GetRegisterNode(*test.lambda->GetFunctionArguments()[0]); assert(TargetsExactly(loadResult, { &lambda, &ptg->GetExternalMemoryNode() })); @@ -267,7 +267,7 @@ TestBitCast() auto & lambda = ptg->GetLambdaNode(*test.lambda); auto & lambdaOut = ptg->GetRegisterNode(*test.lambda->output()); - auto & lambdaArg = ptg->GetRegisterNode(*test.lambda->fctargument(0)); + auto & lambdaArg = 
ptg->GetRegisterNode(*test.lambda->GetFunctionArguments()[0]); auto & bitCast = ptg->GetRegisterNode(*test.bitCast->output(0)); assert(TargetsExactly(lambdaOut, { &lambda })); @@ -291,7 +291,7 @@ TestConstantPointerNull() auto & lambda = ptg->GetLambdaNode(*test.lambda); auto & lambdaOut = ptg->GetRegisterNode(*test.lambda->output()); - auto & lambdaArg = ptg->GetRegisterNode(*test.lambda->fctargument(0)); + auto & lambdaArg = ptg->GetRegisterNode(*test.lambda->GetFunctionArguments()[0]); auto & constantPointerNull = ptg->GetRegisterNode(*test.constantPointerNullNode->output(0)); @@ -357,14 +357,14 @@ TestCall1() auto & plambda_g = ptg->GetRegisterNode(*test.lambda_g->output()); auto & plambda_h = ptg->GetRegisterNode(*test.lambda_h->output()); - auto & lambda_f_arg0 = ptg->GetRegisterNode(*test.lambda_f->fctargument(0)); - auto & lambda_f_arg1 = ptg->GetRegisterNode(*test.lambda_f->fctargument(1)); + auto & lambda_f_arg0 = ptg->GetRegisterNode(*test.lambda_f->GetFunctionArguments()[0]); + auto & lambda_f_arg1 = ptg->GetRegisterNode(*test.lambda_f->GetFunctionArguments()[1]); - auto & lambda_g_arg0 = ptg->GetRegisterNode(*test.lambda_g->fctargument(0)); - auto & lambda_g_arg1 = ptg->GetRegisterNode(*test.lambda_g->fctargument(1)); + auto & lambda_g_arg0 = ptg->GetRegisterNode(*test.lambda_g->GetFunctionArguments()[0]); + auto & lambda_g_arg1 = ptg->GetRegisterNode(*test.lambda_g->GetFunctionArguments()[1]); - auto & lambda_h_cv0 = ptg->GetRegisterNode(*test.lambda_h->cvargument(0)); - auto & lambda_h_cv1 = ptg->GetRegisterNode(*test.lambda_h->cvargument(1)); + auto & lambda_h_cv0 = ptg->GetRegisterNode(*test.lambda_h->GetContextVars()[0].inner); + auto & lambda_h_cv1 = ptg->GetRegisterNode(*test.lambda_h->GetContextVars()[1].inner); assert(TargetsExactly(palloca_x, { &alloca_x })); assert(TargetsExactly(palloca_y, { &alloca_y })); @@ -405,12 +405,12 @@ TestCall2() auto & lambda_destroy = ptg->GetLambdaNode(*test.lambda_destroy); auto & lambda_destroy_out = 
ptg->GetRegisterNode(*test.lambda_destroy->output()); - auto & lambda_destroy_arg = ptg->GetRegisterNode(*test.lambda_destroy->fctargument(0)); + auto & lambda_destroy_arg = ptg->GetRegisterNode(*test.lambda_destroy->GetFunctionArguments()[0]); auto & lambda_test = ptg->GetLambdaNode(*test.lambda_test); auto & lambda_test_out = ptg->GetRegisterNode(*test.lambda_test->output()); - auto & lambda_test_cv1 = ptg->GetRegisterNode(*test.lambda_test->cvargument(0)); - auto & lambda_test_cv2 = ptg->GetRegisterNode(*test.lambda_test->cvargument(1)); + auto & lambda_test_cv1 = ptg->GetRegisterNode(*test.lambda_test->GetContextVars()[0].inner); + auto & lambda_test_cv2 = ptg->GetRegisterNode(*test.lambda_test->GetContextVars()[1].inner); auto & call_create1_out = ptg->GetRegisterNode(*test.CallCreate1().output(0)); auto & call_create2_out = ptg->GetRegisterNode(*test.CallCreate2().output(0)); @@ -456,13 +456,14 @@ TestIndirectCall1() auto & lambda_indcall = ptg->GetLambdaNode(test.GetLambdaIndcall()); auto & lambda_indcall_out = ptg->GetRegisterNode(*test.GetLambdaIndcall().output()); - auto & lambda_indcall_arg = ptg->GetRegisterNode(*test.GetLambdaIndcall().fctargument(0)); + auto & lambda_indcall_arg = + ptg->GetRegisterNode(*test.GetLambdaIndcall().GetFunctionArguments()[0]); auto & lambda_test = ptg->GetLambdaNode(test.GetLambdaTest()); auto & lambda_test_out = ptg->GetRegisterNode(*test.GetLambdaTest().output()); - auto & lambda_test_cv0 = ptg->GetRegisterNode(*test.GetLambdaTest().cvargument(0)); - auto & lambda_test_cv1 = ptg->GetRegisterNode(*test.GetLambdaTest().cvargument(1)); - auto & lambda_test_cv2 = ptg->GetRegisterNode(*test.GetLambdaTest().cvargument(2)); + auto & lambda_test_cv0 = ptg->GetRegisterNode(*test.GetLambdaTest().GetContextVars()[0].inner); + auto & lambda_test_cv1 = ptg->GetRegisterNode(*test.GetLambdaTest().GetContextVars()[1].inner); + auto & lambda_test_cv2 = ptg->GetRegisterNode(*test.GetLambdaTest().GetContextVars()[2].inner); 
assert(TargetsExactly(lambda_three_out, { &lambda_three })); @@ -522,8 +523,8 @@ TestExternalCall1() assert(ptg->NumMappedRegisters() == 10); auto & lambdaF = ptg->GetLambdaNode(test.LambdaF()); - auto & lambdaFArgument0 = ptg->GetRegisterNode(*test.LambdaF().fctargument(0)); - auto & lambdaFArgument1 = ptg->GetRegisterNode(*test.LambdaF().fctargument(1)); + auto & lambdaFArgument0 = ptg->GetRegisterNode(*test.LambdaF().GetFunctionArguments()[0]); + auto & lambdaFArgument1 = ptg->GetRegisterNode(*test.LambdaF().GetFunctionArguments()[1]); auto & importG = ptg->GetImportNode(test.ExternalGArgument()); auto & callResult = ptg->GetRegisterNode(*test.CallG().Result(0)); @@ -553,7 +554,7 @@ TestGamma() for (size_t n = 1; n < 5; n++) { - auto & lambdaArgument = ptg->GetRegisterNode(*test.lambda->fctargument(n)); + auto & lambdaArgument = ptg->GetRegisterNode(*test.lambda->GetFunctionArguments()[n]); assert(TargetsExactly(lambdaArgument, { &lambda, &ptg->GetExternalMemoryNode() })); } @@ -588,7 +589,7 @@ TestTheta() assert(ptg->NumMappedRegisters() == 5); auto & lambda = ptg->GetLambdaNode(*test.lambda); - auto & lambdaArgument1 = ptg->GetRegisterNode(*test.lambda->fctargument(1)); + auto & lambdaArgument1 = ptg->GetRegisterNode(*test.lambda->GetFunctionArguments()[1]); auto & lambdaOutput = ptg->GetRegisterNode(*test.lambda->output()); auto & gepOutput = ptg->GetRegisterNode(*test.gep->output(0)); @@ -625,12 +626,12 @@ TestDelta1() auto & lambda_g = ptg->GetLambdaNode(*test.lambda_g); auto & plambda_g = ptg->GetRegisterNode(*test.lambda_g->output()); - auto & lambda_g_arg0 = ptg->GetRegisterNode(*test.lambda_g->fctargument(0)); + auto & lambda_g_arg0 = ptg->GetRegisterNode(*test.lambda_g->GetFunctionArguments()[0]); auto & lambda_h = ptg->GetLambdaNode(*test.lambda_h); auto & plambda_h = ptg->GetRegisterNode(*test.lambda_h->output()); - auto & lambda_h_cv0 = ptg->GetRegisterNode(*test.lambda_h->cvargument(0)); - auto & lambda_h_cv1 = 
ptg->GetRegisterNode(*test.lambda_h->cvargument(1)); + auto & lambda_h_cv0 = ptg->GetRegisterNode(*test.lambda_h->GetContextVars()[0].inner); + auto & lambda_h_cv1 = ptg->GetRegisterNode(*test.lambda_h->GetContextVars()[1].inner); assert(TargetsExactly(pdelta_f, { &delta_f })); @@ -666,13 +667,13 @@ TestDelta2() auto & lambda_f1 = ptg->GetLambdaNode(*test.lambda_f1); auto & lambda_f1_out = ptg->GetRegisterNode(*test.lambda_f1->output()); - auto & lambda_f1_cvd1 = ptg->GetRegisterNode(*test.lambda_f1->cvargument(0)); + auto & lambda_f1_cvd1 = ptg->GetRegisterNode(*test.lambda_f1->GetContextVars()[0].inner); auto & lambda_f2 = ptg->GetLambdaNode(*test.lambda_f2); auto & lambda_f2_out = ptg->GetRegisterNode(*test.lambda_f2->output()); - auto & lambda_f2_cvd1 = ptg->GetRegisterNode(*test.lambda_f2->cvargument(0)); - auto & lambda_f2_cvd2 = ptg->GetRegisterNode(*test.lambda_f2->cvargument(1)); - auto & lambda_f2_cvf1 = ptg->GetRegisterNode(*test.lambda_f2->cvargument(2)); + auto & lambda_f2_cvd1 = ptg->GetRegisterNode(*test.lambda_f2->GetContextVars()[0].inner); + auto & lambda_f2_cvd2 = ptg->GetRegisterNode(*test.lambda_f2->GetContextVars()[1].inner); + auto & lambda_f2_cvf1 = ptg->GetRegisterNode(*test.lambda_f2->GetContextVars()[2].inner); assert(TargetsExactly(delta_d1_out, { &delta_d1 })); assert(TargetsExactly(delta_d2_out, { &delta_d2 })); @@ -709,13 +710,13 @@ TestImports() auto & lambda_f1 = ptg->GetLambdaNode(*test.lambda_f1); auto & lambda_f1_out = ptg->GetRegisterNode(*test.lambda_f1->output()); - auto & lambda_f1_cvd1 = ptg->GetRegisterNode(*test.lambda_f1->cvargument(0)); + auto & lambda_f1_cvd1 = ptg->GetRegisterNode(*test.lambda_f1->GetContextVars()[0].inner); auto & lambda_f2 = ptg->GetLambdaNode(*test.lambda_f2); auto & lambda_f2_out = ptg->GetRegisterNode(*test.lambda_f2->output()); - auto & lambda_f2_cvd1 = ptg->GetRegisterNode(*test.lambda_f2->cvargument(0)); - auto & lambda_f2_cvd2 = ptg->GetRegisterNode(*test.lambda_f2->cvargument(1)); - auto & 
lambda_f2_cvf1 = ptg->GetRegisterNode(*test.lambda_f2->cvargument(2)); + auto & lambda_f2_cvd1 = ptg->GetRegisterNode(*test.lambda_f2->GetContextVars()[0].inner); + auto & lambda_f2_cvd2 = ptg->GetRegisterNode(*test.lambda_f2->GetContextVars()[1].inner); + auto & lambda_f2_cvf1 = ptg->GetRegisterNode(*test.lambda_f2->GetContextVars()[2].inner); assert(TargetsExactly(import_d1, { &d1 })); assert(TargetsExactly(import_d2, { &d2 })); @@ -746,7 +747,7 @@ TestPhi1() auto & lambda_fib = ptg->GetLambdaNode(*test.lambda_fib); auto & lambda_fib_out = ptg->GetRegisterNode(*test.lambda_fib->output()); - auto & lambda_fib_arg1 = ptg->GetRegisterNode(*test.lambda_fib->fctargument(1)); + auto & lambda_fib_arg1 = ptg->GetRegisterNode(*test.lambda_fib->GetFunctionArguments()[1]); auto & lambda_test = ptg->GetLambdaNode(*test.lambda_test); auto & lambda_test_out = ptg->GetRegisterNode(*test.lambda_test->output()); @@ -789,8 +790,8 @@ TestExternalMemory() assert(ptg->NumMappedRegisters() == 3); auto & lambdaF = ptg->GetLambdaNode(*test.LambdaF); - auto & lambdaFArgument0 = ptg->GetRegisterNode(*test.LambdaF->fctargument(0)); - auto & lambdaFArgument1 = ptg->GetRegisterNode(*test.LambdaF->fctargument(1)); + auto & lambdaFArgument0 = ptg->GetRegisterNode(*test.LambdaF->GetFunctionArguments()[0]); + auto & lambdaFArgument1 = ptg->GetRegisterNode(*test.LambdaF->GetFunctionArguments()[1]); assert(TargetsExactly(lambdaFArgument0, { &lambdaF, &ptg->GetExternalMemoryNode() })); assert(TargetsExactly(lambdaFArgument1, { &lambdaF, &ptg->GetExternalMemoryNode() })); @@ -813,8 +814,8 @@ TestEscapedMemory1() assert(ptg->NumLambdaNodes() == 1); assert(ptg->NumMappedRegisters() == 10); - auto & lambdaTestArgument0 = ptg->GetRegisterNode(*test.LambdaTest->fctargument(0)); - auto & lambdaTestCv0 = ptg->GetRegisterNode(*test.LambdaTest->cvargument(0)); + auto & lambdaTestArgument0 = ptg->GetRegisterNode(*test.LambdaTest->GetFunctionArguments()[0]); + auto & lambdaTestCv0 = 
ptg->GetRegisterNode(*test.LambdaTest->GetContextVars()[0].inner); auto & loadNode1Output = ptg->GetRegisterNode(*test.LoadNode1->output(0)); auto deltaA = &ptg->GetDeltaNode(*test.DeltaA); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 62edbbeb7..1e28a5e0c 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -79,7 +79,8 @@ ValidateStoreTest1SteensgaardAgnostic(const jlm::tests::StoreTest1 & test) assert(test.lambda->subregion()->nnodes() == 10); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 6, 1)); assert(test.alloca_d->output(1)->nusers() == 1); @@ -112,7 +113,8 @@ ValidateStoreTest1SteensgaardRegionAware(const jlm::tests::StoreTest1 & test) assert(test.lambda->subregion()->nnodes() == 9); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 4, 1)); assert(test.alloca_d->output(1)->nusers() == 1); @@ -145,7 +147,8 @@ ValidateStoreTest1SteensgaardAgnosticTopDown(const jlm::tests::StoreTest1 & test assert(test.lambda->subregion()->nnodes() == 2); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -160,7 +163,8 @@ ValidateStoreTest2SteensgaardAgnostic(const jlm::tests::StoreTest2 & test) assert(test.lambda->subregion()->nnodes() == 12); - auto lambdaExitMerge = 
jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 7, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -200,7 +204,8 @@ ValidateStoreTest2SteensgaardRegionAware(const jlm::tests::StoreTest2 & test) assert(test.lambda->subregion()->nnodes() == 11); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 5, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -240,7 +245,8 @@ ValidateStoreTest2SteensgaardAgnosticTopDown(const jlm::tests::StoreTest2 & test assert(test.lambda->subregion()->nnodes() == 2); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -255,20 +261,22 @@ ValidateLoadTest1SteensgaardAgnostic(const jlm::tests::LoadTest1 & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); auto loadX = 
jlm::rvsdg::output::GetNode(*loadA->input(0)->origin()); assert(is(*loadA, 3, 3)); assert(jlm::rvsdg::output::GetNode(*loadA->input(1)->origin()) == loadX); assert(is(*loadX, 3, 3)); - assert(loadX->input(0)->origin() == test.lambda->fctargument(0)); + assert(loadX->input(0)->origin() == test.lambda->GetFunctionArguments()[0]); assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); } @@ -279,20 +287,22 @@ ValidateLoadTest1SteensgaardRegionAware(const jlm::tests::LoadTest1 & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); auto loadX = jlm::rvsdg::output::GetNode(*loadA->input(0)->origin()); assert(is(*loadA, 3, 3)); assert(jlm::rvsdg::output::GetNode(*loadA->input(1)->origin()) == loadX); assert(is(*loadX, 3, 3)); - assert(loadX->input(0)->origin() == test.lambda->fctargument(0)); + assert(loadX->input(0)->origin() == test.lambda->GetFunctionArguments()[0]); assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); } @@ -303,20 +313,22 @@ ValidateLoadTest1SteensgaardAgnosticTopDown(const jlm::tests::LoadTest1 & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[1]->origin()); 
assert(is(*lambdaExitMerge, 2, 1)); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto loadA = jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); auto loadX = jlm::rvsdg::output::GetNode(*loadA->input(0)->origin()); assert(is(*loadA, 3, 3)); assert(jlm::rvsdg::output::GetNode(*loadA->input(1)->origin()) == loadX); assert(is(*loadX, 3, 3)); - assert(loadX->input(0)->origin() == test.lambda->fctargument(0)); + assert(loadX->input(0)->origin() == test.lambda->GetFunctionArguments()[0]); assert(jlm::rvsdg::output::GetNode(*loadX->input(1)->origin()) == lambdaEntrySplit); } @@ -327,7 +339,8 @@ ValidateLoadTest2SteensgaardAgnostic(const jlm::tests::LoadTest2 & test) assert(test.lambda->subregion()->nnodes() == 14); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 7, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -377,7 +390,8 @@ ValidateLoadTest2SteensgaardRegionAware(const jlm::tests::LoadTest2 & test) assert(test.lambda->subregion()->nnodes() == 13); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 5, 1)); assert(test.alloca_a->output(1)->nusers() == 1); @@ -424,7 +438,8 @@ ValidateLoadTest2SteensgaardAgnosticTopDown(const jlm::tests::LoadTest2 & test) assert(test.lambda->subregion()->nnodes() == 2); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + 
jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -439,13 +454,15 @@ ValidateLoadFromUndefSteensgaardAgnostic(const jlm::tests::LoadFromUndefTest & t assert(test.Lambda().subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.Lambda().GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto load = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.Lambda().GetFunctionResults()[0]->origin()); assert(is(*load, 1, 1)); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.Lambda().fctargument(0)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.Lambda().GetFunctionArguments()[0]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); } @@ -456,10 +473,11 @@ ValidateLoadFromUndefSteensgaardRegionAware(const jlm::tests::LoadFromUndefTest assert(test.Lambda().subregion()->nnodes() == 3); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.Lambda().GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 0, 1)); - auto load = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.Lambda().GetFunctionResults()[0]->origin()); assert(is(*load, 1, 1)); } @@ -470,13 +488,15 @@ ValidateLoadFromUndefSteensgaardAgnosticTopDown(const jlm::tests::LoadFromUndefT assert(test.Lambda().subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.Lambda().GetFunctionResults()[1]->origin()); 
assert(is(*lambdaExitMerge, 2, 1)); - auto load = jlm::rvsdg::output::GetNode(*test.Lambda().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.Lambda().GetFunctionResults()[0]->origin()); assert(is(*load, 1, 1)); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.Lambda().fctargument(0)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.Lambda().GetFunctionArguments()[0]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); } @@ -487,10 +507,12 @@ ValidateCallTest1SteensgaardAgnostic(const jlm::tests::CallTest1 & test) /* validate f */ { - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f->fctresult(2)->origin()); - auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(0)->begin()); - auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[3]->begin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_f->GetFunctionResults()[2]->origin()); + auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[0]->begin()); + auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[1]->begin()); assert(is(*lambdaExitMerge, 7, 1)); assert(is(*lambdaEntrySplit, 1, 7)); @@ -504,10 +526,12 @@ ValidateCallTest1SteensgaardAgnostic(const jlm::tests::CallTest1 & test) /* validate g */ { - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_g->fctresult(2)->origin()); - auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); - auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(1)->begin()); + auto lambdaEntrySplit = + 
jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[3]->begin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_g->GetFunctionResults()[2]->origin()); + auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[0]->begin()); + auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[1]->begin()); assert(is(*lambdaExitMerge, 7, 1)); assert(is(*lambdaEntrySplit, 1, 7)); @@ -542,10 +566,12 @@ ValidateCallTest1SteensgaardRegionAware(const jlm::tests::CallTest1 & test) /* validate f */ { - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f->fctresult(2)->origin()); - auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(0)->begin()); - auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[3]->begin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_f->GetFunctionResults()[2]->origin()); + auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[0]->begin()); + auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[1]->begin()); assert(is(*lambdaExitMerge, 2, 1)); assert(is(*lambdaEntrySplit, 1, 2)); @@ -559,10 +585,12 @@ ValidateCallTest1SteensgaardRegionAware(const jlm::tests::CallTest1 & test) /* validate g */ { - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_g->fctresult(2)->origin()); - auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); - auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[3]->begin()); + 
auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_g->GetFunctionResults()[2]->origin()); + auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[0]->begin()); + auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[1]->begin()); assert(is(*lambdaExitMerge, 1, 1)); assert(is(*lambdaEntrySplit, 1, 1)); @@ -597,10 +625,12 @@ ValidateCallTest1SteensgaardAgnosticTopDown(const jlm::tests::CallTest1 & test) // validate function f { - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f->fctresult(2)->origin()); - auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(0)->begin()); - auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[3]->begin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_f->GetFunctionResults()[2]->origin()); + auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[0]->begin()); + auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_f->GetFunctionArguments()[1]->begin()); assert(is(*lambdaExitMerge, 7, 1)); assert(is(*lambdaEntrySplit, 1, 7)); @@ -614,10 +644,12 @@ ValidateCallTest1SteensgaardAgnosticTopDown(const jlm::tests::CallTest1 & test) // validate function g { - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(3)->begin()); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_g->fctresult(2)->origin()); - auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); - auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[3]->begin()); + auto lambdaExitMerge = + 
jlm::rvsdg::output::GetNode(*test.lambda_g->GetFunctionResults()[2]->origin()); + auto loadX = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[0]->begin()); + auto loadY = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[1]->begin()); assert(is(*lambdaExitMerge, 7, 1)); assert(is(*lambdaEntrySplit, 1, 7)); @@ -753,7 +785,7 @@ ValidateIndirectCallTest1SteensgaardAgnostic(const jlm::tests::IndirectCallTest1 assert(test.GetLambdaIndcall().subregion()->nnodes() == 5); auto lambda_exit_mux = - jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().GetFunctionResults()[2]->origin()); assert(is(*lambda_exit_mux, 5, 1)); auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); @@ -774,7 +806,7 @@ ValidateIndirectCallTest1SteensgaardAgnostic(const jlm::tests::IndirectCallTest1 assert(test.GetLambdaTest().subregion()->nnodes() == 9); auto lambda_exit_mux = - jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().GetFunctionResults()[2]->origin()); assert(is(*lambda_exit_mux, 5, 1)); auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); @@ -810,7 +842,7 @@ ValidateIndirectCallTest1SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaIndcall().subregion()->nnodes() == 5); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 1, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -831,7 +863,7 @@ ValidateIndirectCallTest1SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaTest().subregion()->nnodes() == 9); auto lambdaExitMerge = - 
jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 1, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -867,7 +899,7 @@ ValidateIndirectCallTest1SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaIndcall().subregion()->nnodes() == 5); auto lambda_exit_mux = - jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaIndcall().GetFunctionResults()[2]->origin()); assert(is(*lambda_exit_mux, 5, 1)); auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); @@ -888,7 +920,7 @@ ValidateIndirectCallTest1SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaTest().subregion()->nnodes() == 9); auto lambda_exit_mux = - jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().GetFunctionResults()[2]->origin()); assert(is(*lambda_exit_mux, 5, 1)); auto call_exit_mux = jlm::rvsdg::output::GetNode(*lambda_exit_mux->input(0)->origin()); @@ -924,11 +956,11 @@ ValidateIndirectCallTest2SteensgaardAgnostic(const jlm::tests::IndirectCallTest2 assert(test.GetLambdaThree().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaThree().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaThree().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaThree().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaThree().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 13)); } @@ -937,11 +969,11 @@ ValidateIndirectCallTest2SteensgaardAgnostic(const jlm::tests::IndirectCallTest2 
assert(test.GetLambdaFour().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaFour().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaFour().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaFour().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaFour().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 13)); } @@ -949,7 +981,8 @@ ValidateIndirectCallTest2SteensgaardAgnostic(const jlm::tests::IndirectCallTest2 { assert(test.GetLambdaI().subregion()->nnodes() == 5); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaI().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaI().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -973,7 +1006,7 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaThree().subregion()->nnodes() == 2); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaThree().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaThree().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 0, 1)); } @@ -982,7 +1015,7 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaFour().subregion()->nnodes() == 2); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaFour().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaFour().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 0, 1)); } @@ -990,7 +1023,8 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe { assert(test.GetLambdaI().subregion()->nnodes() == 5); - auto lambdaExitMerge = 
jlm::rvsdg::output::GetNode(*test.GetLambdaI().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaI().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 6, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1007,7 +1041,8 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe { assert(test.GetLambdaX().subregion()->nnodes() == 7); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaX().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaX().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 6, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1043,7 +1078,8 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe { assert(test.GetLambdaY().subregion()->nnodes() == 7); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaY().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaY().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 6, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1080,17 +1116,19 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaTest().subregion()->nnodes() == 16); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 6, 1)); - auto loadG1 = jlm::rvsdg::input::GetNode(**test.GetLambdaTest().cvargument(2)->begin()); + auto loadG1 = + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetContextVars()[2].inner->begin()); assert(is(*loadG1, 2, 2)); - auto loadG2 = jlm::rvsdg::input::GetNode(**test.GetLambdaTest().cvargument(3)->begin()); + auto 
loadG2 = + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetContextVars()[3].inner->begin()); assert(is(*loadG2, 2, 2)); auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaTest().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 6)); } @@ -1099,11 +1137,11 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe assert(test.GetLambdaTest2().subregion()->nnodes() == 7); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaTest2().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest2().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 6, 1)); auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaTest().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 6)); } } @@ -1118,11 +1156,11 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaThree().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaThree().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaThree().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaThree().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaThree().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 13)); } @@ -1131,11 +1169,11 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaFour().subregion()->nnodes() == 3); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaFour().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaFour().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto 
lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaFour().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaFour().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 13)); } @@ -1143,7 +1181,8 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa { assert(test.GetLambdaI().subregion()->nnodes() == 5); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaI().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaI().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1160,7 +1199,8 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa { assert(test.GetLambdaX().subregion()->nnodes() == 7); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaX().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaX().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 13, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1196,7 +1236,8 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa { assert(test.GetLambdaY().subregion()->nnodes() == 8); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.GetLambdaY().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.GetLambdaY().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 12, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1240,10 +1281,11 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaTest().subregion()->nnodes() == 17); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaTest().fctresult(2)->origin()); + 
jlm::rvsdg::output::GetNode(*test.GetLambdaTest().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 10, 1)); - auto loadG1 = jlm::rvsdg::input::GetNode(**test.GetLambdaTest().cvargument(2)->begin()); + auto loadG1 = + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetContextVars()[2].inner->begin()); assert(is(*loadG1, 2, 2)); auto callXEntryMerge = jlm::rvsdg::output::GetNode(*test.GetTestCallX().input(3)->origin()); @@ -1265,11 +1307,12 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(undefNode->output(0)->nusers() == 1); assert(jlm::rvsdg::input::GetNode(**undefNode->output(0)->begin()) == callXEntryMerge); - auto loadG2 = jlm::rvsdg::input::GetNode(**test.GetLambdaTest().cvargument(3)->begin()); + auto loadG2 = + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetContextVars()[3].inner->begin()); assert(is(*loadG2, 2, 2)); auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaTest().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaTest().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 10)); } @@ -1278,7 +1321,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa assert(test.GetLambdaTest2().subregion()->nnodes() == 8); auto lambdaExitMerge = - jlm::rvsdg::output::GetNode(*test.GetLambdaTest2().fctresult(2)->origin()); + jlm::rvsdg::output::GetNode(*test.GetLambdaTest2().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 10, 1)); auto callXEntryMerge = jlm::rvsdg::output::GetNode(*test.GetTest2CallX().input(3)->origin()); @@ -1304,7 +1347,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa } auto lambdaEntrySplit = - jlm::rvsdg::input::GetNode(**test.GetLambdaTest2().fctargument(1)->begin()); + jlm::rvsdg::input::GetNode(**test.GetLambdaTest2().GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 10)); } } @@ -1314,7 +1357,8 @@ 
ValidateGammaTestSteensgaardAgnostic(const jlm::tests::GammaTest & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto loadTmp2 = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1332,7 +1376,8 @@ ValidateGammaTestSteensgaardRegionAware(const jlm::tests::GammaTest & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto loadTmp2 = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1350,7 +1395,8 @@ ValidateGammaTestSteensgaardAgnosticTopDown(const jlm::tests::GammaTest & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto loadTmp2 = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1370,7 +1416,8 @@ ValidateThetaTestSteensgaardAgnostic(const jlm::tests::ThetaTest & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambda_exit_mux = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambda_exit_mux = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambda_exit_mux, 2, 1)); auto thetaOutput = @@ -1394,7 +1441,8 @@ ValidateThetaTestSteensgaardRegionAware(const jlm::tests::ThetaTest & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambdaExitMerge = + 
jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 2, 1)); auto thetaOutput = @@ -1418,7 +1466,8 @@ ValidateThetaTestSteensgaardAgnosticTopDown(const jlm::tests::ThetaTest & test) assert(test.lambda->subregion()->nnodes() == 4); - auto lambda_exit_mux = jlm::rvsdg::output::GetNode(*test.lambda->fctresult(0)->origin()); + auto lambda_exit_mux = + jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambda_exit_mux, 2, 1)); auto thetaOutput = @@ -1442,7 +1491,8 @@ ValidateDeltaTest1SteensgaardAgnostic(const jlm::tests::DeltaTest1 & test) assert(test.lambda_h->subregion()->nnodes() == 7); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_h->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_h->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 4)); auto storeF = jlm::rvsdg::input::GetNode(**test.constantFive->output(0)->begin()); @@ -1451,7 +1501,7 @@ ValidateDeltaTest1SteensgaardAgnostic(const jlm::tests::DeltaTest1 & test) auto deltaStateIndex = storeF->input(2)->origin()->index(); - auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); + auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[0]->begin()); assert(is(*loadF, 2, 2)); assert(loadF->input(1)->origin()->index() == deltaStateIndex); } @@ -1463,7 +1513,8 @@ ValidateDeltaTest1SteensgaardRegionAware(const jlm::tests::DeltaTest1 & test) assert(test.lambda_h->subregion()->nnodes() == 7); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_h->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_h->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 1)); auto storeF = jlm::rvsdg::input::GetNode(**test.constantFive->output(0)->begin()); @@ -1472,7 +1523,7 @@ ValidateDeltaTest1SteensgaardRegionAware(const 
jlm::tests::DeltaTest1 & test) auto deltaStateIndex = storeF->input(2)->origin()->index(); - auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); + auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[0]->begin()); assert(is(*loadF, 2, 2)); assert(loadF->input(1)->origin()->index() == deltaStateIndex); } @@ -1484,14 +1535,15 @@ ValidateDeltaTest1SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest1 & test assert(test.lambda_h->subregion()->nnodes() == 7); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_h->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_h->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 4)); auto storeF = jlm::rvsdg::input::GetNode(**test.constantFive->output(0)->begin()); assert(is(*storeF, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeF->input(2)->origin()) == lambdaEntrySplit); - auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->fctargument(0)->begin()); + auto loadF = jlm::rvsdg::input::GetNode(**test.lambda_g->GetFunctionArguments()[0]->begin()); assert(is(*loadF, 2, 2)); } @@ -1502,21 +1554,25 @@ ValidateDeltaTest2SteensgaardAgnostic(const jlm::tests::DeltaTest2 & test) assert(test.lambda_f2->subregion()->nnodes() == 9); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f2->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 5)); - auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); + auto storeD1InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); auto d1StateIndex = storeD1InF2->input(2)->origin()->index(); - auto storeD1InF1 = 
jlm::rvsdg::input::GetNode(**test.lambda_f1->cvargument(0)->begin()); + auto storeD1InF1 = + jlm::rvsdg::input::GetNode(**test.lambda_f1->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF1, 3, 1)); assert(d1StateIndex == storeD1InF1->input(2)->origin()->index()); - auto storeD2InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); + auto storeD2InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[1].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(d1StateIndex != storeD2InF2->input(2)->origin()->index()); @@ -1531,7 +1587,8 @@ ValidateDeltaTest2SteensgaardRegionAware(const jlm::tests::DeltaTest2 & test) { assert(test.lambda_f1->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f1->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_f1->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 1, 1)); auto storeNode = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1545,14 +1602,15 @@ ValidateDeltaTest2SteensgaardRegionAware(const jlm::tests::DeltaTest2 & test) { assert(test.lambda_f2->subregion()->nnodes() == 9); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f2->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto storeD1 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); + auto storeD1 = jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[0].inner->begin()); assert(is(*storeD1, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeD1->input(2)->origin()) == lambdaEntrySplit); - auto storeD2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); + auto storeD2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[1].inner->begin()); assert(is(*storeD2, 3, 1)); 
assert(jlm::rvsdg::output::GetNode(*storeD2->input(2)->origin()) == lambdaEntrySplit); @@ -1577,19 +1635,23 @@ ValidateDeltaTest2SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest2 & test assert(test.lambda_f2->subregion()->nnodes() == 9); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f2->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 5)); - auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); + auto storeD1InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); auto d1StateIndex = storeD1InF2->input(2)->origin()->index(); - auto storeD1InF1 = jlm::rvsdg::input::GetNode(**test.lambda_f1->cvargument(0)->begin()); + auto storeD1InF1 = + jlm::rvsdg::input::GetNode(**test.lambda_f1->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF1, 3, 1)); - auto storeD2InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); + auto storeD2InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[1].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(d1StateIndex != storeD2InF2->input(2)->origin()->index()); @@ -1604,10 +1666,11 @@ ValidateDeltaTest3SteensgaardAgnostic(const jlm::tests::DeltaTest3 & test) { assert(test.LambdaF().subregion()->nnodes() == 6); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); + auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[0]->origin()); assert(is(*truncNode, 1, 1)); 
auto loadG1Node = jlm::rvsdg::output::GetNode(*truncNode->input(0)->origin()); @@ -1646,10 +1709,11 @@ ValidateDeltaTest3SteensgaardRegionAware(const jlm::tests::DeltaTest3 & test) { assert(test.LambdaF().subregion()->nnodes() == 6); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); + auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[0]->origin()); assert(is(*truncNode, 1, 1)); auto loadG1Node = jlm::rvsdg::output::GetNode(*truncNode->input(0)->origin()); @@ -1688,10 +1752,11 @@ ValidateDeltaTest3SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest3 & test { assert(test.LambdaF().subregion()->nnodes() == 6); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); + auto truncNode = jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[0]->origin()); assert(is(*truncNode, 1, 1)); auto loadG1Node = jlm::rvsdg::output::GetNode(*truncNode->input(0)->origin()); @@ -1728,21 +1793,25 @@ ValidateImportTestSteensgaardAgnostic(const jlm::tests::ImportTest & test) assert(test.lambda_f2->subregion()->nnodes() == 9); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f2->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 5)); - auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); + auto storeD1InF2 = + 
jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); auto d1StateIndex = storeD1InF2->input(2)->origin()->index(); - auto storeD1InF1 = jlm::rvsdg::input::GetNode(**test.lambda_f1->cvargument(0)->begin()); + auto storeD1InF1 = + jlm::rvsdg::input::GetNode(**test.lambda_f1->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF1, 3, 1)); assert(d1StateIndex == storeD1InF1->input(2)->origin()->index()); - auto storeD2InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); + auto storeD2InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[1].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(d1StateIndex != storeD2InF2->input(2)->origin()->index()); @@ -1757,7 +1826,8 @@ ValidateImportTestSteensgaardRegionAware(const jlm::tests::ImportTest & test) { assert(test.lambda_f1->subregion()->nnodes() == 4); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_f1->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_f1->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 1, 1)); auto storeNode = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1771,14 +1841,15 @@ ValidateImportTestSteensgaardRegionAware(const jlm::tests::ImportTest & test) { assert(test.lambda_f2->subregion()->nnodes() == 9); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f2->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 2)); - auto storeD1 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); + auto storeD1 = jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[0].inner->begin()); assert(is(*storeD1, 3, 1)); 
assert(jlm::rvsdg::output::GetNode(*storeD1->input(2)->origin()) == lambdaEntrySplit); - auto storeD2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); + auto storeD2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[1].inner->begin()); assert(is(*storeD2, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeD2->input(2)->origin()) == lambdaEntrySplit); @@ -1803,23 +1874,27 @@ ValidateImportTestSteensgaardAgnosticTopDown(const jlm::tests::ImportTest & test assert(test.lambda_f2->subregion()->nnodes() == 9); - auto lambdaEntrySplit = jlm::rvsdg::input::GetNode(**test.lambda_f2->fctargument(1)->begin()); + auto lambdaEntrySplit = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetFunctionArguments()[1]->begin()); assert(is(*lambdaEntrySplit, 1, 5)); - auto storeD1InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(0)->begin()); + auto storeD1InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(jlm::rvsdg::output::GetNode(*storeD1InF2->input(2)->origin()) == lambdaEntrySplit); assert(storeD1InF2->output(0)->nusers() == 1); auto d1StateIndexEntry = (*storeD1InF2->output(0)->begin())->index(); - auto storeD1InF1 = jlm::rvsdg::input::GetNode(**test.lambda_f1->cvargument(0)->begin()); + auto storeD1InF1 = + jlm::rvsdg::input::GetNode(**test.lambda_f1->GetContextVars()[0].inner->begin()); assert(is(*storeD1InF1, 3, 1)); assert(d1StateIndexEntry == storeD1InF1->input(2)->origin()->index()); assert(storeD1InF1->output(0)->nusers() == 1); auto d1StateIndexExit = (*storeD1InF1->output(0)->begin())->index(); - auto storeD2InF2 = jlm::rvsdg::input::GetNode(**test.lambda_f2->cvargument(1)->begin()); + auto storeD2InF2 = + jlm::rvsdg::input::GetNode(**test.lambda_f2->GetContextVars()[1].inner->begin()); assert(is(*storeD1InF2, 3, 1)); assert(d1StateIndexExit != storeD2InF2->input(2)->origin()->index()); @@ -1832,7 +1907,8 @@ 
ValidatePhiTestSteensgaardAgnostic(const jlm::tests::PhiTest1 & test) auto arrayStateIndex = (*test.alloca->output(1)->begin())->index(); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_fib->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_fib->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 4, 1)); auto store = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(arrayStateIndex)->origin()); @@ -1860,7 +1936,8 @@ ValidatePhiTestSteensgaardRegionAware(const jlm::tests::PhiTest1 & test) auto arrayStateIndex = (*test.alloca->output(1)->begin())->index(); - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_fib->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_fib->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 1, 1)); auto store = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(arrayStateIndex)->origin()); @@ -1886,7 +1963,8 @@ ValidatePhiTestSteensgaardAgnosticTopDown(const jlm::tests::PhiTest1 & test) { using namespace jlm::llvm; - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.lambda_fib->fctresult(1)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.lambda_fib->GetFunctionResults()[1]->origin()); assert(is(*lambdaExitMerge, 4, 1)); const StoreNonVolatileNode * storeNode = nullptr; @@ -1930,10 +2008,11 @@ ValidateMemcpySteensgaardAgnostic(const jlm::tests::MemcpyTest & test) * Validate function f */ { - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[0]->origin()); assert(is(*load, 3, 3)); auto store = 
jlm::rvsdg::output::GetNode(*load->input(1)->origin()); @@ -1947,7 +2026,8 @@ ValidateMemcpySteensgaardAgnostic(const jlm::tests::MemcpyTest & test) * Validate function g */ { - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaG().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaG().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 5, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -1983,10 +2063,11 @@ ValidateMemcpySteensgaardRegionAware(const jlm::tests::MemcpyTest & test) * Validate function f */ { - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[0]->origin()); assert(is(*load, 3, 3)); auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); @@ -2000,7 +2081,7 @@ ValidateMemcpySteensgaardRegionAware(const jlm::tests::MemcpyTest & test) * Validate function g */ { - auto callNode = jlm::rvsdg::input::GetNode(**test.LambdaG().cvargument(2)->begin()); + auto callNode = jlm::rvsdg::input::GetNode(**test.LambdaG().GetContextVars()[2].inner->begin()); assert(is(*callNode, 3, 3)); auto callEntryMerge = jlm::rvsdg::output::GetNode(*callNode->input(2)->origin()); @@ -2028,10 +2109,11 @@ ValidateMemcpyTestSteensgaardAgnosticTopDown(const jlm::tests::MemcpyTest & test // Validate function f { - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 5, 1)); - auto load = 
jlm::rvsdg::output::GetNode(*test.LambdaF().fctresult(0)->origin()); + auto load = jlm::rvsdg::output::GetNode(*test.LambdaF().GetFunctionResults()[0]->origin()); assert(is(*load, 3, 3)); auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); @@ -2043,7 +2125,8 @@ ValidateMemcpyTestSteensgaardAgnosticTopDown(const jlm::tests::MemcpyTest & test // Validate function g { - auto lambdaExitMerge = jlm::rvsdg::output::GetNode(*test.LambdaG().fctresult(2)->origin()); + auto lambdaExitMerge = + jlm::rvsdg::output::GetNode(*test.LambdaG().GetFunctionResults()[2]->origin()); assert(is(*lambdaExitMerge, 5, 1)); auto callExitSplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); @@ -2080,7 +2163,7 @@ ValidateFreeNullTestSteensgaardAgnostic(const jlm::tests::FreeNullTest & test) jlm::rvsdg::output::GetNode(*test.LambdaMain().GetMemoryStateRegionResult().origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto free = jlm::rvsdg::output::GetNode(*test.LambdaMain().fctresult(0)->origin()); + auto free = jlm::rvsdg::output::GetNode(*test.LambdaMain().GetFunctionResults()[0]->origin()); assert(is(*free, 2, 1)); auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*lambdaExitMerge->input(0)->origin()); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 79319e385..716a08fe1 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -472,7 +472,7 @@ TestEscapedFunctionConstraint() const auto & localFunction = rvsdg.GetLocalFunction(); const auto & localFunctionRegister = rvsdg.GetLocalFunctionRegister(); const auto & exportedFunction = rvsdg.GetExportedFunction(); - const auto & exportedFunctionReturn = *exportedFunction.fctresult(0)->origin(); + const auto & exportedFunctionReturn = *exportedFunction.GetFunctionResults()[0]->origin(); PointerObjectSet set; const auto localFunctionPO = 
set.CreateFunctionMemoryObject(localFunction); @@ -632,8 +632,10 @@ TestFunctionCallConstraint() PointerObjectSet set; const auto lambdaF = set.CreateFunctionMemoryObject(*rvsdg.lambda_f); const auto lambdaFRegister = set.CreateRegisterPointerObject(*rvsdg.lambda_f->output()); - const auto lambdaFArgumentX = set.CreateRegisterPointerObject(*rvsdg.lambda_f->fctargument(0)); - const auto lambdaFArgumentY = set.CreateRegisterPointerObject(*rvsdg.lambda_f->fctargument(1)); + const auto lambdaFArgumentX = + set.CreateRegisterPointerObject(*rvsdg.lambda_f->GetFunctionArguments()[0]); + const auto lambdaFArgumentY = + set.CreateRegisterPointerObject(*rvsdg.lambda_f->GetFunctionArguments()[1]); const auto allocaX = set.CreateAllocaMemoryObject(*rvsdg.alloca_x, true); const auto allocaY = set.CreateAllocaMemoryObject(*rvsdg.alloca_y, true); const auto allocaXRegister = set.CreateRegisterPointerObject(*rvsdg.alloca_x->output(0)); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp index 8a8530211..58e447574 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp @@ -153,7 +153,7 @@ TestLoad1() auto & lambda = pointsToGraph.GetLambdaNode(*test.lambda); auto & lambdaOutput = pointsToGraph.GetRegisterNode(*test.lambda->output()); - auto & lambdaArgument0 = pointsToGraph.GetRegisterNode(*test.lambda->fctargument(0)); + auto & lambdaArgument0 = pointsToGraph.GetRegisterNode(*test.lambda->GetFunctionArguments()[0]); assertTargets(loadResult, { &lambda, &pointsToGraph.GetExternalMemoryNode() }); @@ -284,7 +284,7 @@ TestBitCast() auto & lambda = pointsToGraph.GetLambdaNode(*test.lambda); auto & lambdaOut = pointsToGraph.GetRegisterNode(*test.lambda->output()); - auto & lambdaArg = pointsToGraph.GetRegisterNode(*test.lambda->fctargument(0)); + auto & lambdaArg = pointsToGraph.GetRegisterNode(*test.lambda->GetFunctionArguments()[0]); auto & 
bitCast = pointsToGraph.GetRegisterNode(*test.bitCast->output(0)); @@ -316,7 +316,7 @@ TestConstantPointerNull() auto & lambda = pointsToGraph.GetLambdaNode(*test.lambda); auto & lambdaOut = pointsToGraph.GetRegisterNode(*test.lambda->output()); - auto & lambdaArg = pointsToGraph.GetRegisterNode(*test.lambda->fctargument(0)); + auto & lambdaArg = pointsToGraph.GetRegisterNode(*test.lambda->GetFunctionArguments()[0]); auto & externalMemoryNode = pointsToGraph.GetExternalMemoryNode(); auto & constantPointerNull = @@ -402,14 +402,14 @@ TestCall1() auto & plambda_g = ptg.GetRegisterNode(*test.lambda_g->output()); auto & plambda_h = ptg.GetRegisterNode(*test.lambda_h->output()); - auto & lambda_f_arg0 = ptg.GetRegisterNode(*test.lambda_f->fctargument(0)); - auto & lambda_f_arg1 = ptg.GetRegisterNode(*test.lambda_f->fctargument(1)); + auto & lambda_f_arg0 = ptg.GetRegisterNode(*test.lambda_f->GetFunctionArguments()[0]); + auto & lambda_f_arg1 = ptg.GetRegisterNode(*test.lambda_f->GetFunctionArguments()[1]); - auto & lambda_g_arg0 = ptg.GetRegisterNode(*test.lambda_g->fctargument(0)); - auto & lambda_g_arg1 = ptg.GetRegisterNode(*test.lambda_g->fctargument(1)); + auto & lambda_g_arg0 = ptg.GetRegisterNode(*test.lambda_g->GetFunctionArguments()[0]); + auto & lambda_g_arg1 = ptg.GetRegisterNode(*test.lambda_g->GetFunctionArguments()[1]); - auto & lambda_h_cv0 = ptg.GetRegisterNode(*test.lambda_h->cvargument(0)); - auto & lambda_h_cv1 = ptg.GetRegisterNode(*test.lambda_h->cvargument(1)); + auto & lambda_h_cv0 = ptg.GetRegisterNode(*test.lambda_h->GetContextVars()[0].inner); + auto & lambda_h_cv1 = ptg.GetRegisterNode(*test.lambda_h->GetContextVars()[1].inner); assertTargets(palloca_x, { &alloca_x }); assertTargets(palloca_y, { &alloca_y }); @@ -457,12 +457,13 @@ TestCall2() auto & lambda_destroy = ptg.GetLambdaNode(*test.lambda_destroy); auto & lambda_destroy_out = ptg.GetRegisterNode(*test.lambda_destroy->output()); - auto & lambda_destroy_arg = 
ptg.GetRegisterNode(*test.lambda_destroy->fctargument(0)); + auto & lambda_destroy_arg = + ptg.GetRegisterNode(*test.lambda_destroy->GetFunctionArguments()[0]); auto & lambda_test = ptg.GetLambdaNode(*test.lambda_test); auto & lambda_test_out = ptg.GetRegisterNode(*test.lambda_test->output()); - auto & lambda_test_cv1 = ptg.GetRegisterNode(*test.lambda_test->cvargument(0)); - auto & lambda_test_cv2 = ptg.GetRegisterNode(*test.lambda_test->cvargument(1)); + auto & lambda_test_cv1 = ptg.GetRegisterNode(*test.lambda_test->GetContextVars()[0].inner); + auto & lambda_test_cv2 = ptg.GetRegisterNode(*test.lambda_test->GetContextVars()[1].inner); auto & call_create1_out = ptg.GetRegisterNode(*test.CallCreate1().output(0)); auto & call_create2_out = ptg.GetRegisterNode(*test.CallCreate2().output(0)); @@ -515,13 +516,14 @@ TestIndirectCall() auto & lambda_indcall = ptg.GetLambdaNode(test.GetLambdaIndcall()); auto & lambda_indcall_out = ptg.GetRegisterNode(*test.GetLambdaIndcall().output()); - auto & lambda_indcall_arg = ptg.GetRegisterNode(*test.GetLambdaIndcall().fctargument(0)); + auto & lambda_indcall_arg = + ptg.GetRegisterNode(*test.GetLambdaIndcall().GetFunctionArguments()[0]); auto & lambda_test = ptg.GetLambdaNode(test.GetLambdaTest()); auto & lambda_test_out = ptg.GetRegisterNode(*test.GetLambdaTest().output()); - auto & lambda_test_cv0 = ptg.GetRegisterNode(*test.GetLambdaTest().cvargument(0)); - auto & lambda_test_cv1 = ptg.GetRegisterNode(*test.GetLambdaTest().cvargument(1)); - auto & lambda_test_cv2 = ptg.GetRegisterNode(*test.GetLambdaTest().cvargument(2)); + auto & lambda_test_cv0 = ptg.GetRegisterNode(*test.GetLambdaTest().GetContextVars()[0].inner); + auto & lambda_test_cv1 = ptg.GetRegisterNode(*test.GetLambdaTest().GetContextVars()[1].inner); + auto & lambda_test_cv2 = ptg.GetRegisterNode(*test.GetLambdaTest().GetContextVars()[2].inner); assertTargets(lambda_three_out, { &lambda_three, &lambda_four }); @@ -591,8 +593,10 @@ TestExternalCall1() 
assert(pointsToGraph.NumRegisterNodes() == 7); auto & lambdaF = pointsToGraph.GetLambdaNode(test.LambdaF()); - auto & lambdaFArgument0 = pointsToGraph.GetRegisterNode(*test.LambdaF().fctargument(0)); - auto & lambdaFArgument1 = pointsToGraph.GetRegisterNode(*test.LambdaF().fctargument(1)); + auto & lambdaFArgument0 = + pointsToGraph.GetRegisterNode(*test.LambdaF().GetFunctionArguments()[0]); + auto & lambdaFArgument1 = + pointsToGraph.GetRegisterNode(*test.LambdaF().GetFunctionArguments()[1]); auto & callResult = pointsToGraph.GetRegisterNode(*test.CallG().Result(0)); @@ -649,7 +653,8 @@ TestGamma() for (size_t n = 1; n < 5; n++) { - auto & lambdaArgument = pointsToGraph.GetRegisterNode(*test.lambda->fctargument(n)); + auto & lambdaArgument = + pointsToGraph.GetRegisterNode(*test.lambda->GetFunctionArguments()[n]); assertTargets(lambdaArgument, { &lambda, &pointsToGraph.GetExternalMemoryNode() }); } @@ -691,7 +696,7 @@ TestTheta() assert(pointsToGraph.NumRegisterNodes() == 2); auto & lambda = pointsToGraph.GetLambdaNode(*test.lambda); - auto & lambdaArgument1 = pointsToGraph.GetRegisterNode(*test.lambda->fctargument(1)); + auto & lambdaArgument1 = pointsToGraph.GetRegisterNode(*test.lambda->GetFunctionArguments()[1]); auto & lambdaOutput = pointsToGraph.GetRegisterNode(*test.lambda->output()); auto & gepOutput = pointsToGraph.GetRegisterNode(*test.gep->output(0)); @@ -735,12 +740,12 @@ TestDelta1() auto & lambda_g = ptg.GetLambdaNode(*test.lambda_g); auto & plambda_g = ptg.GetRegisterNode(*test.lambda_g->output()); - auto & lambda_g_arg0 = ptg.GetRegisterNode(*test.lambda_g->fctargument(0)); + auto & lambda_g_arg0 = ptg.GetRegisterNode(*test.lambda_g->GetFunctionArguments()[0]); auto & lambda_h = ptg.GetLambdaNode(*test.lambda_h); auto & plambda_h = ptg.GetRegisterNode(*test.lambda_h->output()); - auto & lambda_h_cv0 = ptg.GetRegisterNode(*test.lambda_h->cvargument(0)); - auto & lambda_h_cv1 = ptg.GetRegisterNode(*test.lambda_h->cvargument(1)); + auto & 
lambda_h_cv0 = ptg.GetRegisterNode(*test.lambda_h->GetContextVars()[0].inner); + auto & lambda_h_cv1 = ptg.GetRegisterNode(*test.lambda_h->GetContextVars()[1].inner); assertTargets(pdelta_f, { &delta_f }); @@ -783,13 +788,13 @@ TestDelta2() auto & lambda_f1 = ptg.GetLambdaNode(*test.lambda_f1); auto & lambda_f1_out = ptg.GetRegisterNode(*test.lambda_f1->output()); - auto & lambda_f1_cvd1 = ptg.GetRegisterNode(*test.lambda_f1->cvargument(0)); + auto & lambda_f1_cvd1 = ptg.GetRegisterNode(*test.lambda_f1->GetContextVars()[0].inner); auto & lambda_f2 = ptg.GetLambdaNode(*test.lambda_f2); auto & lambda_f2_out = ptg.GetRegisterNode(*test.lambda_f2->output()); - auto & lambda_f2_cvd1 = ptg.GetRegisterNode(*test.lambda_f2->cvargument(0)); - auto & lambda_f2_cvd2 = ptg.GetRegisterNode(*test.lambda_f2->cvargument(1)); - auto & lambda_f2_cvf1 = ptg.GetRegisterNode(*test.lambda_f2->cvargument(2)); + auto & lambda_f2_cvd1 = ptg.GetRegisterNode(*test.lambda_f2->GetContextVars()[0].inner); + auto & lambda_f2_cvd2 = ptg.GetRegisterNode(*test.lambda_f2->GetContextVars()[1].inner); + auto & lambda_f2_cvf1 = ptg.GetRegisterNode(*test.lambda_f2->GetContextVars()[2].inner); assertTargets(delta_d1_out, { &delta_d1 }); assertTargets(delta_d2_out, { &delta_d2 }); @@ -833,13 +838,13 @@ TestImports() auto & lambda_f1 = ptg.GetLambdaNode(*test.lambda_f1); auto & lambda_f1_out = ptg.GetRegisterNode(*test.lambda_f1->output()); - auto & lambda_f1_cvd1 = ptg.GetRegisterNode(*test.lambda_f1->cvargument(0)); + auto & lambda_f1_cvd1 = ptg.GetRegisterNode(*test.lambda_f1->GetContextVars()[0].inner); auto & lambda_f2 = ptg.GetLambdaNode(*test.lambda_f2); auto & lambda_f2_out = ptg.GetRegisterNode(*test.lambda_f2->output()); - auto & lambda_f2_cvd1 = ptg.GetRegisterNode(*test.lambda_f2->cvargument(0)); - auto & lambda_f2_cvd2 = ptg.GetRegisterNode(*test.lambda_f2->cvargument(1)); - auto & lambda_f2_cvf1 = ptg.GetRegisterNode(*test.lambda_f2->cvargument(2)); + auto & lambda_f2_cvd1 = 
ptg.GetRegisterNode(*test.lambda_f2->GetContextVars()[0].inner); + auto & lambda_f2_cvd2 = ptg.GetRegisterNode(*test.lambda_f2->GetContextVars()[1].inner); + auto & lambda_f2_cvf1 = ptg.GetRegisterNode(*test.lambda_f2->GetContextVars()[2].inner); assertTargets(import_d1, { &d1 }); assertTargets(import_d2, { &d2 }); @@ -877,7 +882,7 @@ TestPhi1() auto & lambda_fib = ptg.GetLambdaNode(*test.lambda_fib); auto & lambda_fib_out = ptg.GetRegisterNode(*test.lambda_fib->output()); - auto & lambda_fib_arg1 = ptg.GetRegisterNode(*test.lambda_fib->fctargument(1)); + auto & lambda_fib_arg1 = ptg.GetRegisterNode(*test.lambda_fib->GetFunctionArguments()[1]); auto & lambda_test = ptg.GetLambdaNode(*test.lambda_test); auto & lambda_test_out = ptg.GetRegisterNode(*test.lambda_test->output()); @@ -927,8 +932,10 @@ TestExternalMemory() assert(pointsToGraph.NumRegisterNodes() == 3); auto & lambdaF = pointsToGraph.GetLambdaNode(*test.LambdaF); - auto & lambdaFArgument0 = pointsToGraph.GetRegisterNode(*test.LambdaF->fctargument(0)); - auto & lambdaFArgument1 = pointsToGraph.GetRegisterNode(*test.LambdaF->fctargument(1)); + auto & lambdaFArgument0 = + pointsToGraph.GetRegisterNode(*test.LambdaF->GetFunctionArguments()[0]); + auto & lambdaFArgument1 = + pointsToGraph.GetRegisterNode(*test.LambdaF->GetFunctionArguments()[1]); assertTargets(lambdaFArgument0, { &lambdaF, &pointsToGraph.GetExternalMemoryNode() }); assertTargets(lambdaFArgument1, { &lambdaF, &pointsToGraph.GetExternalMemoryNode() }); @@ -956,8 +963,10 @@ TestEscapedMemory1() assert(pointsToGraph.NumLambdaNodes() == 1); assert(pointsToGraph.NumRegisterNodes() == 7); - auto & lambdaTestArgument0 = pointsToGraph.GetRegisterNode(*test.LambdaTest->fctargument(0)); - auto & lambdaTestCv0 = pointsToGraph.GetRegisterNode(*test.LambdaTest->cvargument(0)); + auto & lambdaTestArgument0 = + pointsToGraph.GetRegisterNode(*test.LambdaTest->GetFunctionArguments()[0]); + auto & lambdaTestCv0 = + 
pointsToGraph.GetRegisterNode(*test.LambdaTest->GetContextVars()[0].inner); auto & loadNode1Output = pointsToGraph.GetRegisterNode(*test.LoadNode1->output(0)); auto deltaA = &pointsToGraph.GetDeltaNode(*test.DeltaA); @@ -1129,11 +1138,15 @@ TestMemcpy2() assert(pointsToGraph->NumLambdaNodes() == 2); auto & lambdaFNode = pointsToGraph->GetLambdaNode(test.LambdaF()); - auto & lambdaFArgument0 = pointsToGraph->GetRegisterNode(*test.LambdaF().fctargument(0)); - auto & lambdaFArgument1 = pointsToGraph->GetRegisterNode(*test.LambdaF().fctargument(1)); + auto & lambdaFArgument0 = + pointsToGraph->GetRegisterNode(*test.LambdaF().GetFunctionArguments()[0]); + auto & lambdaFArgument1 = + pointsToGraph->GetRegisterNode(*test.LambdaF().GetFunctionArguments()[1]); - auto & lambdaGArgument0 = pointsToGraph->GetRegisterNode(*test.LambdaG().fctargument(0)); - auto & lambdaGArgument1 = pointsToGraph->GetRegisterNode(*test.LambdaG().fctargument(1)); + auto & lambdaGArgument0 = + pointsToGraph->GetRegisterNode(*test.LambdaG().GetFunctionArguments()[0]); + auto & lambdaGArgument1 = + pointsToGraph->GetRegisterNode(*test.LambdaG().GetFunctionArguments()[1]); auto & memcpyOperand0 = pointsToGraph->GetRegisterNode(*test.Memcpy().input(0)->origin()); auto & memcpyOperand1 = pointsToGraph->GetRegisterNode(*test.Memcpy().input(1)->origin()); @@ -1168,7 +1181,7 @@ TestMemcpy3() assert(pointsToGraph->NumAllocaNodes() == 1); auto & lambdaNode = pointsToGraph->GetLambdaNode(test.Lambda()); - auto & lambdaArgument0 = pointsToGraph->GetRegisterNode(*test.Lambda().fctargument(0)); + auto & lambdaArgument0 = pointsToGraph->GetRegisterNode(*test.Lambda().GetFunctionArguments()[0]); auto & allocaNode = pointsToGraph->GetAllocaNode(test.Alloca()); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 38204e832..3995b8dae 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -402,8 +402,8 @@ test_lambda() auto lambda = 
lambda::node::create(graph.root(), ft, "f", linkage::external_linkage); - auto d1 = lambda->add_ctxvar(x); - auto d2 = lambda->add_ctxvar(x); + auto d1 = lambda->AddContextVar(*x).inner; + auto d2 = lambda->AddContextVar(*x).inner; auto b1 = jlm::tests::create_testop(lambda->subregion(), { d1, d2 }, { vt })[0]; @@ -446,11 +446,11 @@ test_phi() auto r2 = pb.add_recvar(PointerType::Create()); auto lambda1 = lambda::node::create(region, ft, "f", linkage::external_linkage); - auto cv1 = lambda1->add_ctxvar(d1); + auto cv1 = lambda1->AddContextVar(*d1).inner; auto f1 = lambda1->finalize({ cv1 }); auto lambda2 = lambda::node::create(region, ft, "f", linkage::external_linkage); - auto cv2 = lambda2->add_ctxvar(d2); + auto cv2 = lambda2->AddContextVar(*d2).inner; auto f2 = lambda2->finalize({ cv2 }); r1->set_rvorigin(f1); @@ -466,7 +466,9 @@ test_phi() cne.run(rm, statisticsCollector); // jlm::rvsdg::view(graph.root(), stdout); - assert(f1->node()->input(0)->origin() == f2->node()->input(0)->origin()); + assert( + jlm::rvsdg::AssertGetOwnerNode(*f1).input(0)->origin() + == jlm::rvsdg::AssertGetOwnerNode(*f2).input(0)->origin()); } static int diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index 0e4ea0ff4..ad1b4fa8d 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -38,14 +38,18 @@ test1() { vt, iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); - lambda->add_ctxvar(i); + lambda->AddContextVar(*i); - auto t = jlm::tests::test_op::create(lambda->subregion(), { lambda->fctargument(0) }, { vt }); + auto t = jlm::tests::test_op::create( + lambda->subregion(), + { lambda->GetFunctionArguments()[0] }, + { vt }); - return lambda->finalize({ t->output(0), lambda->fctargument(1), lambda->fctargument(2) }); + return lambda->finalize( + { t->output(0), lambda->GetFunctionArguments()[1], 
lambda->GetFunctionArguments()[2] }); }; - auto SetupF2 = [&](lambda::output * f1) + auto SetupF2 = [&](jlm::rvsdg::output * f1) { auto vt = jlm::tests::valuetype::Create(); auto iOStateType = iostatetype::Create(); @@ -59,11 +63,11 @@ test1() { vt, iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); - auto d = lambda->add_ctxvar(f1); - auto controlArgument = lambda->fctargument(0); - auto valueArgument = lambda->fctargument(1); - auto iOStateArgument = lambda->fctargument(2); - auto memoryStateArgument = lambda->fctargument(3); + auto d = lambda->AddContextVar(*f1).inner; + auto controlArgument = lambda->GetFunctionArguments()[0]; + auto valueArgument = lambda->GetFunctionArguments()[1]; + auto iOStateArgument = lambda->GetFunctionArguments()[2]; + auto memoryStateArgument = lambda->GetFunctionArguments()[3]; auto gamma = jlm::rvsdg::GammaNode::create(controlArgument, 2); auto gammaInputF1 = gamma->add_entryvar(d); @@ -73,7 +77,7 @@ test1() auto callResults = CallNode::Create( gammaInputF1->argument(0), - f1->node()->Type(), + jlm::rvsdg::AssertGetOwnerNode(*f1).Type(), { gammaInputValue->argument(0), gammaInputIoState->argument(0), gammaInputMemoryState->argument(0) }); @@ -129,10 +133,11 @@ test2() auto SetupF1 = [&](const std::shared_ptr & functionType) { auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); - return lambda->finalize({ lambda->fctargument(1), lambda->fctargument(2) }); + return lambda->finalize( + { lambda->GetFunctionArguments()[1], lambda->GetFunctionArguments()[2] }); }; - auto SetupF2 = [&](lambda::output * f1) + auto SetupF2 = [&](jlm::rvsdg::output * f1) { auto iOStateType = iostatetype::Create(); auto memoryStateType = MemoryStateType::Create(); @@ -141,10 +146,10 @@ test2() { iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create(graph.root(), functionType, "f2", 
linkage::external_linkage); - auto cvi = lambda->add_ctxvar(i); - auto cvf1 = lambda->add_ctxvar(f1); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto cvi = lambda->AddContextVar(*i).inner; + auto cvf1 = lambda->AddContextVar(*f1).inner; + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto callResults = CallNode::Create(cvi, functionType2, { cvf1, iOStateArgument, memoryStateArgument }); @@ -166,7 +171,8 @@ test2() // Assert // Function f1 should not have been inlined. - assert(is(jlm::rvsdg::output::GetNode(*f2->node()->fctresult(0)->origin()))); + assert(is(jlm::rvsdg::output::GetNode( + *jlm::rvsdg::AssertGetOwnerNode(*f2).GetFunctionResults()[0]->origin()))); } static int diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index f646f3fe4..4a0e0e281 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -33,8 +33,8 @@ TestLambda() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto constant = jlm::rvsdg::create_bitconstant(lambda->subregion(), 32, 4); @@ -152,8 +152,8 @@ TestAddOperation() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; // Create add operation std::cout << "Add Operation" << std::endl; @@ -253,8 +253,8 @@ TestComZeroExt() auto lambda = 
lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; // Create add operation std::cout << "Add Operation" << std::endl; @@ -399,8 +399,8 @@ TestMatch() auto lambda = lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); - auto iOStateArgument = lambda->fctargument(0); - auto memoryStateArgument = lambda->fctargument(1); + auto iOStateArgument = lambda->GetFunctionArguments()[0]; + auto memoryStateArgument = lambda->GetFunctionArguments()[1]; // Create a match operation std::cout << "Match Operation" << std::endl; From c91e82e385e3c55fe51ff07586d60b526324851b Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Mon, 2 Dec 2024 21:47:00 +0100 Subject: [PATCH 126/170] gamma: change API for mapping input to region argument (#674) Remove the GammaInput class and the way it is used to map an input to the corresponding argument inside the lambda region. Replace this with an API function attached to the node instead. 
--- .../backend/rvsdg2rhls/GammaConversion.cpp | 24 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 7 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- .../rvsdg2rhls/distribute-constants.cpp | 17 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 39 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 68 +-- .../rvsdg2rhls/remove-unused-state.cpp | 7 +- jlm/hls/opt/cne.cpp | 19 +- .../InterProceduralGraphConversion.cpp | 19 +- jlm/llvm/ir/operators/call.cpp | 24 +- jlm/llvm/ir/operators/lambda.cpp | 12 +- jlm/llvm/opt/DeadNodeElimination.cpp | 12 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 9 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 22 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 8 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 28 +- jlm/llvm/opt/cne.cpp | 19 +- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 54 +- jlm/llvm/opt/pull.cpp | 57 +- jlm/llvm/opt/push.cpp | 6 +- jlm/llvm/opt/unroll.cpp | 16 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 4 +- jlm/rvsdg/gamma.cpp | 258 +++++--- jlm/rvsdg/gamma.hpp | 573 ++++++------------ tests/TestRvsdgs.cpp | 123 ++-- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 16 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 41 +- .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 37 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 45 +- .../opt/InvariantValueRedirectionTests.cpp | 67 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 18 +- .../llvm/opt/alias-analyses/TestAndersen.cpp | 7 +- .../alias-analyses/TestMemoryStateEncoder.cpp | 12 +- .../opt/alias-analyses/TestSteensgaard.cpp | 7 +- tests/jlm/llvm/opt/test-cne.cpp | 24 +- tests/jlm/llvm/opt/test-inlining.cpp | 26 +- tests/jlm/llvm/opt/test-inversion.cpp | 24 +- tests/jlm/llvm/opt/test-pull.cpp | 26 +- tests/jlm/llvm/opt/test-push.cpp | 12 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 8 +- tests/jlm/rvsdg/test-gamma.cpp | 144 ++--- 43 files changed, 909 insertions(+), 1036 deletions(-) diff --git 
a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index d2d643128..9fae4a13d 100644 --- a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -18,14 +18,14 @@ ConvertGammaNodeWithoutSpeculation(rvsdg::GammaNode & gammaNode) // create a branch for each gamma input and map the corresponding argument of each subregion to an // output of the branch - for (size_t i = 0; i < gammaNode.nentryvars(); i++) + for (const auto & entryvar : gammaNode.GetEntryVars()) { auto branchResults = - hls::branch_op::create(*gammaNode.predicate()->origin(), *gammaNode.entryvar(i)->origin()); + hls::branch_op::create(*gammaNode.predicate()->origin(), *entryvar.input->origin()); for (size_t s = 0; s < gammaNode.nsubregions(); s++) { - substitutionMap.insert(gammaNode.subregion(s)->argument(i), branchResults[s]); + substitutionMap.insert(entryvar.branchArgument[s], branchResults[s]); } } @@ -34,19 +34,19 @@ ConvertGammaNodeWithoutSpeculation(rvsdg::GammaNode & gammaNode) gammaNode.subregion(s)->copy(gammaNode.region(), substitutionMap, false, false); } - for (size_t i = 0; i < gammaNode.nexitvars(); i++) + for (const auto & ex : gammaNode.GetExitVars()) { std::vector alternatives; for (size_t s = 0; s < gammaNode.nsubregions(); s++) { - alternatives.push_back(substitutionMap.lookup(gammaNode.subregion(s)->result(i)->origin())); + alternatives.push_back(substitutionMap.lookup(ex.branchResult[s]->origin())); } // create mux nodes for each gamma output // use mux instead of merge in case of paths with different delay - otherwise one could overtake // the other see https://ieeexplore.ieee.org/abstract/document/9515491 auto mux = hls::mux_op::create(*gammaNode.predicate()->origin(), alternatives, false); - gammaNode.exitvar(i)->divert_users(mux[0]); + ex.output->divert_users(mux[0]); } remove(&gammaNode); @@ -58,13 +58,11 @@ ConvertGammaNodeWithSpeculation(rvsdg::GammaNode & gammaNode) 
rvsdg::SubstitutionMap substitutionMap; // Map arguments to origins of inputs. Forks will automatically be created later - for (size_t i = 0; i < gammaNode.nentryvars(); i++) + for (const auto & entryvar : gammaNode.GetEntryVars()) { - auto gammaInput = gammaNode.entryvar(i); - for (size_t s = 0; s < gammaNode.nsubregions(); s++) { - substitutionMap.insert(gammaNode.subregion(s)->argument(i), gammaInput->origin()); + substitutionMap.insert(entryvar.branchArgument[s], entryvar.input->origin()); } } @@ -73,18 +71,18 @@ ConvertGammaNodeWithSpeculation(rvsdg::GammaNode & gammaNode) gammaNode.subregion(s)->copy(gammaNode.region(), substitutionMap, false, false); } - for (size_t i = 0; i < gammaNode.nexitvars(); i++) + for (const auto & ex : gammaNode.GetExitVars()) { std::vector alternatives; for (size_t s = 0; s < gammaNode.nsubregions(); s++) { - alternatives.push_back(substitutionMap.lookup(gammaNode.subregion(s)->result(i)->origin())); + alternatives.push_back(substitutionMap.lookup(ex.branchResult[s]->origin())); } // create discarding mux for each gamma output auto merge = hls::mux_op::create(*gammaNode.predicate()->origin(), alternatives, true); - gammaNode.exitvar(i)->divert_users(merge[0]); + ex.output->divert_users(merge[0]); } remove(&gammaNode); diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index a7fe6275c..2eb8e9dd6 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -132,10 +132,11 @@ RemovePassthroughArgument(const rvsdg::RegionArgument & argument) static void RemoveUnusedStatesFromGammaNode(rvsdg::GammaNode & gammaNode) { - for (int i = gammaNode.nentryvars() - 1; i >= 0; --i) + auto entryvars = gammaNode.GetEntryVars(); + for (int i = entryvars.size() - 1; i >= 0; --i) { size_t resultIndex = 0; - auto argument = gammaNode.subregion(0)->argument(i); + auto argument = entryvars[i].branchArgument[0]; if 
(argument->nusers() == 1) { auto result = dynamic_cast(*argument->begin()); @@ -154,7 +155,7 @@ RemoveUnusedStatesFromGammaNode(rvsdg::GammaNode & gammaNode) if (shouldRemove) { - auto origin = gammaNode.entryvar(i)->origin(); + auto origin = entryvars[i].input->origin(); gammaNode.output(resultIndex)->divert_users(origin); for (size_t r = 0; r < gammaNode.nsubregions(); r++) diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index ad0adf254..297310b1e 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -79,7 +79,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) if (auto gamma = dynamic_cast(region->node())) { - gamma->add_entryvar(output); + gamma->AddEntryVar(output); output = region->argument(region->narguments() - 1); } else if (auto theta = dynamic_cast(region->node())) diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 7ee354e2d..4dbb0eac8 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -113,7 +113,7 @@ add_triggers(rvsdg::Region * region) { JLM_ASSERT(trigger != nullptr); JLM_ASSERT(get_trigger(gn->subregion(0)) == nullptr); - gn->add_entryvar(trigger); + gn->AddEntryVar(trigger); for (size_t i = 0; i < gn->nsubregions(); ++i) { add_triggers(gn->subregion(i)); diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 4f8a729bc..0bb6d26bc 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -24,6 +24,7 @@ distribute_constant(const rvsdg::SimpleOperation & op, rvsdg::simple_output * ou changed = false; for (auto user : *out) { + auto node = rvsdg::input::GetNode(*user); if (auto ti = dynamic_cast(user)) { auto arg = ti->argument(); @@ -45,24 +46,24 @@ distribute_constant(const 
rvsdg::SimpleOperation & op, rvsdg::simple_output * ou break; } } - if (auto gi = dynamic_cast(user)) + if (auto gammaNode = dynamic_cast(node)) { - if (gi->node()->predicate() == gi) + if (gammaNode->predicate() == user) { continue; } - for (int i = gi->narguments() - 1; i >= 0; --i) + for (auto argument : gammaNode->MapInputEntryVar(*user).branchArgument) { - if (gi->argument(i)->nusers()) + if (argument->nusers()) { auto arg_replacement = dynamic_cast( - rvsdg::simple_node::create_normalized(gi->argument(i)->region(), op, {})[0]); - gi->argument(i)->divert_users(arg_replacement); + rvsdg::simple_node::create_normalized(argument->region(), op, {})[0]); + argument->divert_users(arg_replacement); distribute_constant(op, arg_replacement); } - gi->node()->subregion(i)->RemoveArgument(gi->argument(i)->index()); + argument->region()->RemoveArgument(argument->index()); } - gi->node()->RemoveInput(gi->index()); + gammaNode->RemoveInput(user->index()); changed = true; break; } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 97785a40a..c920be727 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -103,18 +103,13 @@ route_through(rvsdg::Region * target, jlm::rvsdg::output * response) auto parrent_user = *parent_response->begin(); if (auto gn = dynamic_cast(target->node())) { - auto ip = gn->add_entryvar(parent_response); - std::vector vec; - for (auto & arg : ip->arguments) - { - vec.push_back(&arg); - } - parrent_user->divert_to(gn->add_exitvar(vec)); - for (auto & arg : ip->arguments) + auto ip = gn->AddEntryVar(parent_response); + parrent_user->divert_to(gn->AddExitVar(ip.branchArgument).output); + for (auto arg : ip.branchArgument) { - if (arg.region() == target) + if (arg->region() == target) { - return &arg; + return arg; } } JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN"); @@ -188,26 +183,28 @@ trace_edge( JLM_ASSERT(new_edge->nusers() == 1); auto user = 
*common_edge->begin(); auto new_next = *new_edge->begin(); + auto node = rvsdg::input::GetNode(*user); if (auto res = dynamic_cast(user)) { // end of region reached return res; } - else if (auto gi = dynamic_cast(user)) + else if (auto gammaNode = dynamic_cast(node)) { - auto gn = gi->node(); - auto ip = gn->add_entryvar(new_edge); + auto ip = gammaNode->AddEntryVar(new_edge); std::vector vec; - for (auto & arg : ip->arguments) - { - vec.push_back(&arg); - } - new_edge = gn->add_exitvar(vec); + new_edge = gammaNode->AddExitVar(ip.branchArgument).output; new_next->divert_to(new_edge); - for (size_t i = 0; i < gn->nsubregions(); ++i) + + auto entryvar = gammaNode->MapInputEntryVar(*user); + for (size_t i = 0; i < gammaNode->nsubregions(); ++i) { - auto subres = - trace_edge(gi->argument(i), ip->argument(i), load_nodes, store_nodes, decouple_nodes); + auto subres = trace_edge( + entryvar.branchArgument[i], + ip.branchArgument[i], + load_nodes, + store_nodes, + decouple_nodes); common_edge = subres->output(); } } diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index cf0faef79..fce80d75e 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -121,23 +121,23 @@ fix_match_inversion(rvsdg::GammaNode * old_gamma) auto new_gamma = rvsdg::GammaNode::create(new_match, match->nalternatives()); rvsdg::SubstitutionMap rmap0; // subregion 0 of the new gamma - 1 of the old rvsdg::SubstitutionMap rmap1; - for (auto oev = old_gamma->begin_entryvar(); oev != old_gamma->end_entryvar(); oev++) + for (const auto & oev : old_gamma->GetEntryVars()) { - auto nev = new_gamma->add_entryvar(oev->origin()); - rmap0.insert(oev->argument(1), nev->argument(0)); - rmap1.insert(oev->argument(0), nev->argument(1)); + auto nev = new_gamma->AddEntryVar(oev.input->origin()); + rmap0.insert(oev.branchArgument[1], nev.branchArgument[0]); + rmap1.insert(oev.branchArgument[0], nev.branchArgument[1]); } 
/* copy subregions */ old_gamma->subregion(0)->copy(new_gamma->subregion(1), rmap1, false, false); old_gamma->subregion(1)->copy(new_gamma->subregion(0), rmap0, false, false); - for (auto oex = old_gamma->begin_exitvar(); oex != old_gamma->end_exitvar(); oex++) + for (auto oex : old_gamma->GetExitVars()) { std::vector operands; - operands.push_back(rmap0.lookup(oex->result(1)->origin())); - operands.push_back(rmap1.lookup(oex->result(0)->origin())); - auto nex = new_gamma->add_exitvar(operands); - oex.output()->divert_users(nex); + operands.push_back(rmap0.lookup(oex.branchResult[1]->origin())); + operands.push_back(rmap1.lookup(oex.branchResult[0]->origin())); + auto nex = new_gamma->AddExitVar(operands).output; + oex.output->divert_users(nex); } remove(old_gamma); remove(no->node()); @@ -250,18 +250,17 @@ depends_on(jlm::rvsdg::output * output, jlm::rvsdg::node * node) return false; } -rvsdg::GammaInput * +rvsdg::GammaNode::EntryVar get_entryvar(jlm::rvsdg::output * origin, rvsdg::GammaNode * gamma) { for (auto user : *origin) { - auto gi = dynamic_cast(user); - if (gi && gi->node() == gamma) + if (rvsdg::TryGetOwnerNode(*user) == gamma) { - return gi; + return gamma->MapInputEntryVar(*user); } } - return gamma->add_entryvar(origin); + return gamma->AddEntryVar(origin); } bool @@ -269,48 +268,44 @@ merge_gamma(rvsdg::GammaNode * gamma) { for (auto user : *gamma->predicate()->origin()) { - auto gi = dynamic_cast(user); - if (gi && gi != gamma->predicate()) + auto other_gamma = dynamic_cast(rvsdg::input::GetNode(*user)); + if (other_gamma && gamma != other_gamma) { // other gamma depending on same predicate - auto other_gamma = gi->node(); JLM_ASSERT(other_gamma->nsubregions() == gamma->nsubregions()); bool can_merge = true; - for (size_t i = 0; i < gamma->nentryvars(); ++i) + for (const auto & ev : gamma->GetEntryVars()) { - auto ev = gamma->entryvar(i); // we only merge gammas whose inputs directly, or not at all, depend on the gamma being // merged into - 
can_merge &= - is_output_of(ev->origin(), other_gamma) || !depends_on(ev->origin(), other_gamma); + can_merge &= is_output_of(ev.input->origin(), other_gamma) + || !depends_on(ev.input->origin(), other_gamma); } - for (size_t i = 0; i < other_gamma->nentryvars(); ++i) + for (const auto & oev : other_gamma->GetEntryVars()) { - auto oev = other_gamma->entryvar(i); // prevent cycles - can_merge &= !depends_on(oev->origin(), gamma); + can_merge &= !depends_on(oev.input->origin(), gamma); } if (can_merge) { std::vector rmap(gamma->nsubregions()); // populate argument mappings - for (size_t i = 0; i < gamma->nentryvars(); ++i) + for (const auto & ev : gamma->GetEntryVars()) { - auto ev = gamma->entryvar(i); - if (is_output_of(ev->origin(), other_gamma)) + if (rvsdg::TryGetOwnerNode(*ev.input->origin()) == other_gamma) { - auto go = dynamic_cast(ev->origin()); + auto oex = other_gamma->MapOutputExitVar(*ev.input->origin()); for (size_t j = 0; j < gamma->nsubregions(); ++j) { - rmap[j].insert(ev->argument(j), go->result(j)->origin()); + rmap[j].insert(ev.branchArgument[j], oex.branchResult[j]->origin()); } } else { - auto oev = get_entryvar(ev->origin(), other_gamma); + auto oev = get_entryvar(ev.input->origin(), other_gamma); for (size_t j = 0; j < gamma->nsubregions(); ++j) { - rmap[j].insert(ev->argument(j), oev->argument(j)); + rmap[j].insert(ev.branchArgument[j], oev.branchArgument[j]); } } } @@ -320,16 +315,15 @@ merge_gamma(rvsdg::GammaNode * gamma) gamma->subregion(j)->copy(other_gamma->subregion(j), rmap[j], false, false); } // handle exitvars - for (size_t i = 0; i < gamma->nexitvars(); ++i) + for (const auto & ex : gamma->GetExitVars()) { - auto ex = gamma->exitvar(i); std::vector operands; - for (size_t j = 0; j < ex->nresults(); j++) + for (size_t j = 0; j < ex.branchResult.size(); j++) { - operands.push_back(rmap[j].lookup(ex->result(j)->origin())); + operands.push_back(rmap[j].lookup(ex.branchResult[j]->origin())); } - auto oex = 
other_gamma->add_exitvar(operands); - ex->divert_users(oex); + auto oex = other_gamma->AddExitVar(operands).output; + ex.output->divert_users(oex); } remove(gamma); return true; diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index 4f626bd7b..80c30b844 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -130,11 +130,12 @@ remove_unused_state(llvm::RvsdgModule & rm) void remove_gamma_passthrough(rvsdg::GammaNode * gn) { // remove inputs in reverse - for (int i = gn->nentryvars() - 1; i >= 0; --i) + auto entryvars = gn->GetEntryVars(); + for (int i = entryvars.size() - 1; i >= 0; --i) { bool can_remove = true; size_t res_index = 0; - auto arg = gn->subregion(0)->argument(i); + auto arg = entryvars[i].branchArgument[0]; if (arg->nusers() == 1) { auto res = dynamic_cast(*arg->begin()); @@ -150,7 +151,7 @@ remove_gamma_passthrough(rvsdg::GammaNode * gn) } if (can_remove) { - auto origin = gn->entryvar(i)->origin(); + auto origin = entryvars[i].input->origin(); // divert users of output to origin of input gn->output(res_index)->divert_users(origin); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 91be7cef4..490bb5459 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -238,12 +238,15 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return true; } - if (is(o1) && is(o2)) + if (auto g1 = rvsdg::TryGetRegionParentNode(*o1)) { - JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = static_cast(o2); - return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); + if (auto g2 = rvsdg::TryGetRegionParentNode(*o2)) + { + JLM_ASSERT(g1 == g2); + auto origin1 = g1->MapBranchArgumentEntryVar(*o1).input->origin(); + auto origin2 = g2->MapBranchArgumentEntryVar(*o2).input->origin(); + return congruent(origin1, origin2, vs, ctx); + } 
} if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) @@ -508,10 +511,10 @@ divert_gamma(rvsdg::StructuralNode * node, cnectx & ctx) JLM_ASSERT(rvsdg::is(node)); auto gamma = static_cast(node); - for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) + for (const auto & ev : gamma->GetEntryVars()) { - for (size_t n = 0; n < ev->narguments(); n++) - divert_users(ev->argument(n), ctx); + for (auto input : ev.branchArgument) + divert_users(input, ctx); } for (size_t r = 0; r < node->nsubregions(); r++) diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 9c2ecc0bd..687b78e18 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -485,10 +485,10 @@ ConvertSelect( auto predicate = rvsdg::simple_node::create_normalized(®ion, op, { p })[0]; auto gamma = rvsdg::GammaNode::create(predicate, 2); - auto ev1 = gamma->add_entryvar(variableMap.lookup(threeAddressCode.operand(2))); - auto ev2 = gamma->add_entryvar(variableMap.lookup(threeAddressCode.operand(1))); - auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); - variableMap.insert(threeAddressCode.result(0), ex); + auto ev1 = gamma->AddEntryVar(variableMap.lookup(threeAddressCode.operand(2))); + auto ev2 = gamma->AddEntryVar(variableMap.lookup(threeAddressCode.operand(1))); + auto ex = gamma->AddExitVar({ ev1.branchArgument[0], ev2.branchArgument[1] }); + variableMap.insert(threeAddressCode.result(0), ex.output); } static void @@ -709,9 +709,10 @@ Convert( * Add gamma inputs. 
*/ auto & demandSet = demandMap.Lookup(branchAggregationNode); - std::unordered_map gammaInputMap; + std::unordered_map gammaInputMap; for (auto & v : demandSet.InputVariables().Variables()) - gammaInputMap[&v] = gamma->add_entryvar(regionalizedVariableMap.GetTopVariableMap().lookup(&v)); + gammaInputMap[&v] = + gamma->AddEntryVar(regionalizedVariableMap.GetTopVariableMap().lookup(&v)).input; /* * Convert subregions. @@ -722,7 +723,9 @@ Convert( { regionalizedVariableMap.PushRegion(*gamma->subregion(n)); for (const auto & pair : gammaInputMap) - regionalizedVariableMap.GetTopVariableMap().insert(pair.first, pair.second->argument(n)); + regionalizedVariableMap.GetTopVariableMap().insert( + pair.first, + gamma->MapInputEntryVar(*pair.second).branchArgument[n]); ConvertAggregationNode( *branchAggregationNode.child(n), @@ -741,7 +744,7 @@ Convert( for (auto & v : demandSet.OutputVariables().Variables()) { JLM_ASSERT(xvmap.find(&v) != xvmap.end()); - regionalizedVariableMap.GetTopVariableMap().insert(&v, gamma->add_exitvar(xvmap[&v])); + regionalizedVariableMap.GetTopVariableMap().insert(&v, gamma->AddExitVar(xvmap[&v]).output); } } diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index c5480a3a8..baca5e613 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -21,13 +21,17 @@ static rvsdg::input * invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutputs); static rvsdg::StructuralInput * -invariantInput(const rvsdg::GammaOutput & output, InvariantOutputMap & invariantOutputs) +invariantInput( + const rvsdg::GammaNode & gamma, + const rvsdg::output & output, + InvariantOutputMap & invariantOutputs) { size_t n; rvsdg::StructuralInput * input = nullptr; - for (n = 0; n < output.nresults(); n++) + auto exitvar = gamma.MapOutputExitVar(output); + for (n = 0; n < exitvar.branchResult.size(); n++) { - auto origin = output.result(n)->origin(); + auto origin = exitvar.branchResult[n]->origin(); 
bool resultIsInvariant = false; while (true) @@ -52,7 +56,7 @@ invariantInput(const rvsdg::GammaOutput & output, InvariantOutputMap & invariant break; } - if (n == output.nresults()) + if (n == exitvar.branchResult.size()) { invariantOutputs[&output] = input; return input; @@ -106,8 +110,8 @@ invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutpu return invariantInput(*thetaInput->output(), invariantOutputs); } - if (auto gammaOutput = dynamic_cast(&output)) - return invariantInput(*gammaOutput, invariantOutputs); + if (auto gamma = rvsdg::TryGetOwnerNode(output)) + return invariantInput(*gamma, output, invariantOutputs); return nullptr; } @@ -184,9 +188,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) } } - if (auto gammaOutput = dynamic_cast(origin)) + if (rvsdg::TryGetOwnerNode(*origin)) { - if (auto input = invariantInput(*gammaOutput)) + if (auto input = invariantInput(*origin)) { origin = input->origin(); continue; @@ -195,9 +199,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) return origin; } - if (auto gammaArgument = dynamic_cast(origin)) + if (auto gamma = rvsdg::TryGetRegionParentNode(*origin)) { - origin = gammaArgument->input()->origin(); + origin = gamma->MapBranchArgumentEntryVar(*origin).input->origin(); continue; } diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index ad32f7609..2114e2b05 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -290,16 +290,18 @@ node::ComputeCallSummary() const continue; } - if (auto gamma_input = dynamic_cast(input)) + if (auto gammaNode = dynamic_cast(inputNode)) { - for (auto & argument : *gamma_input) - worklist.insert(worklist.end(), argument.begin(), argument.end()); + for (auto & argument : gammaNode->MapInputEntryVar(*input).branchArgument) + { + worklist.insert(worklist.end(), argument->begin(), argument->end()); + } continue; } - if (auto gammaResult = dynamic_cast(input)) + if (auto gamma = 
rvsdg::TryGetRegionParentNode(*input)) { - auto output = gammaResult->output(); + auto output = gamma->MapBranchResultExitVar(*input).output; worklist.insert(worklist.end(), output->begin(), output->end()); continue; } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 21299c67d..334821b35 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -198,19 +198,19 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto gammaOutput = dynamic_cast(&output)) + if (auto gamma = rvsdg::TryGetOwnerNode(output)) { - MarkOutput(*gammaOutput->node()->predicate()->origin()); - for (const auto & result : gammaOutput->results) + MarkOutput(*gamma->predicate()->origin()); + for (const auto & result : gamma->MapOutputExitVar(output).branchResult) { - MarkOutput(*result.origin()); + MarkOutput(*result->origin()); } return; } - if (auto gammaArgument = dynamic_cast(&output)) + if (auto gamma = rvsdg::TryGetRegionParentNode(output)) { - MarkOutput(*gammaArgument->input()->origin()); + MarkOutput(*gamma->MapBranchArgumentEntryVar(output).input->origin()); return; } diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 203d85511..85923567b 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -140,14 +140,11 @@ InvariantValueRedirection::RedirectInSubregions(rvsdg::StructuralNode & structur void InvariantValueRedirection::RedirectGammaOutputs(rvsdg::GammaNode & gammaNode) { - for (auto it = gammaNode.begin_exitvar(); it != gammaNode.end_exitvar(); it++) + for (auto exitvar : gammaNode.GetExitVars()) { - auto & gammaOutput = *it; - - rvsdg::output * invariantOrigin = nullptr; - if (gammaOutput.IsInvariant(&invariantOrigin)) + if (auto invariantOrigin = rvsdg::GetGammaInvariantOrigin(gammaNode, exitvar)) { - it->divert_users(invariantOrigin); + 
exitvar.output->divert_users(*invariantOrigin); } } } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 15544f514..0873b6609 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -1088,16 +1088,16 @@ void Andersen::AnalyzeGamma(const rvsdg::GammaNode & gamma) { // Handle input variables - for (auto ev = gamma.begin_entryvar(); ev != gamma.end_entryvar(); ++ev) + for (const auto & ev : gamma.GetEntryVars()) { - if (!IsOrContainsPointerType(ev->type())) + if (!IsOrContainsPointerType(ev.input->type())) continue; - auto & inputRegister = *ev->origin(); + auto & inputRegister = *ev.input->origin(); const auto inputRegisterPO = Set_->GetRegisterPointerObject(inputRegister); - for (auto & argument : *ev) - Set_->MapRegisterToExistingPointerObject(argument, inputRegisterPO); + for (auto & argument : ev.branchArgument) + Set_->MapRegisterToExistingPointerObject(*argument, inputRegisterPO); } // Handle subregions @@ -1105,17 +1105,17 @@ Andersen::AnalyzeGamma(const rvsdg::GammaNode & gamma) AnalyzeRegion(*gamma.subregion(n)); // Handle exit variables - for (auto ex = gamma.begin_exitvar(); ex != gamma.end_exitvar(); ++ex) + for (const auto & ex : gamma.GetExitVars()) { - if (!IsOrContainsPointerType(ex->type())) + if (!IsOrContainsPointerType(ex.output->type())) continue; - auto & outputRegister = *ex.output(); - const auto outputRegisterPO = Set_->CreateRegisterPointerObject(outputRegister); + auto & outputRegister = ex.output; + const auto outputRegisterPO = Set_->CreateRegisterPointerObject(*outputRegister); - for (auto & result : *ex) + for (auto result : ex.branchResult) { - const auto resultRegisterPO = Set_->GetRegisterPointerObject(*result.origin()); + const auto resultRegisterPO = Set_->GetRegisterPointerObject(*result->origin()); Constraints_->AddConstraint(SupersetConstraint(outputRegisterPO, resultRegisterPO)); } } diff --git 
a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 18ee787b8..b702221f3 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -858,9 +858,9 @@ MemoryStateEncoder::EncodeGammaEntry(rvsdg::GammaNode & gammaNode) auto memoryNodeStatePairs = stateMap.GetStates(*region, memoryNodes); for (auto & memoryNodeStatePair : memoryNodeStatePairs) { - auto gammaInput = gammaNode.add_entryvar(&memoryNodeStatePair->State()); - for (auto & argument : *gammaInput) - stateMap.InsertState(memoryNodeStatePair->MemoryNode(), argument); + auto gammaInput = gammaNode.AddEntryVar(&memoryNodeStatePair->State()); + for (auto & argument : gammaInput.branchArgument) + stateMap.InsertState(memoryNodeStatePair->MemoryNode(), *argument); } } @@ -882,7 +882,7 @@ MemoryStateEncoder::EncodeGammaExit(rvsdg::GammaNode & gammaNode) states.push_back(&state); } - auto state = gammaNode.add_exitvar(states); + auto state = gammaNode.AddExitVar(states).output; memoryNodeStatePair->ReplaceState(*state); } } diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index cdfc22ba9..483b72950 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -230,7 +230,7 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":cv:", index); } - if (is(Output_)) + if (rvsdg::TryGetRegionParentNode(*Output_)) { auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg", index); @@ -248,9 +248,9 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":out", index); } - if (is(Output_)) + if (auto node = rvsdg::TryGetOwnerNode(*Output_)) { - auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->GetOperation().debug_string(); + auto dbgstr = node->GetOperation().debug_string(); return 
jlm::util::strfmt(dbgstr, ":out", index); } @@ -1621,16 +1621,16 @@ void Steensgaard::AnalyzeGamma(const rvsdg::GammaNode & node) { // Handle entry variables - for (auto ev = node.begin_entryvar(); ev != node.end_entryvar(); ev++) + for (const auto & ev : node.GetEntryVars()) { - auto & origin = *ev->origin(); + auto & origin = *ev.input->origin(); if (HasOrContainsPointerType(origin)) { - auto & originLocation = Context_->GetLocation(*ev->origin()); - for (auto & argument : *ev) + auto & originLocation = Context_->GetLocation(*ev.input->origin()); + for (auto argument : ev.branchArgument) { - auto & argumentLocation = Context_->GetOrInsertRegisterLocation(argument); + auto & argumentLocation = Context_->GetOrInsertRegisterLocation(*argument); Context_->Join(argumentLocation, originLocation); } } @@ -1641,16 +1641,14 @@ Steensgaard::AnalyzeGamma(const rvsdg::GammaNode & node) AnalyzeRegion(*node.subregion(n)); // Handle exit variables - for (auto ex = node.begin_exitvar(); ex != node.end_exitvar(); ex++) + for (auto ex : node.GetExitVars()) { - auto & output = *ex.output(); - - if (HasOrContainsPointerType(output)) + if (HasOrContainsPointerType(*ex.output)) { - auto & outputLocation = Context_->GetOrInsertRegisterLocation(output); - for (auto & result : *ex) + auto & outputLocation = Context_->GetOrInsertRegisterLocation(*ex.output); + for (auto result : ex.branchResult) { - auto & resultLocation = Context_->GetLocation(*result.origin()); + auto & resultLocation = Context_->GetLocation(*result->origin()); Context_->Join(outputLocation, resultLocation); } } diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 5d8bb071f..7c6fbe2f0 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -222,12 +222,15 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & return true; } - if (is(o1) && is(o2)) + if (auto g1 = rvsdg::TryGetRegionParentNode(*o1)) { - JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = 
static_cast(o1); - auto a2 = static_cast(o2); - return congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx); + if (auto g2 = rvsdg::TryGetRegionParentNode(*o2)) + { + JLM_ASSERT(g1 == g2); + auto origin1 = g1->MapBranchArgumentEntryVar(*o1).input->origin(); + auto origin2 = g2->MapBranchArgumentEntryVar(*o2).input->origin(); + return congruent(origin1, origin2, vs, ctx); + } } if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2) @@ -469,10 +472,10 @@ divert_gamma(rvsdg::StructuralNode * node, cnectx & ctx) JLM_ASSERT(rvsdg::is(node)); auto gamma = static_cast(node); - for (auto ev = gamma->begin_entryvar(); ev != gamma->end_entryvar(); ev++) + for (const auto & ev : gamma->GetEntryVars()) { - for (size_t n = 0; n < ev->narguments(); n++) - divert_users(ev->argument(n), ctx); + for (auto input : ev.branchArgument) + divert_users(input, ctx); } for (size_t r = 0; r < node->nsubregions(); r++) diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 1616ca675..217ee8c9d 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -73,7 +73,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) if (auto gamma = dynamic_cast(region->node())) { - gamma->add_entryvar(output); + gamma->AddEntryVar(output); output = region->argument(region->narguments() - 1); } else if (auto theta = dynamic_cast(region->node())) diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index d7546e445..2deb867e5 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -81,9 +81,9 @@ pullin(rvsdg::GammaNode * gamma, rvsdg::ThetaNode * theta) { if (jlm::rvsdg::output::GetNode(*lv->result()->origin()) != gamma) { - auto ev = gamma->add_entryvar(lv->result()->origin()); - JLM_ASSERT(ev->narguments() == 2); - auto xv = gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); + auto ev = gamma->AddEntryVar(lv->result()->origin()); + JLM_ASSERT(ev.branchArgument.size() == 2); + auto xv = gamma->AddExitVar({ 
ev.branchArgument[0], ev.branchArgument[1] }).output; lv->result()->divert_to(xv); } } @@ -160,18 +160,18 @@ invert(rvsdg::ThetaNode * otheta) { /* setup substitution map for exit region copying */ auto osubregion0 = ogamma->subregion(0); - for (auto oev = ogamma->begin_entryvar(); oev != ogamma->end_entryvar(); oev++) + for (const auto & oev : ogamma->GetEntryVars()) { - if (auto argument = to_argument(oev->origin())) + if (auto argument = to_argument(oev.input->origin())) { - auto nev = ngamma->add_entryvar(argument->input()->origin()); - r0map.insert(oev->argument(0), nev->argument(0)); + auto nev = ngamma->AddEntryVar(argument->input()->origin()); + r0map.insert(oev.branchArgument[0], nev.branchArgument[0]); } else { - auto substitute = smap.lookup(oev->origin()); - auto nev = ngamma->add_entryvar(substitute); - r0map.insert(oev->argument(0), nev->argument(0)); + auto substitute = smap.lookup(oev.input->origin()); + auto nev = ngamma->AddEntryVar(substitute); + r0map.insert(oev.branchArgument[0], nev.branchArgument[0]); } } @@ -198,24 +198,23 @@ invert(rvsdg::ThetaNode * otheta) std::unordered_map nlvs; for (const auto & olv : *otheta) { - auto ev = ngamma->add_entryvar(olv->input()->origin()); - auto nlv = ntheta->add_loopvar(ev->argument(1)); + auto ev = ngamma->AddEntryVar(olv->input()->origin()); + auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); r1map.insert(olv->argument(), nlv->argument()); nlvs[olv->input()] = nlv; } - for (size_t n = 1; n < ogamma->ninputs(); n++) + for (const auto & oev : ogamma->GetEntryVars()) { - auto oev = util::AssertedCast(ogamma->input(n)); - if (auto argument = to_argument(oev->origin())) + if (auto argument = to_argument(oev.input->origin())) { - r1map.insert(oev->argument(1), nlvs[argument->input()]->argument()); + r1map.insert(oev.branchArgument[1], nlvs[argument->input()]->argument()); } else { - auto ev = ngamma->add_entryvar(smap.lookup(oev->origin())); - auto nlv = ntheta->add_loopvar(ev->argument(1)); - 
r1map.insert(oev->argument(1), nlv->argument()); - nlvs[oev] = nlv; + auto ev = ngamma->AddEntryVar(smap.lookup(oev.input->origin())); + auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); + r1map.insert(oev.branchArgument[1], nlv->argument()); + nlvs[oev.input] = nlv; } } @@ -242,18 +241,17 @@ invert(rvsdg::ThetaNode * otheta) nlvs[olv->input()]->result()->divert_to(substitute); r1map.insert(olv->result()->origin(), nlvs[olv->input()]); } - for (size_t n = 1; n < ogamma->ninputs(); n++) + for (const auto & oev : ogamma->GetEntryVars()) { - auto oev = util::AssertedCast(ogamma->input(n)); - if (auto argument = to_argument(oev->origin())) + if (auto argument = to_argument(oev.input->origin())) { - r1map.insert(oev->argument(0), nlvs[argument->input()]); + r1map.insert(oev.branchArgument[0], nlvs[argument->input()]); } else { - auto substitute = r1map.lookup(oev->origin()); - nlvs[oev]->result()->divert_to(substitute); - r1map.insert(oev->argument(0), nlvs[oev]); + auto substitute = r1map.lookup(oev.input->origin()); + nlvs[oev.input]->result()->divert_to(substitute); + r1map.insert(oev.branchArgument[0], nlvs[oev.input]); } } @@ -276,7 +274,7 @@ invert(rvsdg::ThetaNode * otheta) { auto o0 = r0map.lookup(olv->result()->origin()); auto o1 = r1map.lookup(olv->result()->origin()); - auto ex = ngamma->add_exitvar({ o0, o1 }); + auto ex = ngamma->AddExitVar({ o0, o1 }).output; smap.insert(olv, ex); } diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 133399b82..2fe54ce7d 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -70,9 +70,9 @@ single_successor(const jlm::rvsdg::node * node) } static void -remove(rvsdg::GammaInput * input) +remove(rvsdg::input * input) { - auto gamma = input->node(); + auto gamma = jlm::util::AssertedCast(rvsdg::input::GetNode(*input)); for (size_t n = 0; n < gamma->nsubregions(); n++) gamma->subregion(n)->RemoveArgument(input->index() - 1); @@ -86,9 +86,10 @@ pullin_node(rvsdg::GammaNode * gamma, jlm::rvsdg::node * 
node) std::vector> operands(gamma->nsubregions()); for (size_t i = 0; i < node->ninputs(); i++) { - auto ev = gamma->add_entryvar(node->input(i)->origin()); - for (size_t a = 0; a < ev->narguments(); a++) - operands[a].push_back(ev->argument(a)); + auto ev = gamma->AddEntryVar(node->input(i)->origin()); + std::size_t index = 0; + for (auto input : ev.branchArgument) + operands[index++].push_back(input); } /* copy node into subregions */ @@ -119,7 +120,7 @@ cleanup(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) for (size_t n = 0; n < node->noutputs(); n++) { while (node->output(n)->nusers() != 0) - remove(util::AssertedCast(*node->output(n)->begin())); + remove(*node->output(n)->begin()); } remove(node); } @@ -128,10 +129,12 @@ void pullin_top(rvsdg::GammaNode * gamma) { /* FIXME: This is inefficient. We can do better. */ - auto ev = gamma->begin_entryvar(); - while (ev != gamma->end_entryvar()) + auto evs = gamma->GetEntryVars(); + size_t index = 0; + while (index < evs.size()) { - auto node = jlm::rvsdg::output::GetNode(*ev->origin()); + const auto & ev = evs[index]; + auto node = jlm::rvsdg::output::GetNode(*ev.input->origin()); auto tmp = jlm::rvsdg::output::GetNode(*gamma->predicate()->origin()); if (node && tmp != node && single_successor(node)) { @@ -139,11 +142,12 @@ pullin_top(rvsdg::GammaNode * gamma) cleanup(gamma, node); - ev = gamma->begin_entryvar(); + evs = gamma->GetEntryVars(); + index = 0; } else { - ev++; + index++; } } } @@ -185,8 +189,8 @@ pullin_bottom(rvsdg::GammaNode * gamma) } else { - auto ev = gamma->add_entryvar(input->origin()); - operands.push_back(ev->argument(r)); + auto ev = gamma->AddEntryVar(input->origin()); + operands.push_back(ev.branchArgument[r]); } } @@ -206,7 +210,7 @@ pullin_bottom(rvsdg::GammaNode * gamma) workset.insert(tmp); } - auto xv = gamma->add_exitvar(outputs[n]); + auto xv = gamma->AddExitVar(outputs[n]).output; output->divert_users(xv); } } @@ -218,12 +222,12 @@ is_used_in_nsubregions(const rvsdg::GammaNode * 
gamma, const jlm::rvsdg::node * JLM_ASSERT(single_successor(node)); /* collect all gamma inputs */ - std::unordered_set inputs; + std::unordered_set inputs; for (size_t n = 0; n < node->noutputs(); n++) { for (const auto & user : *(node->output(n))) { - inputs.insert(util::AssertedCast(user)); + inputs.insert(user); } } @@ -231,10 +235,10 @@ is_used_in_nsubregions(const rvsdg::GammaNode * gamma, const jlm::rvsdg::node * std::unordered_set subregions; for (const auto & input : inputs) { - for (const auto & argument : *input) + for (const auto & argument : gamma->MapInputEntryVar(*input).branchArgument) { - if (argument.nusers() != 0) - subregions.insert(argument.region()); + if (argument->nusers() != 0) + subregions.insert(argument->region()); } } @@ -254,13 +258,15 @@ pull(rvsdg::GammaNode * gamma) auto prednode = jlm::rvsdg::output::GetNode(*gamma->predicate()->origin()); /* FIXME: This is inefficient. We can do better. */ - auto ev = gamma->begin_entryvar(); - while (ev != gamma->end_entryvar()) + auto evs = gamma->GetEntryVars(); + size_t index = 0; + while (index < evs.size()) { - auto node = jlm::rvsdg::output::GetNode(*ev->origin()); + const auto & ev = evs[index]; + auto node = jlm::rvsdg::output::GetNode(*ev.input->origin()); if (!node || prednode == node || !single_successor(node)) { - ev++; + index++; continue; } @@ -272,11 +278,12 @@ pull(rvsdg::GammaNode * gamma) */ pullin_node(gamma, node); cleanup(gamma, node); - ev = gamma->begin_entryvar(); + evs = gamma->GetEntryVars(); + index = 0; } else { - ev++; + index++; } } } diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 02751a2fc..4a3f7d210 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -115,9 +115,9 @@ copy_from_gamma(jlm::rvsdg::node * node, size_t r) auto copy = node->copy(target, operands); for (size_t n = 0; n < copy->noutputs(); n++) { - auto ev = gamma->add_entryvar(copy->output(n)); - node->output(n)->divert_users(ev->argument(r)); - 
arguments.push_back(ev->argument(r)); + auto ev = gamma->AddEntryVar(copy->output(n)); + node->output(n)->divert_users(ev.branchArgument[r]); + arguments.push_back(util::AssertedCast(ev.branchArgument[r])); } return arguments; diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 88ab3cd67..4088d82a8 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -405,9 +405,9 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) rvsdg::SubstitutionMap rmap[2]; for (const auto & olv : *otheta) { - auto ev = ngamma->add_entryvar(olv->input()->origin()); - auto nlv = ntheta->add_loopvar(ev->argument(1)); - rmap[0].insert(olv, ev->argument(0)); + auto ev = ngamma->AddEntryVar(olv->input()->origin()); + auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); + rmap[0].insert(olv, ev.branchArgument[0]); rmap[1].insert(olv->argument(), nlv->argument()); } @@ -424,7 +424,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) for (const auto & olv : *otheta) { - auto xv = ngamma->add_exitvar({ rmap[0].lookup(olv), rmap[1].lookup(olv) }); + auto xv = ngamma->AddExitVar({ rmap[0].lookup(olv), rmap[1].lookup(olv) }).output; smap.insert(olv, xv); } } @@ -438,9 +438,9 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) rvsdg::SubstitutionMap rmap[2]; for (const auto & olv : *otheta) { - auto ev = ngamma->add_entryvar(smap.lookup(olv)); - auto nlv = ntheta->add_loopvar(ev->argument(1)); - rmap[0].insert(olv, ev->argument(0)); + auto ev = ngamma->AddEntryVar(smap.lookup(olv)); + auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); + rmap[0].insert(olv, ev.branchArgument[0]); rmap[1].insert(olv->argument(), nlv->argument()); } @@ -451,7 +451,7 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) { auto origin = rmap[1].lookup((*olv)->result()->origin()); (*nlv)->result()->divert_to(origin); - auto xv = ngamma->add_exitvar({ rmap[0].lookup(*olv), *nlv }); + auto xv = ngamma->AddExitVar({ rmap[0].lookup(*olv), *nlv }).output; 
smap.insert(*olv, xv); } } diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index 048a4f67e..726434ac6 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -335,7 +335,7 @@ MlirToJlmConverter::ConvertOperation( // Add inputs to the gamma node and to all it's subregions for (size_t i = 1; i < inputs.size(); i++) { - rvsdgGammaNode->add_entryvar(inputs[i]); + rvsdgGammaNode->AddEntryVar(inputs[i]); } ::llvm::SmallVector<::llvm::SmallVector> regionResults; @@ -355,7 +355,7 @@ MlirToJlmConverter::ConvertOperation( JLM_ASSERT(regionResults[regionIndex].size() == regionResults[0].size()); exitvars.push_back(regionResults[regionIndex][exitvarIndex]); } - rvsdgGammaNode->add_exitvar(exitvars); + rvsdgGammaNode->AddExitVar(exitvars); } return rvsdgGammaNode; diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 7e3946d95..43dba7bf6 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -29,13 +29,13 @@ perform_predicate_reduction(GammaNode * gamma) auto alternative = cop->value().alternative(); rvsdg::SubstitutionMap smap; - for (auto it = gamma->begin_entryvar(); it != gamma->end_entryvar(); it++) - smap.insert(it->argument(alternative), it->origin()); + for (const auto & ev : gamma->GetEntryVars()) + smap.insert(ev.branchArgument[alternative], ev.input->origin()); gamma->subregion(alternative)->copy(gamma->region(), smap, false, false); - for (auto it = gamma->begin_exitvar(); it != gamma->end_exitvar(); it++) - it->divert_users(smap.lookup(it->result(alternative)->origin())); + for (auto exitvar : gamma->GetExitVars()) + exitvar.output->divert_users(smap.lookup(exitvar.branchResult[alternative]->origin())); remove(gamma); } @@ -44,24 +44,25 @@ static bool perform_invariant_reduction(GammaNode * gamma) { bool was_normalized = true; - for (auto it = gamma->begin_exitvar(); it != gamma->end_exitvar(); it++) + for (auto exitvar : gamma->GetExitVars()) { - auto 
argument = dynamic_cast(it->result(0)->origin()); + auto argument = dynamic_cast(exitvar.branchResult[0]->origin()); if (!argument) continue; size_t n; auto input = argument->input(); - for (n = 1; n < it->nresults(); n++) + for (n = 1; n < exitvar.branchResult.size(); n++) { - auto argument = dynamic_cast(it->result(n)->origin()); + auto argument = + dynamic_cast(exitvar.branchResult[n]->origin()); if (!argument && argument->input() != input) break; } - if (n == it->nresults()) + if (n == exitvar.branchResult.size()) { - it->divert_users(argument->input()->origin()); + exitvar.output->divert_users(argument->input()->origin()); was_normalized = false; } } @@ -69,7 +70,7 @@ perform_invariant_reduction(GammaNode * gamma) return was_normalized; } -static std::unordered_set +static std::unordered_set is_control_constant_reducible(GammaNode * gamma) { /* check gamma predicate */ @@ -87,16 +88,16 @@ is_control_constant_reducible(GammaNode * gamma) return {}; /* check for constants */ - std::unordered_set outputs; - for (auto it = gamma->begin_exitvar(); it != gamma->end_exitvar(); it++) + std::unordered_set outputs; + for (const auto & exitvar : gamma->GetExitVars()) { - if (!is_ctltype(it->type())) + if (!is_ctltype(exitvar.output->type())) continue; size_t n; - for (n = 0; n < it->nresults(); n++) + for (n = 0; n < exitvar.branchResult.size(); n++) { - auto node = output::GetNode(*it->result(n)->origin()); + auto node = output::GetNode(*exitvar.branchResult[n]->origin()); if (!is(node)) break; @@ -104,18 +105,18 @@ is_control_constant_reducible(GammaNode * gamma) if (op->value().nalternatives() != 2) break; } - if (n == it->nresults()) - outputs.insert(it.output()); + if (n == exitvar.branchResult.size()) + outputs.insert(exitvar.output); } return outputs; } static void -perform_control_constant_reduction(std::unordered_set & outputs) +perform_control_constant_reduction(std::unordered_set & outputs) { - auto gamma = static_cast((*outputs.begin())->node()); - auto 
origin = static_cast(gamma->predicate()->origin()); + auto & gamma = rvsdg::AssertGetOwnerNode(**outputs.begin()); + auto origin = static_cast(gamma.predicate()->origin()); auto match = origin->node(); auto & match_op = to_match_op(match->GetOperation()); @@ -123,17 +124,17 @@ perform_control_constant_reduction(std::unordered_set & outp for (const auto & pair : match_op) map[pair.second] = pair.first; - for (auto xv = gamma->begin_exitvar(); xv != gamma->end_exitvar(); xv++) + for (const auto & xv : gamma.GetExitVars()) { - if (outputs.find(xv.output()) == outputs.end()) + if (outputs.find(xv.output) == outputs.end()) continue; size_t defalt = 0; size_t nalternatives = 0; std::unordered_map new_mapping; - for (size_t n = 0; n < xv->nresults(); n++) + for (size_t n = 0; n < xv.branchResult.size(); n++) { - auto origin = static_cast(xv->result(n)->origin()); + auto origin = static_cast(xv.branchResult[n]->origin()); auto & value = to_ctlconstant_op(origin->node()->GetOperation()).value(); nalternatives = value.nalternatives(); if (map.find(n) != map.end()) @@ -144,7 +145,7 @@ perform_control_constant_reduction(std::unordered_set & outp auto origin = match->input(0)->origin(); auto m = jlm::rvsdg::match(match_op.nbits(), new_mapping, defalt, nalternatives, origin); - xv->divert_users(m); + xv.output->divert_users(m); } } @@ -265,79 +266,132 @@ GammaOperation::operator==(const operation & other) const noexcept return op && op->nalternatives_ == nalternatives_; } -/* gamma input */ - -GammaInput::~GammaInput() noexcept = default; +/* gamma node */ -/* gamma output */ +GammaNode::~GammaNode() noexcept = default; -GammaOutput::~GammaOutput() noexcept = default; +GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) + : StructuralNode(GammaOperation(nalternatives), predicate->region(), nalternatives) +{ + node::add_input(std::unique_ptr( + new rvsdg::StructuralInput(this, predicate, ControlType::Create(nalternatives)))); +} -bool 
-GammaOutput::IsInvariant(rvsdg::output ** invariantOrigin) const noexcept +GammaNode::EntryVar +GammaNode::AddEntryVar(rvsdg::output * origin) { - auto argument = dynamic_cast(result(0)->origin()); - if (!argument) + auto gammaInput = new StructuralInput(this, origin, origin->Type()); + node::add_input(std::unique_ptr(gammaInput)); + + EntryVar ev; + ev.input = gammaInput; + + for (size_t n = 0; n < nsubregions(); n++) { - return false; + ev.branchArgument.push_back( + &RegionArgument::Create(*subregion(n), gammaInput, gammaInput->Type())); } - size_t n; - auto origin = argument->input()->origin(); - for (n = 1; n < nresults(); n++) + return ev; +} + +GammaNode::EntryVar +GammaNode::GetEntryVar(std::size_t index) const +{ + JLM_ASSERT(index <= ninputs() - 1); + EntryVar ev; + ev.input = input(index + 1); + for (size_t n = 0; n < nsubregions(); ++n) { - argument = dynamic_cast(result(n)->origin()); - if (argument == nullptr || argument->input()->origin() != origin) - break; + ev.branchArgument.push_back(subregion(n)->argument(index)); } + return ev; +} - auto isInvariant = (n == nresults()); - if (isInvariant && invariantOrigin != nullptr) +std::vector +GammaNode::GetEntryVars() const +{ + std::vector vars; + for (size_t n = 0; n < ninputs() - 1; ++n) { - *invariantOrigin = origin; + vars.push_back(GetEntryVar(n)); } - - return isInvariant; + return vars; } -/* gamma node */ +GammaNode::EntryVar +GammaNode::MapInputEntryVar(const rvsdg::input & input) const +{ + JLM_ASSERT(rvsdg::TryGetOwnerNode(input) == this); + JLM_ASSERT(input.index() != 0); + return GetEntryVar(input.index() - 1); +} -GammaNode::~GammaNode() noexcept = default; +GammaNode::EntryVar +GammaNode::MapBranchArgumentEntryVar(const rvsdg::output & output) const +{ + JLM_ASSERT(rvsdg::TryGetRegionParentNode(output) == this); + return GetEntryVar(output.index()); +} -const GammaNode::entryvar_iterator & -GammaNode::entryvar_iterator::operator++() noexcept +GammaNode::ExitVar 
+GammaNode::AddExitVar(std::vector values) { - if (input_ == nullptr) - return *this; + if (values.size() != nsubregions()) + throw jlm::util::error("Incorrect number of values."); - auto node = input_->node(); - auto index = input_->index(); - if (index == node->ninputs() - 1) + const auto & type = values[0]->Type(); + auto output = static_cast( + node::add_output(std::make_unique(this, type))); + + std::vector branchResults; + for (size_t n = 0; n < nsubregions(); n++) { - input_ = nullptr; - return *this; + branchResults.push_back( + &rvsdg::RegionResult::Create(*subregion(n), *values[n], output, output->Type())); } - input_ = static_cast(node->input(++index)); - return *this; + return ExitVar{ std::move(branchResults), std::move(output) }; } -const GammaNode::exitvar_iterator & -GammaNode::exitvar_iterator::operator++() noexcept +std::vector +GammaNode::GetExitVars() const { - if (output_ == nullptr) - return *this; + std::vector vars; + for (size_t n = 0; n < noutputs(); ++n) + { + std::vector branchResults; + for (size_t k = 0; k < nsubregions(); ++k) + { + branchResults.push_back(subregion(k)->result(n)); + } + vars.push_back(ExitVar{ std::move(branchResults), output(n) }); + } + return vars; +} - auto node = output_->node(); - auto index = output_->index(); - if (index == node->nexitvars() - 1) +GammaNode::ExitVar +GammaNode::MapOutputExitVar(const rvsdg::output & output) const +{ + JLM_ASSERT(TryGetOwnerNode(output) == this); + std::vector branchResults; + for (size_t k = 0; k < nsubregions(); ++k) { - output_ = nullptr; - return *this; + branchResults.push_back(subregion(k)->result(output.index())); } + return ExitVar{ std::move(branchResults), node::output(output.index()) }; +} - output_ = node->exitvar(++index); - return *this; +GammaNode::ExitVar +GammaNode::MapBranchResultExitVar(const rvsdg::input & input) const +{ + JLM_ASSERT(TryGetRegionParentNode(input) == this); + std::vector branchResults; + for (size_t k = 0; k < nsubregions(); ++k) + { + 
branchResults.push_back(subregion(k)->result(input.index())); + } + return ExitVar{ std::move(branchResults), node::output(input.index()) }; } GammaNode * @@ -347,11 +401,11 @@ GammaNode::copy(rvsdg::Region * region, SubstitutionMap & smap) const /* add entry variables to new gamma */ std::vector rmap(nsubregions()); - for (auto oev = begin_entryvar(); oev != end_entryvar(); oev++) + for (const auto & oev : GetEntryVars()) { - auto nev = gamma->add_entryvar(smap.lookup(oev->origin())); - for (size_t n = 0; n < nev->narguments(); n++) - rmap[n].insert(oev->argument(n), nev->argument(n)); + auto nev = gamma->AddEntryVar(smap.lookup(oev.input->origin())); + for (size_t n = 0; n < nsubregions(); n++) + rmap[n].insert(oev.branchArgument[n], nev.branchArgument[n]); } /* copy subregions */ @@ -359,34 +413,52 @@ GammaNode::copy(rvsdg::Region * region, SubstitutionMap & smap) const subregion(r)->copy(gamma->subregion(r), rmap[r], false, false); /* add exit variables to new gamma */ - for (auto oex = begin_exitvar(); oex != end_exitvar(); oex++) + for (const auto & oex : GetExitVars()) { std::vector operands; - for (size_t n = 0; n < oex->nresults(); n++) - operands.push_back(rmap[n].lookup(oex->result(n)->origin())); - auto nex = gamma->add_exitvar(operands); - smap.insert(oex.output(), nex); + for (size_t n = 0; n < oex.branchResult.size(); n++) + operands.push_back(rmap[n].lookup(oex.branchResult[n]->origin())); + auto nex = gamma->AddExitVar(std::move(operands)); + smap.insert(oex.output, nex.output); } return gamma; } -GammaArgument::~GammaArgument() noexcept = default; - -GammaArgument & -GammaArgument::Copy(rvsdg::Region & region, StructuralInput * input) +std::optional +GetGammaInvariantOrigin(const GammaNode & gamma, const GammaNode::ExitVar & exitvar) { - auto gammaInput = util::AssertedCast(input); - return Create(region, *gammaInput); -} + // For any region result, check if it directly maps to a + // gamma entry variable, and returns the origin of its + // 
corresponding value (the def site preceding the gamma node). + auto GetExternalOriginOf = [&gamma](rvsdg::input * use) -> std::optional + { + // Test whether origin of this is a region entry argument of + // this gamma node. + auto def = use->origin(); + if (rvsdg::TryGetRegionParentNode(*def) != &gamma) + { + return std::nullopt; + } + return gamma.MapBranchArgumentEntryVar(*def).input->origin(); + }; -GammaResult::~GammaResult() noexcept = default; + auto firstOrigin = GetExternalOriginOf(exitvar.branchResult[0]); + if (!firstOrigin) + { + return std::nullopt; + } -GammaResult & -GammaResult::Copy(rvsdg::output & origin, StructuralOutput * output) -{ - auto gammaOutput = util::AssertedCast(output); - return GammaResult::Create(*origin.region(), origin, *gammaOutput); + for (size_t n = 1; n < exitvar.branchResult.size(); ++n) + { + auto currentOrigin = GetExternalOriginOf(exitvar.branchResult[n]); + if (!currentOrigin || *firstOrigin != *currentOrigin) + { + return std::nullopt; + } + } + + return firstOrigin; } } diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index c77157b66..d842f10f4 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -7,6 +7,8 @@ #ifndef JLM_RVSDG_GAMMA_HPP #define JLM_RVSDG_GAMMA_HPP +#include + #include #include #include @@ -106,9 +108,6 @@ class GammaOperation final : public StructuralOperation /* gamma node */ -class GammaInput; -class GammaOutput; - class GammaNode : public StructuralNode { public: @@ -117,180 +116,184 @@ class GammaNode : public StructuralNode private: GammaNode(rvsdg::output * predicate, size_t nalternatives); - class entryvar_iterator - { - public: - constexpr entryvar_iterator(GammaInput * input) noexcept - : input_(input) - {} - - GammaInput * - input() const noexcept - { - return input_; - } - - const entryvar_iterator & - operator++() noexcept; - - inline const entryvar_iterator - operator++(int) noexcept - { - entryvar_iterator it(*this); - ++(*this); - return it; - } - - inline bool - 
operator==(const entryvar_iterator & other) const noexcept - { - return input_ == other.input_; - } - - inline bool - operator!=(const entryvar_iterator & other) const noexcept - { - return !(*this == other); - } - - GammaInput & - operator*() noexcept - { - return *input_; - } - - GammaInput * - operator->() noexcept - { - return input_; - } - - private: - GammaInput * input_; +public: + /** + * \brief A variable routed into all gamma regions. + */ + struct EntryVar + { + /** + * \brief Variable at entry point (input to gamma node). + */ + rvsdg::input * input; + /** + * \brief Variable inside each of the branch regions (argument per subregion). + */ + std::vector branchArgument; }; - class exitvar_iterator - { - public: - constexpr explicit exitvar_iterator(GammaOutput * output) noexcept - : output_(output) - {} - - [[nodiscard]] GammaOutput * - output() const noexcept - { - return output_; - } - - const exitvar_iterator & - operator++() noexcept; - - inline const exitvar_iterator - operator++(int) noexcept - { - exitvar_iterator it(*this); - ++(*this); - return it; - } - - inline bool - operator==(const exitvar_iterator & other) const noexcept - { - return output_ == other.output_; - } - - inline bool - operator!=(const exitvar_iterator & other) const noexcept - { - return !(*this == other); - } - - GammaOutput & - operator*() noexcept - { - return *output_; - } - - GammaOutput * - operator->() noexcept - { - return output_; - } - - private: - GammaOutput * output_; + /** + * \brief A variable routed out of all gamma regions as result. + */ + struct ExitVar + { + /** + * \brief Variable exit points (results per subregion). + */ + std::vector branchResult; + /** + * \brief Output of gamma. 
+ */ + rvsdg::output * output; }; -public: static GammaNode * create(jlm::rvsdg::output * predicate, size_t nalternatives) { return new GammaNode(predicate, nalternatives); } - inline GammaInput * + inline rvsdg::input * predicate() const noexcept; - inline size_t - nentryvars() const noexcept - { - JLM_ASSERT(node::ninputs() != 0); - return node::ninputs() - 1; - } - - inline size_t - nexitvars() const noexcept - { - return node::noutputs(); - } - - inline GammaInput * - entryvar(size_t index) const noexcept; - - [[nodiscard]] inline GammaOutput * - exitvar(size_t index) const noexcept; + /** + * \brief Routes a variable into the gamma branches. + * + * \param origin + * Value to be routed in. + * + * \returns + * Description of entry variable. + * + * Routes a variable into a gamma region. To access the + * variable in each branch use \ref EntryVar::branchArgument. + */ + EntryVar + AddEntryVar(rvsdg::output * origin); - inline GammaNode::entryvar_iterator - begin_entryvar() const - { - if (nentryvars() == 0) - return entryvar_iterator(nullptr); + /** + * \brief Gets entry variable by index. + * + * \param index + * Index of entry variable + * + * \returns + * Description of entry variable. + * + * Looks up the \p index 'th entry variable into the gamma + * node and returns its description. + */ + EntryVar + GetEntryVar(std::size_t index) const; - return entryvar_iterator(entryvar(0)); - } + /** + * \brief Gets all entry variables for this gamma. + */ + std::vector + GetEntryVars() const; - inline GammaNode::entryvar_iterator - end_entryvar() const - { - return entryvar_iterator(nullptr); - } + /** + * \brief Maps gamma input to entry variable. + * + * \param input + * Input to be mapped. + * + * \returns + * The entry variable description corresponding to this input + * + * \pre + * \p input must be an input of this node and must not be the predicate + * + * Maps the gamma input to the entry variable description corresponding + * to it. 
This allows tracing the value through to users in the
+   * gamma subregions.
+   */
+  EntryVar
+  MapInputEntryVar(const rvsdg::input & input) const;
 
-  inline GammaNode::exitvar_iterator
-  begin_exitvar() const
-  {
-    if (nexitvars() == 0)
-      return exitvar_iterator(nullptr);
+  /**
+   * \brief Maps branch subregion entry argument to gamma entry variable.
+   *
+   * \param output
+   *   The branch argument to be mapped.
+   *
+   * \returns
+   *   The entry variable description corresponding to this argument.
+   *
+   * \pre
+   *   \p output must be the entry argument to a subregion of this gamma node.
+   *
+   * Maps the subregion entry argument to the entry variable description
+   * corresponding to it. This allows tracing the value to users in other
+   * branches as well as to its def site preceding the gamma node.
+   */
+  EntryVar
+  MapBranchArgumentEntryVar(const rvsdg::output & output) const;
 
-    return exitvar_iterator(exitvar(0));
-  }
+  /**
+   * \brief Routes per-branch results of gamma to an output.
+   *
+   * \param values
+   *   Values to be routed out, one per subregion.
+   *
+   * \returns
+   *   Description of exit variable.
+   *
+   * Routes per-branch values for a particular variable
+   * out of the gamma regions and makes it available as
+   * output of the gamma node.
+   */
+  ExitVar
+  AddExitVar(std::vector<rvsdg::output *> values);
 
-  inline GammaNode::exitvar_iterator
-  end_exitvar() const
-  {
-    return exitvar_iterator(nullptr);
-  }
+  /**
+   * \brief Gets all exit variables for this gamma.
+   */
+  std::vector<ExitVar>
+  GetExitVars() const;
 
-  inline GammaInput *
-  add_entryvar(jlm::rvsdg::output * origin);
+  /**
+   * \brief Maps gamma output to exit variable description.
+   *
+   * \param output
+   *   Output to be mapped.
+   *
+   * \returns
+   *   The exit variable description corresponding to this output.
+   *
+   * \pre
+   *   \p output must be an output of this node.
+   *
+   * Maps the gamma output to the exit variable description corresponding
+   * to it. This allows tracing the value through to users in the
+   * gamma subregions.
+   */
+  ExitVar
+  MapOutputExitVar(const rvsdg::output & output) const;
 
-  inline GammaOutput *
-  add_exitvar(const std::vector<jlm::rvsdg::output *> & values);
+  /**
+   * \brief Maps gamma region exit result to exit variable description.
+   *
+   * \param input
+   *   The result to be mapped.
+   *
+   * \returns
+   *   The exit variable description corresponding to this result.
+   *
+   * \pre
+   *   \p input must be a result of a subregion of this node.
+   *
+   * Maps the gamma region result to the exit variable description
+   * corresponding to it.
+   */
+  ExitVar
+  MapBranchResultExitVar(const rvsdg::input & input) const;
 
   /**
    * Removes all gamma outputs and their respective results. The outputs must have no users and
    * match the condition specified by \p match.
    *
-   * @tparam F A type that supports the function call operator: bool operator(const GammaOutput&)
+   * @tparam F A type that supports the function call operator: bool operator(const rvsdg::output&)
    * @param match Defines the condition of the elements to remove.
*/ template @@ -303,7 +306,7 @@ class GammaNode : public StructuralNode void PruneOutputs() { - auto match = [](const GammaOutput &) + auto match = [](const rvsdg::output &) { return true; }; @@ -315,251 +318,34 @@ class GammaNode : public StructuralNode copy(jlm::rvsdg::Region * region, SubstitutionMap & smap) const override; }; -/* gamma input */ - -class GammaInput final : public StructuralInput -{ - friend GammaNode; - -public: - ~GammaInput() noexcept override; - -private: - GammaInput(GammaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) - : StructuralInput(node, origin, std::move(type)) - {} - -public: - GammaNode * - node() const noexcept - { - return static_cast(StructuralInput::node()); - } - - inline argument_list::iterator - begin() - { - return arguments.begin(); - } - - inline argument_list::const_iterator - begin() const - { - return arguments.begin(); - } - - inline argument_list::iterator - end() - { - return arguments.end(); - } - - inline argument_list::const_iterator - end() const - { - return arguments.end(); - } - - inline size_t - narguments() const noexcept - { - return arguments.size(); - } - - [[nodiscard]] RegionArgument * - argument(size_t n) const noexcept - { - JLM_ASSERT(n < narguments()); - auto argument = node()->subregion(n)->argument(index() - 1); - JLM_ASSERT(argument->input() == this); - return argument; - } -}; - -/* gamma output */ - -class GammaOutput final : public StructuralOutput -{ - friend GammaNode; - -public: - ~GammaOutput() noexcept override; - - GammaOutput(GammaNode * node, std::shared_ptr type) - : StructuralOutput(node, std::move(type)) - {} - - GammaNode * - node() const noexcept - { - return static_cast(StructuralOutput::node()); - } - - inline result_list::iterator - begin() - { - return results.begin(); - } - - inline result_list::const_iterator - begin() const - { - return results.begin(); - } - - inline result_list::iterator - end() - { - return results.end(); - } - - inline 
result_list::const_iterator - end() const - { - return results.end(); - } - - inline size_t - nresults() const noexcept - { - return results.size(); - } - - [[nodiscard]] RegionResult * - result(size_t n) const noexcept - { - JLM_ASSERT(n < nresults()); - auto result = node()->subregion(n)->result(index()); - JLM_ASSERT(result->output() == this); - return result; - } - - /** - * Determines whether a gamma output is invariant. - * - * A gamma output is invariant if its value directly originates from gamma inputs and the origin - * of all these inputs is the same. - * - * @param invariantOrigin The origin of the gamma inputs if the gamma output is invariant and \p - * invariantOrigin is unequal NULL. - * @return True if the gamma output is invariant, otherwise false. - */ - bool - IsInvariant(rvsdg::output ** invariantOrigin = nullptr) const noexcept; -}; - -/* gamma node method definitions */ - -inline GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) - : StructuralNode(GammaOperation(nalternatives), predicate->region(), nalternatives) -{ - node::add_input(std::unique_ptr( - new GammaInput(this, predicate, ControlType::Create(nalternatives)))); -} - /** - * Represents a region argument in a gamma subregion. + * \brief Determines whether a gamma exit var is path-invariant. + * + * \param gamma + * The gamma node which we are testing for. + * + * \param exitvar + * Exit variable of the gamma node. + * + * \returns + * The common (invariant) origin of this output, or nullopt. + * + * \pre + * \p exitvar must be an \ref GammaNode::ExitVar of \p gamma + * + * Checks whether the gamma effectively assigns the same input value to + * this exit variable on all paths of the gamma. If this is the case, it + * returns the origin of the common input. 
*/ -class GammaArgument final : public RegionArgument -{ - friend GammaNode; - -public: - ~GammaArgument() noexcept override; - - GammaArgument & - Copy(rvsdg::Region & region, StructuralInput * input) override; +std::optional +GetGammaInvariantOrigin(const GammaNode & gamma, const GammaNode::ExitVar & exitvar); -private: - GammaArgument(rvsdg::Region & region, GammaInput & input) - : RegionArgument(®ion, &input, input.Type()) - {} - - static GammaArgument & - Create(rvsdg::Region & region, GammaInput & input) - { - auto gammaArgument = new GammaArgument(region, input); - region.append_argument(gammaArgument); - return *gammaArgument; - } -}; - -/** - * Represents a region result in a gamma subregion. - */ -class GammaResult final : public RegionResult -{ - friend GammaNode; - -public: - ~GammaResult() noexcept override; - -private: - GammaResult(rvsdg::Region & region, rvsdg::output & origin, GammaOutput & gammaOutput) - : RegionResult(®ion, &origin, &gammaOutput, origin.Type()) - {} - - GammaResult & - Copy(rvsdg::output & origin, StructuralOutput * output) override; - - static GammaResult & - Create(rvsdg::Region & region, rvsdg::output & origin, GammaOutput & gammaOutput) - { - auto gammaResult = new GammaResult(region, origin, gammaOutput); - origin.region()->append_result(gammaResult); - return *gammaResult; - } -}; +/* gamma node method definitions */ -inline GammaInput * +inline rvsdg::input * GammaNode::predicate() const noexcept { - return util::AssertedCast(StructuralNode::input(0)); -} - -inline GammaInput * -GammaNode::entryvar(size_t index) const noexcept -{ - return util::AssertedCast(node::input(index + 1)); -} - -inline GammaOutput * -GammaNode::exitvar(size_t index) const noexcept -{ - return static_cast(node::output(index)); -} - -inline GammaInput * -GammaNode::add_entryvar(jlm::rvsdg::output * origin) -{ - auto input = - node::add_input(std::unique_ptr(new GammaInput(this, origin, origin->Type()))); - auto gammaInput = 
util::AssertedCast(input); - - for (size_t n = 0; n < nsubregions(); n++) - { - GammaArgument::Create(*subregion(n), *gammaInput); - } - - return gammaInput; -} - -inline GammaOutput * -GammaNode::add_exitvar(const std::vector & values) -{ - if (values.size() != nsubregions()) - throw jlm::util::error("Incorrect number of values."); - - const auto & type = values[0]->Type(); - node::add_output(std::make_unique(this, type)); - - auto output = exitvar(nexitvars() - 1); - for (size_t n = 0; n < nsubregions(); n++) - { - GammaResult::Create(*subregion(n), *values[n], *output); - } - - return output; + return StructuralNode::input(0); } template @@ -569,8 +355,7 @@ GammaNode::RemoveGammaOutputsWhere(const F & match) // iterate backwards to avoid the invalidation of 'n' by RemoveOutput() for (size_t n = noutputs() - 1; n != static_cast(-1); n--) { - auto & gammaOutput = *util::AssertedCast(output(n)); - if (gammaOutput.nusers() == 0 && match(gammaOutput)) + if (output(n)->nusers() == 0 && match(*output(n))) { for (size_t r = 0; r < nsubregions(); r++) { diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 094f39a88..1df50e903 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -1336,20 +1336,21 @@ GammaTest::SetupRvsdg() auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 2, biteq); auto gammanode = jlm::rvsdg::GammaNode::create(predicate, 2); - auto p1ev = gammanode->add_entryvar(fct->GetFunctionArguments()[1]); - auto p2ev = gammanode->add_entryvar(fct->GetFunctionArguments()[2]); - auto p3ev = gammanode->add_entryvar(fct->GetFunctionArguments()[3]); - auto p4ev = gammanode->add_entryvar(fct->GetFunctionArguments()[4]); + auto p1ev = gammanode->AddEntryVar(fct->GetFunctionArguments()[1]); + auto p2ev = gammanode->AddEntryVar(fct->GetFunctionArguments()[2]); + auto p3ev = gammanode->AddEntryVar(fct->GetFunctionArguments()[3]); + auto p4ev = gammanode->AddEntryVar(fct->GetFunctionArguments()[4]); - auto tmp1 = gammanode->add_exitvar({ 
p1ev->argument(0), p3ev->argument(1) }); - auto tmp2 = gammanode->add_exitvar({ p2ev->argument(0), p4ev->argument(1) }); + auto tmp1 = gammanode->AddExitVar({ p1ev.branchArgument[0], p3ev.branchArgument[1] }); + auto tmp2 = gammanode->AddExitVar({ p2ev.branchArgument[0], p4ev.branchArgument[1] }); auto ld1 = LoadNonVolatileNode::Create( - tmp1, + tmp1.output, { fct->GetFunctionArguments()[5] }, jlm::rvsdg::bittype::Create(32), 4); - auto ld2 = LoadNonVolatileNode::Create(tmp2, { ld1[1] }, jlm::rvsdg::bittype::Create(32), 4); + auto ld2 = + LoadNonVolatileNode::Create(tmp2.output, { ld1[1] }, jlm::rvsdg::bittype::Create(32), 4); auto sum = jlm::rvsdg::bitadd_op::create(32, ld1[0], ld2[0]); fct->finalize({ sum, ld2[1] }); @@ -1386,39 +1387,39 @@ GammaTest2::SetupRvsdg() { auto gammaNode = rvsdg::GammaNode::create(predicate, 2); - auto gammaInputX = gammaNode->add_entryvar(xAddress); - auto gammaInputY = gammaNode->add_entryvar(yAddress); - auto gammaInputZ = gammaNode->add_entryvar(zAddress); - auto gammaInputMemoryState = gammaNode->add_entryvar(memoryState); + auto gammaInputX = gammaNode->AddEntryVar(xAddress); + auto gammaInputY = gammaNode->AddEntryVar(yAddress); + auto gammaInputZ = gammaNode->AddEntryVar(zAddress); + auto gammaInputMemoryState = gammaNode->AddEntryVar(memoryState); // gamma subregion 0 auto loadXResults = LoadNonVolatileNode::Create( - gammaInputX->argument(0), - { gammaInputMemoryState->argument(0) }, + gammaInputX.branchArgument[0], + { gammaInputMemoryState.branchArgument[0] }, jlm::rvsdg::bittype::Create(32), 4); auto one = rvsdg::create_bitconstant(gammaNode->subregion(0), 32, 1); auto storeZRegion0Results = - StoreNonVolatileNode::Create(gammaInputZ->argument(0), one, { loadXResults[1] }, 4); + StoreNonVolatileNode::Create(gammaInputZ.branchArgument[0], one, { loadXResults[1] }, 4); // gamma subregion 1 auto loadYResults = LoadNonVolatileNode::Create( - gammaInputY->argument(1), - { gammaInputMemoryState->argument(1) }, + 
gammaInputY.branchArgument[1], + { gammaInputMemoryState.branchArgument[1] }, jlm::rvsdg::bittype::Create(32), 4); auto two = rvsdg::create_bitconstant(gammaNode->subregion(1), 32, 2); auto storeZRegion1Results = - StoreNonVolatileNode::Create(gammaInputZ->argument(1), two, { loadYResults[1] }, 4); + StoreNonVolatileNode::Create(gammaInputZ.branchArgument[1], two, { loadYResults[1] }, 4); // finalize gamma - auto gammaOutputA = gammaNode->add_exitvar({ loadXResults[0], loadYResults[0] }); + auto gammaOutputA = gammaNode->AddExitVar({ loadXResults[0], loadYResults[0] }); auto gammaOutputMemoryState = - gammaNode->add_exitvar({ storeZRegion0Results[0], storeZRegion1Results[0] }); + gammaNode->AddExitVar({ storeZRegion0Results[0], storeZRegion1Results[0] }); - return std::make_tuple(gammaOutputA, gammaOutputMemoryState); + return std::make_tuple(gammaOutputA.output, gammaOutputMemoryState.output); }; auto iOStateType = iostatetype::Create(); @@ -1469,7 +1470,7 @@ GammaTest2::SetupRvsdg() return std::make_tuple( lambda->output(), - gammaOutputA->node(), + &rvsdg::AssertGetOwnerNode(*gammaOutputA), rvsdg::output::GetNode(*allocaZResults[0])); }; @@ -2056,32 +2057,35 @@ PhiTest1::SetupRvsdg() auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 2, bitult); auto gammaNode = jlm::rvsdg::GammaNode::create(predicate, 2); - auto nev = gammaNode->add_entryvar(valueArgument); - auto resultev = gammaNode->add_entryvar(pointerArgument); - auto fibev = gammaNode->add_entryvar(ctxVarFib); - auto gIIoState = gammaNode->add_entryvar(iOStateArgument); - auto gIMemoryState = gammaNode->add_entryvar(memoryStateArgument); + auto nev = gammaNode->AddEntryVar(valueArgument); + auto resultev = gammaNode->AddEntryVar(pointerArgument); + auto fibev = gammaNode->AddEntryVar(ctxVarFib); + auto gIIoState = gammaNode->AddEntryVar(iOStateArgument); + auto gIMemoryState = gammaNode->AddEntryVar(memoryStateArgument); /* gamma subregion 0 */ auto one = 
jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 64, 1); - auto nm1 = jlm::rvsdg::bitsub_op::create(64, nev->argument(0), one); + auto nm1 = jlm::rvsdg::bitsub_op::create(64, nev.branchArgument[0], one); auto & callFibm1 = CallNode::CreateNode( - fibev->argument(0), + fibev.branchArgument[0], fibFunctionType, - { nm1, resultev->argument(0), gIIoState->argument(0), gIMemoryState->argument(0) }); + { nm1, + resultev.branchArgument[0], + gIIoState.branchArgument[0], + gIMemoryState.branchArgument[0] }); two = jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 64, 2); - auto nm2 = jlm::rvsdg::bitsub_op::create(64, nev->argument(0), two); + auto nm2 = jlm::rvsdg::bitsub_op::create(64, nev.branchArgument[0], two); auto & callFibm2 = CallNode::CreateNode( - fibev->argument(0), + fibev.branchArgument[0], fibFunctionType, { nm2, - resultev->argument(0), + resultev.branchArgument[0], callFibm1.GetIoStateOutput(), callFibm1.GetMemoryStateOutput() }); auto gepnm1 = GetElementPtrOperation::Create( - resultev->argument(0), + resultev.branchArgument[0], { nm1 }, jlm::rvsdg::bittype::Create(64), pbit64); @@ -2092,7 +2096,7 @@ PhiTest1::SetupRvsdg() 8); auto gepnm2 = GetElementPtrOperation::Create( - resultev->argument(0), + resultev.branchArgument[0], { nm2 }, jlm::rvsdg::bittype::Create(64), pbit64); @@ -2104,19 +2108,19 @@ PhiTest1::SetupRvsdg() /* gamma subregion 1 */ /* Nothing needs to be done */ - auto sumex = gammaNode->add_exitvar({ sum, nev->argument(1) }); + auto sumex = gammaNode->AddExitVar({ sum, nev.branchArgument[1] }); auto gOIoState = - gammaNode->add_exitvar({ callFibm2.GetIoStateOutput(), gIIoState->argument(1) }); - auto gOMemoryState = gammaNode->add_exitvar({ ldnm2[1], gIMemoryState->argument(1) }); + gammaNode->AddExitVar({ callFibm2.GetIoStateOutput(), gIIoState.branchArgument[1] }); + auto gOMemoryState = gammaNode->AddExitVar({ ldnm2[1], gIMemoryState.branchArgument[1] }); auto gepn = GetElementPtrOperation::Create( pointerArgument, { 
valueArgument }, jlm::rvsdg::bittype::Create(64), pbit64); - auto store = StoreNonVolatileNode::Create(gepn, sumex, { gOMemoryState }, 8); + auto store = StoreNonVolatileNode::Create(gepn, sumex.output, { gOMemoryState.output }, 8); - auto lambdaOutput = lambda->finalize({ gOIoState, store[0] }); + auto lambdaOutput = lambda->finalize({ gOIoState.output, store[0] }); fibrv->result()->divert_to(lambdaOutput); auto phiNode = pb.end(); @@ -3919,21 +3923,24 @@ VariadicFunctionTest2::SetupRvsdg() auto matchResult = rvsdg::match_op::Create(*icmpResult, { { 1, 1 } }, 0, 2); auto gammaNode = rvsdg::GammaNode::create(matchResult, 2); - auto gammaVaAddress = gammaNode->add_entryvar(allocaResults[0]); - auto gammaLoadResult = gammaNode->add_entryvar(loadResults[0]); - auto gammaMemoryState = gammaNode->add_entryvar(loadResults[1]); + auto gammaVaAddress = gammaNode->AddEntryVar(allocaResults[0]); + auto gammaLoadResult = gammaNode->AddEntryVar(loadResults[0]); + auto gammaMemoryState = gammaNode->AddEntryVar(loadResults[1]); // gamma subregion 0 auto zero = jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 64, 0); auto two = jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 32, 2); auto eight = jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 64, 8); auto gepResult1 = GetElementPtrOperation::Create( - gammaVaAddress->argument(0), + gammaVaAddress.branchArgument[0], { zero, two }, structType, pointerType); - auto loadResultsGamma0 = - LoadNonVolatileNode::Create(gepResult1, { gammaMemoryState->argument(0) }, pointerType, 8); + auto loadResultsGamma0 = LoadNonVolatileNode::Create( + gepResult1, + { gammaMemoryState.branchArgument[0] }, + pointerType, + 8); auto gepResult2 = GetElementPtrOperation::Create( loadResultsGamma0[0], { eight }, @@ -3947,32 +3954,36 @@ VariadicFunctionTest2::SetupRvsdg() auto eightBit32 = jlm::rvsdg::create_bitconstant(gammaNode->subregion(1), 32, 8); auto three = jlm::rvsdg::create_bitconstant(gammaNode->subregion(1), 32, 3); 
gepResult1 = GetElementPtrOperation::Create( - gammaVaAddress->argument(1), + gammaVaAddress.branchArgument[1], { zero, three }, structType, pointerType); - auto loadResultsGamma1 = - LoadNonVolatileNode::Create(gepResult1, { gammaMemoryState->argument(1) }, pointerType, 16); - auto & zextResult = zext_op::Create(*gammaLoadResult->argument(1), rvsdg::bittype::Create(64)); + auto loadResultsGamma1 = LoadNonVolatileNode::Create( + gepResult1, + { gammaMemoryState.branchArgument[1] }, + pointerType, + 16); + auto & zextResult = + zext_op::Create(*gammaLoadResult.branchArgument[1], rvsdg::bittype::Create(64)); gepResult2 = GetElementPtrOperation::Create( loadResultsGamma1[0], { &zextResult }, rvsdg::bittype::Create(8), pointerType); - auto addResult = rvsdg::bitadd_op::create(32, gammaLoadResult->argument(1), eightBit32); + auto addResult = rvsdg::bitadd_op::create(32, gammaLoadResult.branchArgument[1], eightBit32); auto storeResultsGamma1 = StoreNonVolatileNode::Create( - gammaVaAddress->argument(1), + gammaVaAddress.branchArgument[1], addResult, { loadResultsGamma1[1] }, 16); - auto gammaAddress = gammaNode->add_exitvar({ loadResultsGamma0[0], gepResult2 }); + auto gammaAddress = gammaNode->AddExitVar({ loadResultsGamma0[0], gepResult2 }); auto gammaOutputMemoryState = - gammaNode->add_exitvar({ storeResultsGamma0[0], storeResultsGamma1[0] }); + gammaNode->AddExitVar({ storeResultsGamma0[0], storeResultsGamma1[0] }); loadResults = LoadNonVolatileNode::Create( - gammaAddress, - { gammaOutputMemoryState }, + gammaAddress.output, + { gammaOutputMemoryState.output }, rvsdg::bittype::Create(32), 4); auto & callVaEnd = CallNode::CreateNode( diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index 6207a6969..8b319291e 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -30,11 +30,11 @@ TestWithMatch() auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, 
lambda->GetFunctionArguments()[0]); auto gamma = jlm::rvsdg::GammaNode::create(match, 2); - auto ev1 = gamma->add_entryvar(lambda->GetFunctionArguments()[1]); - auto ev2 = gamma->add_entryvar(lambda->GetFunctionArguments()[2]); - auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); + auto ev1 = gamma->AddEntryVar(lambda->GetFunctionArguments()[1]); + auto ev2 = gamma->AddEntryVar(lambda->GetFunctionArguments()[2]); + auto ex = gamma->AddExitVar({ ev1.branchArgument[0], ev2.branchArgument[1] }); - auto f = lambda->finalize({ ex }); + auto f = lambda->finalize({ ex.output }); jlm::llvm::GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); @@ -66,11 +66,11 @@ TestWithoutMatch() auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); auto gamma = jlm::rvsdg::GammaNode::create(lambda->GetFunctionArguments()[0], 2); - auto ev1 = gamma->add_entryvar(lambda->GetFunctionArguments()[1]); - auto ev2 = gamma->add_entryvar(lambda->GetFunctionArguments()[2]); - auto ex = gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); + auto ev1 = gamma->AddEntryVar(lambda->GetFunctionArguments()[1]); + auto ev2 = gamma->AddEntryVar(lambda->GetFunctionArguments()[2]); + auto ex = gamma->AddExitVar({ ev1.branchArgument[0], ev2.branchArgument[1] }); - auto f = lambda->finalize({ ex }); + auto f = lambda->finalize({ ex.output }); jlm::llvm::GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 86c9a923f..7fa36f34e 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -32,30 +32,29 @@ TestGamma() auto gammaNode = jlm::rvsdg::GammaNode::create(p, 2); - auto gammaInput1 = gammaNode->add_entryvar(x); - auto gammaInput2 = gammaNode->add_entryvar(y); - auto gammaInput3 = 
gammaNode->add_entryvar(z); - auto gammaInput4 = gammaNode->add_entryvar(x); - auto gammaInput5 = gammaNode->add_entryvar(x); - auto gammaInput6 = gammaNode->add_entryvar(x); - auto gammaInput7 = gammaNode->add_entryvar(x); - - auto gammaOutput1 = - gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput1->argument(1) }); + auto gammaInput1 = gammaNode->AddEntryVar(x); + auto gammaInput2 = gammaNode->AddEntryVar(y); + auto gammaInput3 = gammaNode->AddEntryVar(z); + auto gammaInput4 = gammaNode->AddEntryVar(x); + auto gammaInput5 = gammaNode->AddEntryVar(x); + auto gammaInput6 = gammaNode->AddEntryVar(x); + auto gammaInput7 = gammaNode->AddEntryVar(x); + + auto gammaOutput1 = gammaNode->AddExitVar(gammaInput1.branchArgument); auto gammaOutput2 = - gammaNode->add_exitvar({ gammaInput2->argument(0), gammaInput3->argument(1) }); + gammaNode->AddExitVar({ gammaInput2.branchArgument[0], gammaInput3.branchArgument[1] }); auto gammaOutput3 = - gammaNode->add_exitvar({ gammaInput4->argument(0), gammaInput5->argument(1) }); + gammaNode->AddExitVar({ gammaInput4.branchArgument[0], gammaInput5.branchArgument[1] }); auto gammaOutput4 = - gammaNode->add_exitvar({ gammaInput6->argument(0), gammaInput6->argument(1) }); + gammaNode->AddExitVar({ gammaInput6.branchArgument[0], gammaInput6.branchArgument[1] }); auto gammaOutput5 = - gammaNode->add_exitvar({ gammaInput6->argument(0), gammaInput7->argument(1) }); + gammaNode->AddExitVar({ gammaInput6.branchArgument[0], gammaInput7.branchArgument[1] }); - GraphExport::Create(*gammaOutput1, ""); - GraphExport::Create(*gammaOutput2, ""); - GraphExport::Create(*gammaOutput3, ""); - GraphExport::Create(*gammaOutput4, ""); - GraphExport::Create(*gammaOutput5, ""); + GraphExport::Create(*gammaOutput1.output, ""); + GraphExport::Create(*gammaOutput2.output, ""); + GraphExport::Create(*gammaOutput3.output, ""); + GraphExport::Create(*gammaOutput4.output, ""); + GraphExport::Create(*gammaOutput5.output, ""); // Act 
jlm::hls::RemoveUnusedStates(*rvsdgModule); @@ -63,8 +62,8 @@ TestGamma() // Assert assert(gammaNode->ninputs() == 7); // gammaInput1 was removed assert(gammaNode->noutputs() == 4); // gammaOutput1 was removed - assert(gammaInput2->index() == 1); - assert(gammaOutput2->index() == 0); + assert(gammaInput2.input->index() == 1); + assert(gammaOutput2.output->index() == 0); // FIXME: The transformation is way too conservative here. The only input and output it removes // are gammaInput1 and gammaOutput1, respectively. However, it could also remove gammaOutput3, // gammaOutput4, and gammaOutput5 as they are all invariant. This in turn would also render some diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp index 68caecaab..80a862e38 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -40,11 +40,12 @@ GammaWithMatch() auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->GetFunctionArguments()[0]); auto gamma = jlm::rvsdg::GammaNode::create(match, 2); - auto gammaInput1 = gamma->add_entryvar(lambdaNode->GetFunctionArguments()[1]); - auto gammaInput2 = gamma->add_entryvar(lambdaNode->GetFunctionArguments()[2]); - auto gammaOutput = gamma->add_exitvar({ gammaInput1->argument(0), gammaInput2->argument(1) }); + auto gammaInput1 = gamma->AddEntryVar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput2 = gamma->AddEntryVar(lambdaNode->GetFunctionArguments()[2]); + auto gammaOutput = + gamma->AddExitVar({ gammaInput1.branchArgument[0], gammaInput2.branchArgument[1] }); - auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + auto lambdaOutput = lambdaNode->finalize({ gammaOutput.output }); jlm::llvm::GraphExport::Create(*lambdaOutput, ""); view(rvsdgModule.Rvsdg(), stdout); @@ -93,11 +94,12 @@ GammaWithoutMatch() linkage::external_linkage); auto gammaNode = jlm::rvsdg::GammaNode::create(lambdaNode->GetFunctionArguments()[0], 2); - 
auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[1]); - auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[2]); - auto gammaOutput = gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput2->argument(1) }); + auto gammaInput1 = gammaNode->AddEntryVar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput2 = gammaNode->AddEntryVar(lambdaNode->GetFunctionArguments()[2]); + auto gammaOutput = + gammaNode->AddExitVar({ gammaInput1.branchArgument[0], gammaInput2.branchArgument[1] }); - auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + auto lambdaOutput = lambdaNode->finalize({ gammaOutput.output }); jlm::llvm::GraphExport::Create(*lambdaOutput, ""); jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); @@ -150,12 +152,13 @@ EmptyGammaWithThreeSubregions() jlm::rvsdg::match(32, { { 0, 0 }, { 1, 1 } }, 2, 3, lambdaNode->GetFunctionArguments()[0]); auto gammaNode = jlm::rvsdg::GammaNode::create(match, 3); - auto gammaInput1 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[1]); - auto gammaInput2 = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[2]); - auto gammaOutput = gammaNode->add_exitvar( - { gammaInput1->argument(0), gammaInput1->argument(1), gammaInput2->argument(2) }); + auto gammaInput1 = gammaNode->AddEntryVar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput2 = gammaNode->AddEntryVar(lambdaNode->GetFunctionArguments()[2]); + auto gammaOutput = gammaNode->AddExitVar({ gammaInput1.branchArgument[0], + gammaInput1.branchArgument[1], + gammaInput2.branchArgument[2] }); - auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + auto lambdaOutput = lambdaNode->finalize({ gammaOutput.output }); jlm::llvm::GraphExport::Create(*lambdaOutput, ""); jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); @@ -201,14 +204,14 @@ PartialEmptyGamma() auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambdaNode->GetFunctionArguments()[0]); auto gammaNode = 
jlm::rvsdg::GammaNode::create(match, 2); - auto gammaInput = gammaNode->add_entryvar(lambdaNode->GetFunctionArguments()[1]); + auto gammaInput = gammaNode->AddEntryVar(lambdaNode->GetFunctionArguments()[1]); auto output = jlm::tests::create_testop( gammaNode->subregion(1), - { gammaInput->argument(1) }, + { gammaInput.branchArgument[1] }, { valueType })[0]; - auto gammaOutput = gammaNode->add_exitvar({ gammaInput->argument(0), output }); + auto gammaOutput = gammaNode->AddExitVar({ gammaInput.branchArgument[0], output }); - auto lambdaOutput = lambdaNode->finalize({ gammaOutput }); + auto lambdaOutput = lambdaNode->finalize({ gammaOutput.output }); jlm::llvm::GraphExport::Create(*lambdaOutput, ""); diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 571a5c5b6..b6aa84eb7 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -202,10 +202,10 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto predicate = jlm::rvsdg::control_false(innerTheta->subregion()); auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); - auto ev = gamma->add_entryvar(itf->argument()); - auto xv = gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); + auto ev = gamma->AddEntryVar(itf->argument()); + auto xv = gamma->AddExitVar(ev.branchArgument); - itf->result()->divert_to(xv); + itf->result()->divert_to(xv.output); otf->result()->divert_to(itf); return otf; @@ -407,29 +407,32 @@ TestCallTypeClassifierRecursiveDirectCall() auto predicate = jlm::rvsdg::match(1, { { 0, 1 } }, 0, 2, bitult); auto gammaNode = jlm::rvsdg::GammaNode::create(predicate, 2); - auto nev = gammaNode->add_entryvar(valueArgument); - auto resultev = gammaNode->add_entryvar(pointerArgument); - auto fibev = gammaNode->add_entryvar(ctxVarFib); - auto gIIoState = gammaNode->add_entryvar(iOStateArgument); - auto gIMemoryState = gammaNode->add_entryvar(memoryStateArgument); + auto nev = 
gammaNode->AddEntryVar(valueArgument); + auto resultev = gammaNode->AddEntryVar(pointerArgument); + auto fibev = gammaNode->AddEntryVar(ctxVarFib); + auto gIIoState = gammaNode->AddEntryVar(iOStateArgument); + auto gIMemoryState = gammaNode->AddEntryVar(memoryStateArgument); /* gamma subregion 0 */ auto one = jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 64, 1); - auto nm1 = jlm::rvsdg::bitsub_op::create(64, nev->argument(0), one); + auto nm1 = jlm::rvsdg::bitsub_op::create(64, nev.branchArgument[0], one); auto callfibm1Results = CallNode::Create( - fibev->argument(0), + fibev.branchArgument[0], functionType, - { nm1, resultev->argument(0), gIIoState->argument(0), gIMemoryState->argument(0) }); + { nm1, + resultev.branchArgument[0], + gIIoState.branchArgument[0], + gIMemoryState.branchArgument[0] }); two = jlm::rvsdg::create_bitconstant(gammaNode->subregion(0), 64, 2); - auto nm2 = jlm::rvsdg::bitsub_op::create(64, nev->argument(0), two); + auto nm2 = jlm::rvsdg::bitsub_op::create(64, nev.branchArgument[0], two); auto callfibm2Results = CallNode::Create( - fibev->argument(0), + fibev.branchArgument[0], functionType, - { nm2, resultev->argument(0), callfibm1Results[0], callfibm1Results[1] }); + { nm2, resultev.branchArgument[0], callfibm1Results[0], callfibm1Results[1] }); auto gepnm1 = GetElementPtrOperation::Create( - resultev->argument(0), + resultev.branchArgument[0], { nm1 }, jlm::rvsdg::bittype::Create(64), pbit64); @@ -440,7 +443,7 @@ TestCallTypeClassifierRecursiveDirectCall() 8); auto gepnm2 = GetElementPtrOperation::Create( - resultev->argument(0), + resultev.branchArgument[0], { nm2 }, jlm::rvsdg::bittype::Create(64), pbit64); @@ -452,18 +455,18 @@ TestCallTypeClassifierRecursiveDirectCall() /* gamma subregion 1 */ /* Nothing needs to be done */ - auto sumex = gammaNode->add_exitvar({ sum, nev->argument(1) }); - auto gOIoState = gammaNode->add_exitvar({ callfibm2Results[0], gIIoState->argument(1) }); - auto gOMemoryState = 
gammaNode->add_exitvar({ ldnm2[1], gIMemoryState->argument(1) }); + auto sumex = gammaNode->AddExitVar({ sum, nev.branchArgument[1] }); + auto gOIoState = gammaNode->AddExitVar({ callfibm2Results[0], gIIoState.branchArgument[1] }); + auto gOMemoryState = gammaNode->AddExitVar({ ldnm2[1], gIMemoryState.branchArgument[1] }); auto gepn = GetElementPtrOperation::Create( pointerArgument, { valueArgument }, jlm::rvsdg::bittype::Create(64), pbit64); - auto store = StoreNonVolatileNode::Create(gepn, sumex, { gOMemoryState }, 8); + auto store = StoreNonVolatileNode::Create(gepn, sumex.output, { gOMemoryState.output }, 8); - auto lambdaOutput = lambda->finalize({ gOIoState, store[0] }); + auto lambdaOutput = lambda->finalize({ gOIoState.output, store[0] }); fibrv->result()->divert_to(lambdaOutput); pb.end(); diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index f6eae7852..6a6ec2212 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -51,18 +51,18 @@ TestGamma() auto y = lambdaNode->GetFunctionArguments()[2]; auto gammaNode1 = jlm::rvsdg::GammaNode::create(c, 2); - auto gammaInput1 = gammaNode1->add_entryvar(c); - auto gammaInput2 = gammaNode1->add_entryvar(x); - auto gammaInput3 = gammaNode1->add_entryvar(y); + auto gammaInput1 = gammaNode1->AddEntryVar(c); + auto gammaInput2 = gammaNode1->AddEntryVar(x); + auto gammaInput3 = gammaNode1->AddEntryVar(y); - auto gammaNode2 = jlm::rvsdg::GammaNode::create(gammaInput1->argument(0), 2); - auto gammaInput4 = gammaNode2->add_entryvar(gammaInput2->argument(0)); - auto gammaInput5 = gammaNode2->add_entryvar(gammaInput3->argument(0)); - gammaNode2->add_exitvar({ gammaInput4->argument(0), gammaInput4->argument(1) }); - gammaNode2->add_exitvar({ gammaInput5->argument(0), gammaInput5->argument(1) }); + auto gammaNode2 = jlm::rvsdg::GammaNode::create(gammaInput1.branchArgument[0], 2); + 
auto gammaInput4 = gammaNode2->AddEntryVar(gammaInput2.branchArgument[0]); + auto gammaInput5 = gammaNode2->AddEntryVar(gammaInput3.branchArgument[0]); + gammaNode2->AddExitVar(gammaInput4.branchArgument); + gammaNode2->AddExitVar(gammaInput5.branchArgument); - gammaNode1->add_exitvar({ gammaNode2->output(0), gammaInput2->argument(1) }); - gammaNode1->add_exitvar({ gammaNode2->output(1), gammaInput3->argument(1) }); + gammaNode1->AddExitVar({ gammaNode2->output(0), gammaInput2.branchArgument[1] }); + gammaNode1->AddExitVar({ gammaNode2->output(1), gammaInput3.branchArgument[1] }); auto lambdaOutput = lambdaNode->finalize({ gammaNode1->output(0), gammaNode1->output(1) }); @@ -163,21 +163,23 @@ TestCall() auto memoryStateArgument = lambdaNode->GetFunctionArguments()[4]; auto gammaNode = jlm::rvsdg::GammaNode::create(controlArgument, 2); - auto gammaInputX = gammaNode->add_entryvar(xArgument); - auto gammaInputY = gammaNode->add_entryvar(yArgument); - auto gammaInputIOState = gammaNode->add_entryvar(ioStateArgument); - auto gammaInputMemoryState = gammaNode->add_entryvar(memoryStateArgument); + auto gammaInputX = gammaNode->AddEntryVar(xArgument); + auto gammaInputY = gammaNode->AddEntryVar(yArgument); + auto gammaInputIOState = gammaNode->AddEntryVar(ioStateArgument); + auto gammaInputMemoryState = gammaNode->AddEntryVar(memoryStateArgument); auto gammaOutputX = - gammaNode->add_exitvar({ gammaInputY->argument(0), gammaInputY->argument(1) }); + gammaNode->AddExitVar({ gammaInputY.branchArgument[0], gammaInputY.branchArgument[1] }); auto gammaOutputY = - gammaNode->add_exitvar({ gammaInputX->argument(0), gammaInputX->argument(1) }); - auto gammaOutputIOState = - gammaNode->add_exitvar({ gammaInputIOState->argument(0), gammaInputIOState->argument(1) }); - auto gammaOutputMemoryState = gammaNode->add_exitvar( - { gammaInputMemoryState->argument(0), gammaInputMemoryState->argument(1) }); - - lambdaOutputTest1 = lambdaNode->finalize( - { gammaOutputX, gammaOutputY, 
gammaOutputIOState, gammaOutputMemoryState }); + gammaNode->AddExitVar({ gammaInputX.branchArgument[0], gammaInputX.branchArgument[1] }); + auto gammaOutputIOState = gammaNode->AddExitVar( + { gammaInputIOState.branchArgument[0], gammaInputIOState.branchArgument[1] }); + auto gammaOutputMemoryState = gammaNode->AddExitVar( + { gammaInputMemoryState.branchArgument[0], gammaInputMemoryState.branchArgument[1] }); + + lambdaOutputTest1 = lambdaNode->finalize({ gammaOutputX.output, + gammaOutputY.output, + gammaOutputIOState.output, + gammaOutputMemoryState.output }); } jlm::rvsdg::output * lambdaOutputTest2; @@ -253,23 +255,20 @@ TestCallWithMemoryStateNodes() auto gammaNode = jlm::rvsdg::GammaNode::create(controlArgument, 2); - auto gammaInputX = gammaNode->add_entryvar(xArgument); - auto gammaInputMemoryState1 = gammaNode->add_entryvar(lambdaEntrySplitResults[0]); - auto gammaInputMemoryState2 = gammaNode->add_entryvar(lambdaEntrySplitResults[1]); + auto gammaInputX = gammaNode->AddEntryVar(xArgument); + auto gammaInputMemoryState1 = gammaNode->AddEntryVar(lambdaEntrySplitResults[0]); + auto gammaInputMemoryState2 = gammaNode->AddEntryVar(lambdaEntrySplitResults[1]); - auto gammaOutputX = - gammaNode->add_exitvar({ gammaInputX->argument(0), gammaInputX->argument(1) }); - auto gammaOutputMemoryState1 = gammaNode->add_exitvar( - { gammaInputMemoryState2->argument(0), gammaInputMemoryState2->argument(1) }); - auto gammaOutputMemoryState2 = gammaNode->add_exitvar( - { gammaInputMemoryState1->argument(0), gammaInputMemoryState1->argument(1) }); + auto gammaOutputX = gammaNode->AddExitVar(gammaInputX.branchArgument); + auto gammaOutputMemoryState1 = gammaNode->AddExitVar(gammaInputMemoryState2.branchArgument); + auto gammaOutputMemoryState2 = gammaNode->AddExitVar(gammaInputMemoryState1.branchArgument); auto & lambdaExitMergeResult = LambdaExitMemoryStateMergeOperation::Create( *lambdaNode->subregion(), - { gammaOutputMemoryState1, gammaOutputMemoryState2 }); + { 
gammaOutputMemoryState1.output, gammaOutputMemoryState2.output }); lambdaOutputTest1 = - lambdaNode->finalize({ gammaOutputX, ioStateArgument, &lambdaExitMergeResult }); + lambdaNode->finalize({ gammaOutputX.output, ioStateArgument, &lambdaExitMergeResult }); } jlm::rvsdg::output * lambdaOutputTest2; diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 2e7062053..a217bae1e 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -60,15 +60,15 @@ TestGamma() auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto gamma = jlm::rvsdg::GammaNode::create(c, 2); - auto ev1 = gamma->add_entryvar(x); - auto ev2 = gamma->add_entryvar(y); - auto ev3 = gamma->add_entryvar(x); + auto ev1 = gamma->AddEntryVar(x); + auto ev2 = gamma->AddEntryVar(y); + auto ev3 = gamma->AddEntryVar(x); - auto t = jlm::tests::create_testop(gamma->subregion(1), { ev2->argument(1) }, { vt })[0]; + auto t = jlm::tests::create_testop(gamma->subregion(1), { ev2.branchArgument[1] }, { vt })[0]; - gamma->add_exitvar({ ev1->argument(0), ev1->argument(1) }); - gamma->add_exitvar({ ev2->argument(0), t }); - gamma->add_exitvar({ ev3->argument(0), ev1->argument(1) }); + gamma->AddExitVar(ev1.branchArgument); + gamma->AddExitVar({ ev2.branchArgument[0], t }); + gamma->AddExitVar({ ev3.branchArgument[0], ev1.branchArgument[1] }); GraphExport::Create(*gamma->output(0), "z"); GraphExport::Create(*gamma->output(2), "w"); @@ -98,12 +98,12 @@ TestGamma2() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto gamma = jlm::rvsdg::GammaNode::create(c, 2); - gamma->add_entryvar(x); + gamma->AddEntryVar(x); auto n1 = jlm::tests::create_testop(gamma->subregion(0), {}, { vt })[0]; auto n2 = jlm::tests::create_testop(gamma->subregion(1), {}, { vt })[0]; - gamma->add_exitvar({ n1, n2 }); + gamma->AddExitVar({ n1, n2 }); GraphExport::Create(*gamma->output(0), "x"); diff --git 
a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index 5957ea797..b448b2d9d 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -560,8 +560,9 @@ TestGamma() for (size_t n = 0; n < 4; n++) { - auto & argument0 = ptg->GetRegisterNode(*test.gamma->entryvar(n)->argument(0)); - auto & argument1 = ptg->GetRegisterNode(*test.gamma->entryvar(n)->argument(1)); + auto entryvar = test.gamma->GetEntryVar(n); + auto & argument0 = ptg->GetRegisterNode(*entryvar.branchArgument[0]); + auto & argument1 = ptg->GetRegisterNode(*entryvar.branchArgument[1]); assert(TargetsExactly(argument0, { &lambda, &ptg->GetExternalMemoryNode() })); assert(TargetsExactly(argument1, { &lambda, &ptg->GetExternalMemoryNode() })); @@ -569,7 +570,7 @@ TestGamma() for (size_t n = 0; n < 4; n++) { - auto & gammaOutput = ptg->GetRegisterNode(*test.gamma->exitvar(0)); + auto & gammaOutput = ptg->GetRegisterNode(*test.gamma->GetExitVars()[0].output); assert(TargetsExactly(gammaOutput, { &lambda, &ptg->GetExternalMemoryNode() })); } diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 1e28a5e0c..85c2162f1 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -1919,8 +1919,8 @@ ValidatePhiTestSteensgaardAgnostic(const jlm::tests::PhiTest1 & test) auto gammaStateIndex = store->input(2)->origin()->index(); - auto load1 = - jlm::rvsdg::output::GetNode(*test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); + auto load1 = jlm::rvsdg::output::GetNode( + *test.gamma->GetExitVars()[gammaStateIndex].branchResult[0]->origin()); assert(is(*load1, 2, 2)); auto load2 = jlm::rvsdg::output::GetNode(*load1->input(1)->origin()); @@ -1948,8 +1948,8 @@ ValidatePhiTestSteensgaardRegionAware(const jlm::tests::PhiTest1 & 
test) auto gammaStateIndex = store->input(2)->origin()->index(); - auto load1 = - jlm::rvsdg::output::GetNode(*test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); + auto load1 = jlm::rvsdg::output::GetNode( + *test.gamma->GetExitVars()[gammaStateIndex].branchResult[0]->origin()); assert(is(*load1, 2, 2)); auto load2 = jlm::rvsdg::output::GetNode(*load1->input(1)->origin()); @@ -1991,8 +1991,8 @@ ValidatePhiTestSteensgaardAgnosticTopDown(const jlm::tests::PhiTest1 & test) auto gammaStateIndex = storeNode->input(2)->origin()->index(); - auto load1 = - jlm::rvsdg::output::GetNode(*test.gamma->exitvar(gammaStateIndex)->result(0)->origin()); + auto load1 = jlm::rvsdg::output::GetNode( + *test.gamma->GetExitVars()[gammaStateIndex].branchResult[0]->origin()); assert(is(*load1, 2, 2)); auto load2 = jlm::rvsdg::output::GetNode(*load1->input(1)->origin()); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp index 58e447574..525e69b55 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp @@ -660,8 +660,9 @@ TestGamma() for (size_t n = 0; n < 4; n++) { - auto & argument0 = pointsToGraph.GetRegisterNode(*test.gamma->entryvar(n)->argument(0)); - auto & argument1 = pointsToGraph.GetRegisterNode(*test.gamma->entryvar(n)->argument(1)); + auto entryvar = test.gamma->GetEntryVar(n); + auto & argument0 = pointsToGraph.GetRegisterNode(*entryvar.branchArgument[0]); + auto & argument1 = pointsToGraph.GetRegisterNode(*entryvar.branchArgument[1]); assertTargets(argument0, { &lambda, &pointsToGraph.GetExternalMemoryNode() }); assertTargets(argument1, { &lambda, &pointsToGraph.GetExternalMemoryNode() }); @@ -669,7 +670,7 @@ TestGamma() for (size_t n = 0; n < 4; n++) { - auto & gammaOutput = pointsToGraph.GetRegisterNode(*test.gamma->exitvar(0)); + auto & gammaOutput = pointsToGraph.GetRegisterNode(*test.gamma->GetExitVars()[0].output); 
assertTargets(gammaOutput, { &lambda, &pointsToGraph.GetExternalMemoryNode() }); } diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 3995b8dae..a00c8e1d1 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -86,23 +86,23 @@ test_gamma() auto gamma = jlm::rvsdg::GammaNode::create(c, 2); - auto ev1 = gamma->add_entryvar(u1); - auto ev2 = gamma->add_entryvar(u2); - auto ev3 = gamma->add_entryvar(y); - auto ev4 = gamma->add_entryvar(z); - auto ev5 = gamma->add_entryvar(z); + auto ev1 = gamma->AddEntryVar(u1); + auto ev2 = gamma->AddEntryVar(u2); + auto ev3 = gamma->AddEntryVar(y); + auto ev4 = gamma->AddEntryVar(z); + auto ev5 = gamma->AddEntryVar(z); auto n1 = jlm::tests::create_testop(gamma->subregion(0), {}, { vt })[0]; auto n2 = jlm::tests::create_testop(gamma->subregion(0), {}, { vt })[0]; auto n3 = jlm::tests::create_testop(gamma->subregion(0), {}, { vt })[0]; - gamma->add_exitvar({ ev1->argument(0), ev2->argument(1) }); - gamma->add_exitvar({ ev2->argument(0), ev2->argument(1) }); - gamma->add_exitvar({ ev3->argument(0), ev3->argument(1) }); - gamma->add_exitvar({ n1, ev3->argument(1) }); - gamma->add_exitvar({ n2, ev3->argument(1) }); - gamma->add_exitvar({ n3, ev3->argument(1) }); - gamma->add_exitvar({ ev5->argument(0), ev4->argument(1) }); + gamma->AddExitVar({ ev1.branchArgument[0], ev1.branchArgument[1] }); + gamma->AddExitVar({ ev2.branchArgument[0], ev2.branchArgument[1] }); + gamma->AddExitVar({ ev3.branchArgument[0], ev3.branchArgument[1] }); + gamma->AddExitVar({ n1, ev3.branchArgument[1] }); + gamma->AddExitVar({ n2, ev3.branchArgument[1] }); + gamma->AddExitVar({ n3, ev3.branchArgument[1] }); + gamma->AddExitVar({ ev5.branchArgument[0], ev4.branchArgument[1] }); GraphExport::Create(*gamma->output(0), "x1"); GraphExport::Create(*gamma->output(1), "x2"); diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index ad1b4fa8d..d6c70d0dd 100644 --- 
a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -70,25 +70,27 @@ test1() auto memoryStateArgument = lambda->GetFunctionArguments()[3]; auto gamma = jlm::rvsdg::GammaNode::create(controlArgument, 2); - auto gammaInputF1 = gamma->add_entryvar(d); - auto gammaInputValue = gamma->add_entryvar(valueArgument); - auto gammaInputIoState = gamma->add_entryvar(iOStateArgument); - auto gammaInputMemoryState = gamma->add_entryvar(memoryStateArgument); + auto gammaInputF1 = gamma->AddEntryVar(d); + auto gammaInputValue = gamma->AddEntryVar(valueArgument); + auto gammaInputIoState = gamma->AddEntryVar(iOStateArgument); + auto gammaInputMemoryState = gamma->AddEntryVar(memoryStateArgument); auto callResults = CallNode::Create( - gammaInputF1->argument(0), + gammaInputF1.branchArgument[0], jlm::rvsdg::AssertGetOwnerNode(*f1).Type(), - { gammaInputValue->argument(0), - gammaInputIoState->argument(0), - gammaInputMemoryState->argument(0) }); + { gammaInputValue.branchArgument[0], + gammaInputIoState.branchArgument[0], + gammaInputMemoryState.branchArgument[0] }); - auto gammaOutputValue = gamma->add_exitvar({ callResults[0], gammaInputValue->argument(1) }); + auto gammaOutputValue = + gamma->AddExitVar({ callResults[0], gammaInputValue.branchArgument[1] }); auto gammaOutputIoState = - gamma->add_exitvar({ callResults[1], gammaInputIoState->argument(1) }); + gamma->AddExitVar({ callResults[1], gammaInputIoState.branchArgument[1] }); auto gammaOutputMemoryState = - gamma->add_exitvar({ callResults[2], gammaInputMemoryState->argument(1) }); + gamma->AddExitVar({ callResults[2], gammaInputMemoryState.branchArgument[1] }); - return lambda->finalize({ gammaOutputValue, gammaOutputIoState, gammaOutputMemoryState }); + return lambda->finalize( + { gammaOutputValue.output, gammaOutputIoState.output, gammaOutputMemoryState.output }); }; auto f1 = SetupF1(); diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index 
2ef65955b..a2ac53058 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -43,21 +43,21 @@ test1() auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); - auto evx = gamma->add_entryvar(lvx->argument()); - auto evy = gamma->add_entryvar(lvy->argument()); + auto evx = gamma->AddEntryVar(lvx->argument()); + auto evy = gamma->AddEntryVar(lvy->argument()); auto b = jlm::tests::create_testop( gamma->subregion(0), - { evx->argument(0), evy->argument(0) }, + { evx.branchArgument[0], evy.branchArgument[0] }, { vt })[0]; auto c = jlm::tests::create_testop( gamma->subregion(1), - { evx->argument(1), evy->argument(1) }, + { evx.branchArgument[1], evy.branchArgument[1] }, { vt })[0]; - auto xvy = gamma->add_exitvar({ b, c }); + auto xvy = gamma->AddExitVar({ b, c }); - lvy->result()->divert_to(xvy); + lvy->result()->divert_to(xvy.output); theta->set_predicate(predicate); @@ -98,13 +98,13 @@ test2() auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); - auto ev1 = gamma->add_entryvar(n1); - auto ev2 = gamma->add_entryvar(lv1->argument()); - auto ev3 = gamma->add_entryvar(n2); + auto ev1 = gamma->AddEntryVar(n1); + auto ev2 = gamma->AddEntryVar(lv1->argument()); + auto ev3 = gamma->AddEntryVar(n2); - gamma->add_exitvar({ ev1->argument(0), ev1->argument(1) }); - gamma->add_exitvar({ ev2->argument(0), ev2->argument(1) }); - gamma->add_exitvar({ ev3->argument(0), ev3->argument(1) }); + gamma->AddExitVar(ev1.branchArgument); + gamma->AddExitVar(ev2.branchArgument); + gamma->AddExitVar(ev3.branchArgument); lv1->result()->divert_to(gamma->output(1)); diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index 84ef82b66..2de4c8f79 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -41,9 +41,9 @@ test_pullin_top() auto gamma = jlm::rvsdg::GammaNode::create(n4, 2); - gamma->add_entryvar(n4); - auto ev = gamma->add_entryvar(n5); - gamma->add_exitvar({ ev->argument(0), 
ev->argument(1) }); + gamma->AddEntryVar(n4); + auto ev = gamma->AddEntryVar(n5); + gamma->AddExitVar(ev.branchArgument); GraphExport::Create(*gamma->output(0), "x"); GraphExport::Create(*n2, "y"); @@ -68,8 +68,8 @@ test_pullin_bottom() auto gamma = jlm::rvsdg::GammaNode::create(c, 2); - auto ev = gamma->add_entryvar(x); - gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); + auto ev = gamma->AddEntryVar(x); + gamma->AddExitVar(ev.branchArgument); auto b1 = jlm::tests::create_testop(graph.root(), { gamma->output(0), x }, { vt })[0]; auto b2 = jlm::tests::create_testop(graph.root(), { gamma->output(0), b1 }, { vt })[0]; @@ -99,21 +99,21 @@ test_pull() /* outer gamma */ auto gamma1 = jlm::rvsdg::GammaNode::create(p, 2); - auto ev1 = gamma1->add_entryvar(p); - auto ev2 = gamma1->add_entryvar(croot); + auto ev1 = gamma1->AddEntryVar(p); + auto ev2 = gamma1->AddEntryVar(croot); auto cg1 = jlm::tests::create_testop(gamma1->subregion(0), {}, { vt })[0]; /* inner gamma */ - auto gamma2 = jlm::rvsdg::GammaNode::create(ev1->argument(1), 2); - auto ev3 = gamma2->add_entryvar(ev2->argument(1)); + auto gamma2 = jlm::rvsdg::GammaNode::create(ev1.branchArgument[1], 2); + auto ev3 = gamma2->AddEntryVar(ev2.branchArgument[1]); auto cg2 = jlm::tests::create_testop(gamma2->subregion(0), {}, { vt })[0]; - auto un = jlm::tests::create_testop(gamma2->subregion(1), { ev3->argument(1) }, { vt })[0]; - auto g2xv = gamma2->add_exitvar({ cg2, un }); + auto un = jlm::tests::create_testop(gamma2->subregion(1), { ev3.branchArgument[1] }, { vt })[0]; + auto g2xv = gamma2->AddExitVar({ cg2, un }); - auto g1xv = gamma1->add_exitvar({ cg1, g2xv }); + auto g1xv = gamma1->AddExitVar({ cg1, g2xv.output }); - GraphExport::Create(*g1xv, ""); + GraphExport::Create(*g1xv.output, ""); jlm::rvsdg::view(graph, stdout); jlm::llvm::pullin pullin; diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index d67f4d543..2be811337 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ 
b/tests/jlm/llvm/opt/test-push.cpp @@ -35,14 +35,16 @@ test_gamma() auto s = &jlm::tests::GraphImport::Create(graph, st, "s"); auto gamma = jlm::rvsdg::GammaNode::create(c, 2); - auto evx = gamma->add_entryvar(x); - auto evs = gamma->add_entryvar(s); + auto evx = gamma->AddEntryVar(x); + auto evs = gamma->AddEntryVar(s); auto null = jlm::tests::create_testop(gamma->subregion(0), {}, { vt })[0]; - auto bin = jlm::tests::create_testop(gamma->subregion(0), { null, evx->argument(0) }, { vt })[0]; - auto state = jlm::tests::create_testop(gamma->subregion(0), { bin, evs->argument(0) }, { st })[0]; + auto bin = + jlm::tests::create_testop(gamma->subregion(0), { null, evx.branchArgument[0] }, { vt })[0]; + auto state = + jlm::tests::create_testop(gamma->subregion(0), { bin, evs.branchArgument[0] }, { st })[0]; - gamma->add_exitvar({ state, evs->argument(1) }); + gamma->AddExitVar({ state, evs.branchArgument[1] }); GraphExport::Create(*gamma->output(0), "x"); diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 4a0e0e281..a5cc1507c 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -509,8 +509,8 @@ TestGamma() 3 // nalternatives ); - rvsdgGammaNode->add_entryvar(entryvar1); - rvsdgGammaNode->add_entryvar(entryvar2); + rvsdgGammaNode->AddEntryVar(entryvar1); + rvsdgGammaNode->AddEntryVar(entryvar2); std::vector exitvars1; std::vector exitvars2; @@ -521,8 +521,8 @@ TestGamma() jlm::rvsdg::create_bitconstant(rvsdgGammaNode->subregion(i), 32, 10 * (i + 1))); } - rvsdgGammaNode->add_exitvar(exitvars1); - rvsdgGammaNode->add_exitvar(exitvars2); + rvsdgGammaNode->AddExitVar(exitvars1); + rvsdgGammaNode->AddExitVar(exitvars2); // Convert the RVSDG to MLIR std::cout << "Convert to MLIR" << std::endl; diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index db046df4a..1edfd31e6 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp 
+++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -26,10 +26,10 @@ test_gamma(void) auto pred = match(2, { { 0, 0 }, { 1, 1 } }, 2, 3, cmp); auto gamma = GammaNode::create(pred, 3); - auto ev0 = gamma->add_entryvar(v0); - auto ev1 = gamma->add_entryvar(v1); - auto ev2 = gamma->add_entryvar(v2); - gamma->add_exitvar({ ev0->argument(0), ev1->argument(1), ev2->argument(2) }); + auto ev0 = gamma->AddEntryVar(v0); + auto ev1 = gamma->AddEntryVar(v1); + auto ev2 = gamma->AddEntryVar(v2); + gamma->AddExitVar({ ev0.branchArgument[0], ev1.branchArgument[1], ev2.branchArgument[2] }); jlm::tests::GraphExport::Create(*gamma->output(0), "dummy"); @@ -44,8 +44,8 @@ test_gamma(void) /* test entry and exit variable iterators */ auto gamma3 = GammaNode::create(v3, 2); - assert(gamma3->begin_entryvar() == gamma3->end_entryvar()); - assert(gamma3->begin_exitvar() == gamma3->end_exitvar()); + assert(gamma3->GetEntryVars().empty()); + assert(gamma3->GetExitVars().empty()); } static void @@ -65,10 +65,10 @@ test_predicate_reduction(void) auto pred = jlm::rvsdg::control_constant(graph.root(), 3, 1); auto gamma = GammaNode::create(pred, 3); - auto ev0 = gamma->add_entryvar(v0); - auto ev1 = gamma->add_entryvar(v1); - auto ev2 = gamma->add_entryvar(v2); - gamma->add_exitvar({ ev0->argument(0), ev1->argument(1), ev2->argument(2) }); + auto ev0 = gamma->AddEntryVar(v0); + auto ev1 = gamma->AddEntryVar(v1); + auto ev2 = gamma->AddEntryVar(v2); + gamma->AddExitVar({ ev0.branchArgument[0], ev1.branchArgument[1], ev2.branchArgument[2] }); auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); @@ -94,8 +94,8 @@ test_invariant_reduction(void) auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); auto gamma = GammaNode::create(pred, 2); - auto ev = gamma->add_entryvar(v); - gamma->add_exitvar({ ev->argument(0), ev->argument(1) }); + auto ev = gamma->AddEntryVar(v); + gamma->AddExitVar(ev.branchArgument); auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); @@ -127,11 +127,11 
@@ test_control_constant_reduction() auto n0 = jlm::rvsdg::control_constant(gamma->subregion(0), 3, 0); auto n1 = jlm::rvsdg::control_constant(gamma->subregion(1), 3, 1); - auto xv1 = gamma->add_exitvar({ t, f }); - auto xv2 = gamma->add_exitvar({ n0, n1 }); + auto xv1 = gamma->AddExitVar({ t, f }); + auto xv2 = gamma->AddExitVar({ n0, n1 }); - auto & ex1 = jlm::tests::GraphExport::Create(*xv1, ""); - auto & ex2 = jlm::tests::GraphExport::Create(*xv2, ""); + auto & ex1 = jlm::tests::GraphExport::Create(*xv1.output, ""); + auto & ex2 = jlm::tests::GraphExport::Create(*xv2.output, ""); jlm::rvsdg::view(graph.root(), stdout); graph.normalize(); @@ -164,9 +164,9 @@ test_control_constant_reduction2() auto t3 = jlm::rvsdg::control_true(gamma->subregion(2)); auto f = jlm::rvsdg::control_false(gamma->subregion(3)); - auto xv = gamma->add_exitvar({ t1, t2, t3, f }); + auto xv = gamma->AddExitVar({ t1, t2, t3, f }); - auto & ex = jlm::tests::GraphExport::Create(*xv, ""); + auto & ex = jlm::tests::GraphExport::Create(*xv.output, ""); jlm::rvsdg::view(graph.root(), stdout); graph.normalize(); @@ -193,49 +193,45 @@ TestRemoveGammaOutputsWhere() auto v3 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto gammaNode = GammaNode::create(predicate, 2); - auto gammaInput0 = gammaNode->add_entryvar(v0); - auto gammaInput1 = gammaNode->add_entryvar(v1); - auto gammaInput2 = gammaNode->add_entryvar(v2); - auto gammaInput3 = gammaNode->add_entryvar(v3); + auto gammaInput0 = gammaNode->AddEntryVar(v0); + auto gammaInput1 = gammaNode->AddEntryVar(v1); + auto gammaInput2 = gammaNode->AddEntryVar(v2); + auto gammaInput3 = gammaNode->AddEntryVar(v3); - auto gammaOutput0 = - gammaNode->add_exitvar({ gammaInput0->argument(0), gammaInput0->argument(1) }); - auto gammaOutput1 = - gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput1->argument(1) }); - auto gammaOutput2 = - gammaNode->add_exitvar({ gammaInput2->argument(0), gammaInput2->argument(1) }); - auto gammaOutput3 = - 
gammaNode->add_exitvar({ gammaInput3->argument(0), gammaInput3->argument(1) }); + auto gammaOutput0 = gammaNode->AddExitVar(gammaInput0.branchArgument); + auto gammaOutput1 = gammaNode->AddExitVar(gammaInput1.branchArgument); + auto gammaOutput2 = gammaNode->AddExitVar(gammaInput2.branchArgument); + auto gammaOutput3 = gammaNode->AddExitVar(gammaInput3.branchArgument); - jlm::tests::GraphExport::Create(*gammaOutput0, ""); - jlm::tests::GraphExport::Create(*gammaOutput2, ""); + jlm::tests::GraphExport::Create(*gammaOutput0.output, ""); + jlm::tests::GraphExport::Create(*gammaOutput2.output, ""); // Act & Assert assert(gammaNode->noutputs() == 4); // Remove gammaOutput1 gammaNode->RemoveGammaOutputsWhere( - [&](const GammaOutput & output) + [&](const jlm::rvsdg::output & output) { - return output.index() == gammaOutput1->index(); + return output.index() == gammaOutput1.output->index(); }); assert(gammaNode->noutputs() == 3); assert(gammaNode->subregion(0)->nresults() == 3); assert(gammaNode->subregion(1)->nresults() == 3); - assert(gammaOutput2->index() == 1); - assert(gammaOutput3->index() == 2); + assert(gammaOutput2.output->index() == 1); + assert(gammaOutput3.output->index() == 2); // Try to remove gammaOutput2. This should result in no change as gammaOutput2 still has users. 
gammaNode->RemoveGammaOutputsWhere( - [&](const GammaOutput & output) + [&](const jlm::rvsdg::output & output) { - return output.index() == gammaOutput2->index(); + return output.index() == gammaOutput2.output->index(); }); assert(gammaNode->noutputs() == 3); assert(gammaNode->subregion(0)->nresults() == 3); assert(gammaNode->subregion(1)->nresults() == 3); - assert(gammaOutput2->index() == 1); - assert(gammaOutput3->index() == 2); + assert(gammaOutput2.output->index() == 1); + assert(gammaOutput3.output->index() == 2); } static void @@ -255,20 +251,18 @@ TestPruneOutputs() auto v3 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto gammaNode = GammaNode::create(predicate, 2); - auto gammaInput0 = gammaNode->add_entryvar(v0); - auto gammaInput1 = gammaNode->add_entryvar(v1); - auto gammaInput2 = gammaNode->add_entryvar(v2); - auto gammaInput3 = gammaNode->add_entryvar(v3); - - auto gammaOutput0 = - gammaNode->add_exitvar({ gammaInput0->argument(0), gammaInput0->argument(1) }); - gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput1->argument(1) }); - auto gammaOutput2 = - gammaNode->add_exitvar({ gammaInput2->argument(0), gammaInput2->argument(1) }); - gammaNode->add_exitvar({ gammaInput3->argument(0), gammaInput3->argument(1) }); + auto gammaInput0 = gammaNode->AddEntryVar(v0); + auto gammaInput1 = gammaNode->AddEntryVar(v1); + auto gammaInput2 = gammaNode->AddEntryVar(v2); + auto gammaInput3 = gammaNode->AddEntryVar(v3); - jlm::tests::GraphExport::Create(*gammaOutput0, ""); - jlm::tests::GraphExport::Create(*gammaOutput2, ""); + auto gammaOutput0 = gammaNode->AddExitVar(gammaInput0.branchArgument); + gammaNode->AddExitVar(gammaInput1.branchArgument); + auto gammaOutput2 = gammaNode->AddExitVar(gammaInput2.branchArgument); + gammaNode->AddExitVar(gammaInput3.branchArgument); + + jlm::tests::GraphExport::Create(*gammaOutput0.output, ""); + jlm::tests::GraphExport::Create(*gammaOutput2.output, ""); // Act gammaNode->PruneOutputs(); @@ -278,13 +272,11 @@ 
TestPruneOutputs() assert(gammaNode->subregion(0)->nresults() == 2); assert(gammaNode->subregion(1)->nresults() == 2); - assert(gammaOutput0->index() == 0); - assert(gammaNode->subregion(0)->result(0)->output() == gammaOutput0); - assert(gammaNode->subregion(1)->result(0)->output() == gammaOutput0); + assert(gammaOutput0.output->index() == 0); + assert(gammaNode->GetExitVars()[0].output == gammaOutput0.output); - assert(gammaOutput2->index() == 1); - assert(gammaNode->subregion(0)->result(1)->output() == gammaOutput2); - assert(gammaNode->subregion(1)->result(1)->output() == gammaOutput2); + assert(gammaOutput2.output->index() == 1); + assert(gammaNode->GetExitVars()[1].output == gammaOutput2.output); } static void @@ -302,30 +294,26 @@ TestIsInvariant() auto v1 = &jlm::tests::GraphImport::Create(rvsdg, vt, ""); auto gammaNode = GammaNode::create(predicate, 2); - auto gammaInput0 = gammaNode->add_entryvar(v0); - auto gammaInput1 = gammaNode->add_entryvar(v1); - auto gammaInput2 = gammaNode->add_entryvar(v1); + auto gammaInput0 = gammaNode->AddEntryVar(v0); + auto gammaInput1 = gammaNode->AddEntryVar(v1); + auto gammaInput2 = gammaNode->AddEntryVar(v1); - auto gammaOutput0 = - gammaNode->add_exitvar({ gammaInput0->argument(0), gammaInput0->argument(1) }); + auto gammaOutput0 = gammaNode->AddExitVar(gammaInput0.branchArgument); auto gammaOutput1 = - gammaNode->add_exitvar({ gammaInput1->argument(0), gammaInput2->argument(1) }); + gammaNode->AddExitVar({ gammaInput1.branchArgument[0], gammaInput2.branchArgument[1] }); auto gammaOutput2 = - gammaNode->add_exitvar({ gammaInput0->argument(0), gammaInput2->argument(1) }); + gammaNode->AddExitVar({ gammaInput0.branchArgument[0], gammaInput2.branchArgument[1] }); // Act & Assert - assert(gammaOutput0->IsInvariant()); - output * invariantOrigin = nullptr; - gammaOutput0->IsInvariant(&invariantOrigin); - assert(invariantOrigin == v0); - - assert(gammaOutput1->IsInvariant(&invariantOrigin)); - assert(invariantOrigin == v1); - 
- invariantOrigin = nullptr; - assert(!gammaOutput2->IsInvariant(&invariantOrigin)); - // invariantOrigin should not have been touched as gammaOutput2 is not invariant - assert(invariantOrigin == nullptr); + std::optional invariantOrigin; + invariantOrigin = jlm::rvsdg::GetGammaInvariantOrigin(*gammaNode, gammaOutput0); + assert(invariantOrigin && *invariantOrigin == v0); + + invariantOrigin = jlm::rvsdg::GetGammaInvariantOrigin(*gammaNode, gammaOutput1); + assert(invariantOrigin && *invariantOrigin == v1); + + invariantOrigin = jlm::rvsdg::GetGammaInvariantOrigin(*gammaNode, gammaOutput2); + assert(!invariantOrigin); } static int From b82c2a066a779535688469c49a935df3600c7831 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 3 Dec 2024 10:54:44 +0100 Subject: [PATCH 127/170] Rename node class to Node (#673) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 4 +- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 4 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.hpp | 4 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 4 +- jlm/hls/backend/rhls2firrtl/dot-hls.hpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 48 +++--- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp | 8 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.hpp | 2 +- jlm/hls/opt/cne.cpp | 4 +- jlm/hls/util/view.cpp | 2 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 18 +-- jlm/llvm/ir/operators/Load.cpp | 8 +- jlm/llvm/ir/operators/Load.hpp | 6 +- jlm/llvm/ir/operators/Store.cpp | 6 +- jlm/llvm/ir/operators/Store.hpp | 6 +- jlm/llvm/ir/operators/call.cpp | 2 +- jlm/llvm/ir/operators/call.hpp | 2 +- jlm/llvm/ir/operators/delta.cpp | 2 +- jlm/llvm/ir/operators/delta.hpp | 2 +- jlm/llvm/ir/operators/lambda.cpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +- .../opt/alias-analyses/PointerObjectSet.cpp | 8 +- .../opt/alias-analyses/PointerObjectSet.hpp | 12 +- 
jlm/llvm/opt/alias-analyses/PointsToGraph.hpp | 26 ++-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 20 +-- jlm/llvm/opt/cne.cpp | 4 +- jlm/llvm/opt/inversion.cpp | 6 +- jlm/llvm/opt/pull.cpp | 12 +- jlm/llvm/opt/push.cpp | 22 +-- jlm/llvm/opt/unroll.hpp | 12 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 10 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 6 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 12 +- jlm/mlir/frontend/MlirToJlmConverter.hpp | 10 +- jlm/rvsdg/binary.cpp | 10 +- jlm/rvsdg/binary.hpp | 6 +- jlm/rvsdg/bitstring/concat.cpp | 2 +- jlm/rvsdg/gamma.cpp | 16 +- jlm/rvsdg/gamma.hpp | 2 +- jlm/rvsdg/graph.cpp | 4 +- jlm/rvsdg/graph.hpp | 2 +- jlm/rvsdg/node-normal-form.cpp | 2 +- jlm/rvsdg/node-normal-form.hpp | 4 +- jlm/rvsdg/node.cpp | 32 ++-- jlm/rvsdg/node.hpp | 84 +++++------ jlm/rvsdg/notifiers.cpp | 6 +- jlm/rvsdg/notifiers.hpp | 8 +- jlm/rvsdg/operation.hpp | 2 +- jlm/rvsdg/region.cpp | 20 +-- jlm/rvsdg/region.hpp | 34 ++--- jlm/rvsdg/simple-node.cpp | 8 +- jlm/rvsdg/simple-node.hpp | 10 +- jlm/rvsdg/simple-normal-form.cpp | 8 +- jlm/rvsdg/simple-normal-form.hpp | 2 +- jlm/rvsdg/statemux.cpp | 6 +- jlm/rvsdg/statemux.hpp | 2 +- jlm/rvsdg/structural-node.cpp | 2 +- jlm/rvsdg/structural-node.hpp | 10 +- jlm/rvsdg/theta.cpp | 4 +- jlm/rvsdg/theta.hpp | 4 +- jlm/rvsdg/tracker.cpp | 14 +- jlm/rvsdg/tracker.hpp | 25 ++- jlm/rvsdg/traverser.cpp | 14 +- jlm/rvsdg/traverser.hpp | 38 ++--- jlm/rvsdg/unary.cpp | 2 +- jlm/rvsdg/unary.hpp | 2 +- jlm/rvsdg/view.cpp | 10 +- tests/TestRvsdgs.hpp | 142 +++++++++--------- .../alias-analyses/TestMemoryStateEncoder.cpp | 34 ++--- .../mlir/frontend/TestMlirToJlmConverter.cpp | 20 +-- tests/jlm/rvsdg/test-bottomup.cpp | 4 +- tests/jlm/rvsdg/test-graph.cpp | 2 +- tests/jlm/rvsdg/test-nodes.cpp | 4 +- tests/jlm/rvsdg/test-topdown.cpp | 12 +- tests/test-operation.hpp | 10 +- 78 files changed, 458 insertions(+), 475 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp 
b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index cf5581fb2..153962534 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -3951,7 +3951,7 @@ RhlsToFirrtlConverter::GetFirrtlType(const jlm::rvsdg::Type * type) } std::string -RhlsToFirrtlConverter::GetModuleName(const jlm::rvsdg::node * node) +RhlsToFirrtlConverter::GetModuleName(const rvsdg::Node * node) { std::string append = ""; @@ -4048,7 +4048,7 @@ RhlsToFirrtlConverter::IsIdentityMapping(const jlm::rvsdg::match_op & op) void RhlsToFirrtlConverter::WriteModuleToFile( const circt::firrtl::FModuleOp fModuleOp, - const jlm::rvsdg::node * node) + const rvsdg::Node * node) { if (!fModuleOp) return; diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 895608373..922973327 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -70,7 +70,7 @@ class RhlsToFirrtlConverter : public BaseHLS MlirGen(const llvm::lambda::node * lamdaNode); void - WriteModuleToFile(const circt::firrtl::FModuleOp fModuleOp, const jlm::rvsdg::node * node); + WriteModuleToFile(const circt::firrtl::FModuleOp fModuleOp, const rvsdg::Node * node); void WriteCircuitToFile(const circt::firrtl::CircuitOp circuit, std::string name); @@ -282,7 +282,7 @@ class RhlsToFirrtlConverter : public BaseHLS circt::firrtl::FIRRTLBaseType GetFirrtlType(const jlm::rvsdg::Type * type); std::string - GetModuleName(const jlm::rvsdg::node * node); + GetModuleName(const rvsdg::Node * node); bool IsIdentityMapping(const jlm::rvsdg::match_op & op); diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index e85a1b58e..6ac11e3ff 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -23,7 +23,7 @@ isForbiddenChar(char c) } std::string 
-BaseHLS::get_node_name(const jlm::rvsdg::node * node) +BaseHLS::get_node_name(const jlm::rvsdg::Node * node) { auto found = node_map.find(node); if (found != node_map.end()) diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index 471bc2d7d..460f086e2 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -48,11 +48,11 @@ class BaseHLS extension() = 0; protected: - std::unordered_map node_map; + std::unordered_map node_map; std::unordered_map output_map; std::string - get_node_name(const jlm::rvsdg::node * node); + get_node_name(const rvsdg::Node * node); static std::string get_port_name(jlm::rvsdg::input * port); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index 9d0cde44e..52782c33e 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -64,7 +64,7 @@ DotHLS::result_to_dot(rvsdg::RegionResult * port) } std::string -DotHLS::node_to_dot(const jlm::rvsdg::node * node) +DotHLS::node_to_dot(const rvsdg::Node * node) { auto SPACER = " \n"; auto name = get_node_name(node); @@ -203,7 +203,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) dot << "color=\"#ff8080\"\n"; std::set back_outputs; - std::set top_nodes; // no artificial top nodes for now + std::set top_nodes; // no artificial top nodes for now for (size_t i = 0; i < sr->narguments(); ++i) { auto arg = sr->argument(i); diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp index 0934fa548..1f8c292d8 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp @@ -28,7 +28,7 @@ class DotHLS : public BaseHLS result_to_dot(rvsdg::RegionResult * port); std::string - node_to_dot(const jlm::rvsdg::node * node); + node_to_dot(const rvsdg::Node * node); std::string edge(std::string src, std::string snk, const jlm::rvsdg::Type & type, bool back = false); diff 
--git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index a832fd3fc..ae7c50069 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -27,10 +27,10 @@ dae_conv(jlm::llvm::RvsdgModule & rm) } void -find_slice_node(jlm::rvsdg::node * node, std::unordered_set & slice); +find_slice_node(rvsdg::Node * node, std::unordered_set & slice); void -find_slice_output(jlm::rvsdg::output * output, std::unordered_set & slice) +find_slice_output(rvsdg::output * output, std::unordered_set & slice) { if (auto no = dynamic_cast(output)) { @@ -57,7 +57,7 @@ find_slice_output(jlm::rvsdg::output * output, std::unordered_set & slice) +find_slice_node(rvsdg::Node * node, std::unordered_set & slice) { for (size_t i = 0; i < node->ninputs(); ++i) { @@ -66,7 +66,7 @@ find_slice_node(jlm::rvsdg::node * node, std::unordered_set } void -find_data_slice_node(jlm::rvsdg::node * node, std::unordered_set & slice) +find_data_slice_node(rvsdg::Node * node, std::unordered_set & slice) { for (size_t i = 0; i < node->ninputs(); ++i) { @@ -79,7 +79,7 @@ find_data_slice_node(jlm::rvsdg::node * node, std::unordered_set & slice) +find_state_slice_node(rvsdg::Node * node, std::unordered_set & slice) { for (size_t i = 0; i < node->ninputs(); ++i) { @@ -93,17 +93,17 @@ find_state_slice_node(jlm::rvsdg::node * node, std::unordered_set & slice, - std::unordered_set & visited); + rvsdg::Node * source, + rvsdg::Node * destination, + std::unordered_set & slice, + std::unordered_set & visited); bool is_slice_exclusive_input_( jlm::rvsdg::input * source, - jlm::rvsdg::node * destination, - std::unordered_set & slice, - std::unordered_set & visited) + rvsdg::Node * destination, + std::unordered_set & slice, + std::unordered_set & visited) { if (auto ni = dynamic_cast(source)) { @@ -166,10 +166,10 @@ trace_to_loop_results(jlm::rvsdg::output * out, std::vector & slice, - std::unordered_set & visited) + rvsdg::Node * source, + rvsdg::Node 
* destination, + std::unordered_set & slice, + std::unordered_set & visited) { // check if descendents of source can leave the slice without going through destination if (source == destination) @@ -200,11 +200,11 @@ is_slice_exclusive_( bool is_slice_exclusive_( - jlm::rvsdg::node * source, - jlm::rvsdg::node * destination, - std::unordered_set & slice) + rvsdg::Node * source, + rvsdg::Node * destination, + std::unordered_set & slice) { - std::unordered_set visited; + std::unordered_set visited; return is_slice_exclusive_(source, destination, slice, visited); } @@ -220,7 +220,7 @@ void decouple_load( loop_node * loopNode, jlm::rvsdg::simple_node * loadNode, - std::unordered_set & loop_slice) + std::unordered_set & loop_slice) { // loadNode is always a part of loop_slice due to state edges auto new_loop = loop_node::create(loopNode->region(), false); @@ -257,7 +257,7 @@ decouple_load( } } // copy nodes - std::vector> context(loopNode->subregion()->nnodes()); + std::vector> context(loopNode->subregion()->nnodes()); for (auto & node : loopNode->subregion()->Nodes()) { JLM_ASSERT(node.depth() < context.size()); @@ -383,7 +383,7 @@ process_loopnode(loop_node * loopNode) { // can currently only generate dae one loop deep // find load slice within loop - three slices - complete, data and state-edge - std::unordered_set loop_slice, data_slice, state_slice; + std::unordered_set loop_slice, data_slice, state_slice; find_slice_node(simplenode, loop_slice); find_data_slice_node(simplenode, data_slice); find_state_slice_node(simplenode, state_slice); @@ -430,7 +430,7 @@ process_loopnode(loop_node * loopNode) || dynamic_cast(&sn->GetOperation())) { // state slice may not contain loads or stores except for node - if (sn != dynamic_cast(simplenode)) + if (sn != dynamic_cast(simplenode)) { can_decouple = false; break; diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index 5e92e73ac..acaf0ba73 100644 --- 
a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -62,7 +62,7 @@ replace_load(jlm::rvsdg::simple_node * orig, jlm::rvsdg::output * resp) { states.push_back(orig->input(i)->origin()); } - jlm::rvsdg::node * nn; + jlm::rvsdg::Node * nn; if (states.empty()) { auto outputs = jlm::hls::decoupled_load_op::create(*addr, *resp); @@ -873,7 +873,7 @@ jlm::hls::ReplaceLoad( states.push_back(replacedLoad->input(i)->origin()); } - jlm::rvsdg::node * newLoad; + rvsdg::Node * newLoad; if (states.empty()) { auto outputs = jlm::hls::decoupled_load_op::create(*loadAddress, *response); diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index fce80d75e..c9a63478f 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -220,14 +220,14 @@ merge_gamma(rvsdg::Region * region) } bool -is_output_of(jlm::rvsdg::output * output, jlm::rvsdg::node * node) +is_output_of(jlm::rvsdg::output * output, rvsdg::Node * node) { auto no = dynamic_cast(output); return no && no->node() == node; } bool -depends_on(jlm::rvsdg::output * output, jlm::rvsdg::node * node) +depends_on(jlm::rvsdg::output * output, rvsdg::Node * node) { auto arg = dynamic_cast(output); if (arg) diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp index e2d4ad476..22eb24b36 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp @@ -126,7 +126,7 @@ remove_unused_loop_inputs(loop_node * ln) } bool -dead_spec_gamma(jlm::rvsdg::node * dmux_node) +dead_spec_gamma(rvsdg::Node * dmux_node) { auto mux_op = dynamic_cast(&dmux_node->GetOperation()); JLM_ASSERT(mux_op); @@ -152,14 +152,14 @@ dead_spec_gamma(jlm::rvsdg::node * dmux_node) } bool -dead_nonspec_gamma(jlm::rvsdg::node * ndmux_node) +dead_nonspec_gamma(rvsdg::Node * ndmux_node) { auto mux_op = dynamic_cast(&ndmux_node->GetOperation()); JLM_ASSERT(mux_op); 
JLM_ASSERT(!mux_op->discarding); // check if all inputs go to outputs of same branch bool all_inputs_same_branch = true; - jlm::rvsdg::node * origin_branch = nullptr; + rvsdg::Node * origin_branch = nullptr; for (size_t i = 1; i < ndmux_node->ninputs(); ++i) { if (auto no = dynamic_cast(ndmux_node->input(i)->origin())) @@ -193,7 +193,7 @@ dead_nonspec_gamma(jlm::rvsdg::node * ndmux_node) } bool -dead_loop(jlm::rvsdg::node * ndmux_node) +dead_loop(rvsdg::Node * ndmux_node) { auto mux_op = dynamic_cast(&ndmux_node->GetOperation()); JLM_ASSERT(mux_op); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.hpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.hpp index 1253ab432..b6025db86 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.hpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.hpp @@ -15,7 +15,7 @@ namespace jlm::hls { static inline bool -is_constant(const jlm::rvsdg::node * node) +is_constant(const rvsdg::Node * node) { return jlm::rvsdg::is(node) || jlm::rvsdg::is(node) diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 490bb5459..d8a1f7a09 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -93,7 +93,7 @@ class cnectx } inline void - mark(const jlm::rvsdg::node * n1, const jlm::rvsdg::node * n2) + mark(const Node * n1, const Node * n2) { JLM_ASSERT(n1->noutputs() == n2->noutputs()); @@ -489,7 +489,7 @@ divert_users(jlm::rvsdg::output * output, cnectx & ctx) } static void -divert_outputs(jlm::rvsdg::node * node, cnectx & ctx) +divert_outputs(Node * node, cnectx & ctx) { for (size_t n = 0; n < node->noutputs(); n++) divert_users(node->output(n), ctx); diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 5c1b7a6de..e0a5d3f87 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -28,7 +28,7 @@ hex(size_t i) } std::string -get_dot_name(jlm::rvsdg::node * node) +get_dot_name(rvsdg::Node * node) { return jlm::util::strfmt("n", hex((intptr_t)node)); } diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp 
b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 88de496d3..02db63c2b 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -99,7 +99,7 @@ create_initialization(const delta::node * delta, context & ctx) } static void -convert_node(const rvsdg::node & node, context & ctx); +convert_node(const rvsdg::Node & node, context & ctx); static inline void convert_region(rvsdg::Region & region, context & ctx) @@ -159,7 +159,7 @@ create_cfg(const lambda::node & lambda, context & ctx) } static inline void -convert_simple_node(const rvsdg::node & node, context & ctx) +convert_simple_node(const rvsdg::Node & node, context & ctx) { JLM_ASSERT(dynamic_cast(&node.GetOperation())); @@ -229,7 +229,7 @@ convert_empty_gamma_node(const rvsdg::GammaNode * gamma, context & ctx) } static inline void -convert_gamma_node(const rvsdg::node & node, context & ctx) +convert_gamma_node(const rvsdg::Node & node, context & ctx) { JLM_ASSERT(is(&node)); auto gamma = static_cast(&node); @@ -350,7 +350,7 @@ phi_needed(const rvsdg::input * i, const llvm::variable * v) } static inline void -convert_theta_node(const rvsdg::node & node, context & ctx) +convert_theta_node(const rvsdg::Node & node, context & ctx) { JLM_ASSERT(is(&node)); auto subregion = static_cast(&node)->subregion(0); @@ -405,7 +405,7 @@ convert_theta_node(const rvsdg::node & node, context & ctx) } static inline void -convert_lambda_node(const rvsdg::node & node, context & ctx) +convert_lambda_node(const rvsdg::Node & node, context & ctx) { JLM_ASSERT(is(&node)); auto lambda = static_cast(&node); @@ -425,7 +425,7 @@ convert_lambda_node(const rvsdg::node & node, context & ctx) } static inline void -convert_phi_node(const rvsdg::node & node, context & ctx) +convert_phi_node(const rvsdg::Node & node, context & ctx) { JLM_ASSERT(rvsdg::is(&node)); auto phi = static_cast(&node); @@ -497,7 +497,7 @@ convert_phi_node(const rvsdg::node & node, context & ctx) } static inline void 
-convert_delta_node(const rvsdg::node & node, context & ctx) +convert_delta_node(const rvsdg::Node & node, context & ctx) { JLM_ASSERT(is(&node)); auto delta = static_cast(&node); @@ -516,10 +516,10 @@ convert_delta_node(const rvsdg::node & node, context & ctx) } static inline void -convert_node(const rvsdg::node & node, context & ctx) +convert_node(const rvsdg::Node & node, context & ctx) { static std:: - unordered_map> + unordered_map> map({ { typeid(lambda::operation), convert_lambda_node }, { std::type_index(typeid(rvsdg::GammaOperation)), convert_gamma_node }, { std::type_index(typeid(rvsdg::ThetaOperation)), convert_theta_node }, diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 7222fa1db..ee4e68af3 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -86,7 +86,7 @@ LoadNonVolatileNode::CopyWithNewMemoryStates( GetAlignment()); } -rvsdg::node * +rvsdg::Node * LoadNonVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { @@ -162,7 +162,7 @@ LoadVolatileNode::CopyWithNewMemoryStates(const std::vector & m GetAlignment()); } -rvsdg::node * +rvsdg::Node * LoadVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); @@ -230,7 +230,7 @@ is_load_alloca_reducible(const std::vector & operands) } static bool -is_reducible_state(const rvsdg::output * state, const rvsdg::node * loadalloca) +is_reducible_state(const rvsdg::output * state, const rvsdg::Node * loadalloca) { if (is(rvsdg::output::GetNode(*state))) { @@ -581,7 +581,7 @@ load_normal_form::load_normal_form( {} bool -load_normal_form::normalize_node(rvsdg::node * node) const +load_normal_form::normalize_node(rvsdg::Node * node) const { JLM_ASSERT(is(node->GetOperation())); auto op = static_cast(&node->GetOperation()); diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index c1f8aaa54..a78baef00 100644 --- 
a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -28,7 +28,7 @@ class load_normal_form final : public rvsdg::simple_normal_form rvsdg::Graph * graph) noexcept; virtual bool - normalize_node(rvsdg::node * node) const override; + normalize_node(rvsdg::Node * node) const override; virtual std::vector normalized_create( @@ -364,7 +364,7 @@ class LoadVolatileNode final : public LoadNode {} public: - rvsdg::node * + Node * copy(rvsdg::Region * region, const std::vector & operands) const override; [[nodiscard]] const LoadVolatileOperation & @@ -529,7 +529,7 @@ class LoadNonVolatileNode final : public LoadNode [[nodiscard]] LoadNonVolatileNode & CopyWithNewMemoryStates(const std::vector & memoryStates) const override; - rvsdg::node * + Node * copy(rvsdg::Region * region, const std::vector & operands) const override; static std::vector diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 291297717..6ade5042e 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -84,7 +84,7 @@ StoreNonVolatileNode::CopyWithNewMemoryStates( GetAlignment()); } -rvsdg::node * +rvsdg::Node * StoreNonVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { @@ -160,7 +160,7 @@ StoreVolatileNode::CopyWithNewMemoryStates(const std::vector & GetAlignment()); } -rvsdg::node * +rvsdg::Node * StoreVolatileNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); @@ -331,7 +331,7 @@ store_normal_form::store_normal_form( } bool -store_normal_form::normalize_node(jlm::rvsdg::node * node) const +store_normal_form::normalize_node(rvsdg::Node * node) const { JLM_ASSERT(is(node->GetOperation())); auto op = static_cast(&node->GetOperation()); diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index b38b20625..d310b080e 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ 
-28,7 +28,7 @@ class store_normal_form final : public jlm::rvsdg::simple_normal_form rvsdg::Graph * graph) noexcept; virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(rvsdg::Node * node) const override; virtual std::vector normalized_create( @@ -333,7 +333,7 @@ class StoreNonVolatileNode final : public StoreNode [[nodiscard]] StoreNonVolatileNode & CopyWithNewMemoryStates(const std::vector & memoryStates) const override; - rvsdg::node * + Node * copy(rvsdg::Region * region, const std::vector & operands) const override; static std::vector @@ -520,7 +520,7 @@ class StoreVolatileNode final : public StoreNode return *ioStateOutput; } - rvsdg::node * + Node * copy(rvsdg::Region * region, const std::vector & operands) const override; static StoreVolatileNode & diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index baca5e613..5557292d7 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -148,7 +148,7 @@ CallOperation::copy() const return std::unique_ptr(new CallOperation(*this)); } -rvsdg::node * +rvsdg::Node * CallNode::copy(rvsdg::Region * region, const std::vector & operands) const { return &CreateNode(*region, GetOperation(), operands); diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index f08a658d3..163923b3a 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -448,7 +448,7 @@ class CallNode final : public jlm::rvsdg::simple_node : nullptr; } - rvsdg::node * + Node * copy(rvsdg::Region * region, const std::vector & operands) const override; static std::vector diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index 5903ff7dc..a74a991c3 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ b/jlm/llvm/ir/operators/delta.cpp @@ -51,7 +51,7 @@ node::GetOperation() const noexcept delta::node * node::copy(rvsdg::Region * region, const std::vector & operands) const { - return 
static_cast(jlm::rvsdg::node::copy(region, operands)); + return static_cast(rvsdg::Node::copy(region, operands)); } delta::node * diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index 6505c6469..fb26c4c62 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -256,7 +256,7 @@ class node final : public rvsdg::StructuralNode result() const noexcept; virtual delta::node * - copy(rvsdg::Region * region, const std::vector & operands) const override; + copy(rvsdg::Region * region, const std::vector & operands) const override; virtual delta::node * copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index 2114e2b05..a4546b400 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -217,7 +217,7 @@ node::output() const noexcept lambda::node * node::copy(rvsdg::Region * region, const std::vector & operands) const { - return util::AssertedCast(jlm::rvsdg::node::copy(region, operands)); + return util::AssertedCast(rvsdg::Node::copy(region, operands)); } lambda::node * diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 334821b35..8ef58b653 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -58,7 +58,7 @@ class DeadNodeElimination::Context final } bool - IsAlive(const jlm::rvsdg::node & node) const noexcept + IsAlive(const rvsdg::Node & node) const noexcept { if (auto simpleNode = dynamic_cast(&node)) { @@ -317,7 +317,7 @@ DeadNodeElimination::SweepRegion(rvsdg::Region & region) const { region.prune(false); - std::vector> nodesTopDown(region.nnodes()); + std::vector> nodesTopDown(region.nnodes()); for (auto & node : region.Nodes()) { nodesTopDown[node.depth()].push_back(&node); diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp index 
b0bf15b0a..8d217eadd 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.cpp @@ -121,7 +121,7 @@ PointerObjectSet::CreateDummyRegisterPointerObject() } PointerObjectIndex -PointerObjectSet::CreateAllocaMemoryObject(const rvsdg::node & allocaNode, bool canPoint) +PointerObjectSet::CreateAllocaMemoryObject(const rvsdg::Node & allocaNode, bool canPoint) { JLM_ASSERT(AllocaMap_.count(&allocaNode) == 0); return AllocaMap_[&allocaNode] = @@ -129,7 +129,7 @@ PointerObjectSet::CreateAllocaMemoryObject(const rvsdg::node & allocaNode, bool } PointerObjectIndex -PointerObjectSet::CreateMallocMemoryObject(const rvsdg::node & mallocNode, bool canPoint) +PointerObjectSet::CreateMallocMemoryObject(const rvsdg::Node & mallocNode, bool canPoint) { JLM_ASSERT(MallocMap_.count(&mallocNode) == 0); return MallocMap_[&mallocNode] = @@ -187,13 +187,13 @@ PointerObjectSet::GetRegisterMap() const noexcept return RegisterMap_; } -const std::unordered_map & +const std::unordered_map & PointerObjectSet::GetAllocaMap() const noexcept { return AllocaMap_; } -const std::unordered_map & +const std::unordered_map & PointerObjectSet::GetMallocMap() const noexcept { return MallocMap_; diff --git a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp index 0a52599b6..d662f2149 100644 --- a/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp +++ b/jlm/llvm/opt/alias-analyses/PointerObjectSet.hpp @@ -167,9 +167,9 @@ class PointerObjectSet final // Unlike the other maps, several rvsdg::output* can share register PointerObject std::unordered_map RegisterMap_; - std::unordered_map AllocaMap_; + std::unordered_map AllocaMap_; - std::unordered_map MallocMap_; + std::unordered_map MallocMap_; std::unordered_map GlobalMap_; @@ -273,10 +273,10 @@ class PointerObjectSet final CreateDummyRegisterPointerObject(); [[nodiscard]] PointerObjectIndex - CreateAllocaMemoryObject(const rvsdg::node & allocaNode, bool 
canPoint); + CreateAllocaMemoryObject(const rvsdg::Node & allocaNode, bool canPoint); [[nodiscard]] PointerObjectIndex - CreateMallocMemoryObject(const rvsdg::node & mallocNode, bool canPoint); + CreateMallocMemoryObject(const rvsdg::Node & mallocNode, bool canPoint); [[nodiscard]] PointerObjectIndex CreateGlobalMemoryObject(const delta::node & deltaNode, bool canPoint); @@ -312,10 +312,10 @@ class PointerObjectSet final const std::unordered_map & GetRegisterMap() const noexcept; - const std::unordered_map & + const std::unordered_map & GetAllocaMap() const noexcept; - const std::unordered_map & + const std::unordered_map & GetMallocMap() const noexcept; const std::unordered_map & diff --git a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp index 740df1da0..8e2e049fe 100644 --- a/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp +++ b/jlm/llvm/opt/alias-analyses/PointsToGraph.hpp @@ -49,16 +49,14 @@ class PointsToGraph final class UnknownMemoryNode; class ExternalMemoryNode; - using AllocaNodeMap = - std::unordered_map>; + using AllocaNodeMap = std::unordered_map>; using DeltaNodeMap = std::unordered_map>; using ImportNodeMap = std::unordered_map>; using LambdaNodeMap = std::unordered_map>; - using MallocNodeMap = - std::unordered_map>; + using MallocNodeMap = std::unordered_map>; using RegisterNodeMap = std::unordered_map; using RegisterNodeVector = std::vector>; @@ -269,7 +267,7 @@ class PointsToGraph final } const PointsToGraph::AllocaNode & - GetAllocaNode(const jlm::rvsdg::node & node) const + GetAllocaNode(const rvsdg::Node & node) const { auto it = AllocaNodes_.find(&node); if (it == AllocaNodes_.end()) @@ -309,7 +307,7 @@ class PointsToGraph final } const PointsToGraph::MallocNode & - GetMallocNode(const jlm::rvsdg::node & node) const + GetMallocNode(const rvsdg::Node & node) const { auto it = MallocNodes_.find(&node); if (it == MallocNodes_.end()) @@ -617,7 +615,7 @@ class PointsToGraph::AllocaNode final : public 
PointsToGraph::MemoryNode ~AllocaNode() noexcept override; private: - AllocaNode(PointsToGraph & pointsToGraph, const jlm::rvsdg::node & allocaNode) + AllocaNode(PointsToGraph & pointsToGraph, const rvsdg::Node & allocaNode) : MemoryNode(pointsToGraph), AllocaNode_(&allocaNode) { @@ -625,7 +623,7 @@ class PointsToGraph::AllocaNode final : public PointsToGraph::MemoryNode } public: - const jlm::rvsdg::node & + const rvsdg::Node & GetAllocaNode() const noexcept { return *AllocaNode_; @@ -635,14 +633,14 @@ class PointsToGraph::AllocaNode final : public PointsToGraph::MemoryNode DebugString() const override; static PointsToGraph::AllocaNode & - Create(PointsToGraph & pointsToGraph, const jlm::rvsdg::node & node) + Create(PointsToGraph & pointsToGraph, const rvsdg::Node & node) { auto n = std::unique_ptr(new AllocaNode(pointsToGraph, node)); return pointsToGraph.AddAllocaNode(std::move(n)); } private: - const jlm::rvsdg::node * AllocaNode_; + const rvsdg::Node * AllocaNode_; }; /** \brief PointsTo graph delta node @@ -691,7 +689,7 @@ class PointsToGraph::MallocNode final : public PointsToGraph::MemoryNode ~MallocNode() noexcept override; private: - MallocNode(PointsToGraph & pointsToGraph, const jlm::rvsdg::node & mallocNode) + MallocNode(PointsToGraph & pointsToGraph, const rvsdg::Node & mallocNode) : MemoryNode(pointsToGraph), MallocNode_(&mallocNode) { @@ -699,7 +697,7 @@ class PointsToGraph::MallocNode final : public PointsToGraph::MemoryNode } public: - const jlm::rvsdg::node & + const rvsdg::Node & GetMallocNode() const noexcept { return *MallocNode_; @@ -709,14 +707,14 @@ class PointsToGraph::MallocNode final : public PointsToGraph::MemoryNode DebugString() const override; static PointsToGraph::MallocNode & - Create(PointsToGraph & pointsToGraph, const jlm::rvsdg::node & node) + Create(PointsToGraph & pointsToGraph, const rvsdg::Node & node) { auto n = std::unique_ptr(new MallocNode(pointsToGraph, node)); return pointsToGraph.AddMallocNode(std::move(n)); } 
private: - const jlm::rvsdg::node * MallocNode_; + const rvsdg::Node * MallocNode_; }; /** \brief PointsTo graph malloc node diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 483b72950..0508e7109 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -316,7 +316,7 @@ class AllocaLocation final : public MemoryLocation ~AllocaLocation() override = default; - explicit AllocaLocation(const jlm::rvsdg::node & node) + explicit AllocaLocation(const rvsdg::Node & node) : MemoryLocation(), Node_(node) { @@ -324,7 +324,7 @@ class AllocaLocation final : public MemoryLocation } public: - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetNode() const noexcept { return Node_; @@ -337,13 +337,13 @@ class AllocaLocation final : public MemoryLocation } static std::unique_ptr - Create(const jlm::rvsdg::node & node) + Create(const rvsdg::Node & node) { return std::unique_ptr(new AllocaLocation(node)); } private: - const jlm::rvsdg::node & Node_; + const rvsdg::Node & Node_; }; /** \brief MallocLocation class @@ -354,7 +354,7 @@ class MallocLocation final : public MemoryLocation { ~MallocLocation() override = default; - explicit MallocLocation(const jlm::rvsdg::node & node) + explicit MallocLocation(const rvsdg::Node & node) : MemoryLocation(), Node_(node) { @@ -362,7 +362,7 @@ class MallocLocation final : public MemoryLocation } public: - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetNode() const noexcept { return Node_; @@ -375,13 +375,13 @@ class MallocLocation final : public MemoryLocation } static std::unique_ptr - Create(const jlm::rvsdg::node & node) + Create(const rvsdg::Node & node) { return std::unique_ptr(new MallocLocation(node)); } private: - const jlm::rvsdg::node & Node_; + const rvsdg::Node & Node_; }; /** \brief LambdaLocation class @@ -566,7 +566,7 @@ class Steensgaard::Context final } Location & 
- InsertAllocaLocation(const jlm::rvsdg::node & node) + InsertAllocaLocation(const rvsdg::Node & node) { Locations_.push_back(AllocaLocation::Create(node)); auto location = Locations_.back().get(); @@ -576,7 +576,7 @@ class Steensgaard::Context final } Location & - InsertMallocLocation(const jlm::rvsdg::node & node) + InsertMallocLocation(const rvsdg::Node & node) { Locations_.push_back(MallocLocation::Create(node)); auto location = Locations_.back().get(); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 7c6fbe2f0..159b493bb 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -90,7 +90,7 @@ class cnectx } inline void - mark(const jlm::rvsdg::node * n1, const jlm::rvsdg::node * n2) + mark(const rvsdg::Node * n1, const rvsdg::Node * n2) { JLM_ASSERT(n1->noutputs() == n2->noutputs()); @@ -450,7 +450,7 @@ divert_users(jlm::rvsdg::output * output, cnectx & ctx) } static void -divert_outputs(jlm::rvsdg::node * node, cnectx & ctx) +divert_outputs(rvsdg::Node * node, cnectx & ctx) { for (size_t n = 0; n < node->noutputs(); n++) divert_users(node->output(n), ctx); diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 2deb867e5..6e319c2ab 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -90,14 +90,14 @@ pullin(rvsdg::GammaNode * gamma, rvsdg::ThetaNode * theta) pullin_top(gamma); } -static std::vector> +static std::vector> collect_condition_nodes(rvsdg::StructuralNode * tnode, jlm::rvsdg::StructuralNode * gnode) { JLM_ASSERT(is(tnode)); JLM_ASSERT(rvsdg::is(gnode)); JLM_ASSERT(gnode->region()->node() == tnode); - std::vector> nodes; + std::vector> nodes; for (auto & node : tnode->subregion(0)->Nodes()) { if (&node == gnode) @@ -115,7 +115,7 @@ static void copy_condition_nodes( rvsdg::Region * target, rvsdg::SubstitutionMap & smap, - const std::vector> & nodes) + const std::vector> & nodes) { for (size_t n = 0; n < nodes.size(); n++) { diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 
2fe54ce7d..1b261b19a 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -57,9 +57,9 @@ empty(const rvsdg::GammaNode * gamma) } static bool -single_successor(const jlm::rvsdg::node * node) +single_successor(const rvsdg::Node * node) { - std::unordered_set successors; + std::unordered_set successors; for (size_t n = 0; n < node->noutputs(); n++) { for (const auto & user : *node->output(n)) @@ -80,7 +80,7 @@ remove(rvsdg::input * input) } static void -pullin_node(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) +pullin_node(rvsdg::GammaNode * gamma, rvsdg::Node * node) { /* collect operands */ std::vector> operands(gamma->nsubregions()); @@ -112,7 +112,7 @@ pullin_node(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) } static void -cleanup(rvsdg::GammaNode * gamma, jlm::rvsdg::node * node) +cleanup(rvsdg::GammaNode * gamma, rvsdg::Node * node) { JLM_ASSERT(single_successor(node)); @@ -156,7 +156,7 @@ void pullin_bottom(rvsdg::GammaNode * gamma) { /* collect immediate successors of the gamma node */ - std::unordered_set workset; + std::unordered_set workset; for (size_t n = 0; n < gamma->noutputs(); n++) { auto output = gamma->output(n); @@ -217,7 +217,7 @@ pullin_bottom(rvsdg::GammaNode * gamma) } static size_t -is_used_in_nsubregions(const rvsdg::GammaNode * gamma, const jlm::rvsdg::node * node) +is_used_in_nsubregions(const rvsdg::GammaNode * gamma, const rvsdg::Node * node) { JLM_ASSERT(single_successor(node)); diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 4a3f7d210..74436f853 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -51,7 +51,7 @@ class worklist { public: inline void - push_back(jlm::rvsdg::node * node) noexcept + push_back(rvsdg::Node * node) noexcept { if (set_.find(node) != set_.end()) return; @@ -60,7 +60,7 @@ class worklist set_.insert(node); } - inline jlm::rvsdg::node * + rvsdg::Node * pop_front() noexcept { JLM_ASSERT(!empty()); @@ -78,12 +78,12 @@ class worklist } private: - std::deque queue_; 
- std::unordered_set set_; + std::deque queue_; + std::unordered_set set_; }; static bool -has_side_effects(const jlm::rvsdg::node * node) +has_side_effects(const rvsdg::Node * node) { for (size_t n = 0; n < node->noutputs(); n++) { @@ -95,7 +95,7 @@ has_side_effects(const jlm::rvsdg::node * node) } static std::vector -copy_from_gamma(jlm::rvsdg::node * node, size_t r) +copy_from_gamma(rvsdg::Node * node, size_t r) { JLM_ASSERT(jlm::rvsdg::is(node->region()->node())); JLM_ASSERT(node->depth() == 0); @@ -124,7 +124,7 @@ copy_from_gamma(jlm::rvsdg::node * node, size_t r) } static std::vector -copy_from_theta(jlm::rvsdg::node * node) +copy_from_theta(rvsdg::Node * node) { JLM_ASSERT(is(node->region()->node())); JLM_ASSERT(node->depth() == 0); @@ -153,7 +153,7 @@ copy_from_theta(jlm::rvsdg::node * node) } static bool -is_gamma_top_pushable(const jlm::rvsdg::node * node) +is_gamma_top_pushable(const rvsdg::Node * node) { return !has_side_effects(node); } @@ -211,7 +211,7 @@ push(rvsdg::GammaNode * gamma) static bool is_theta_invariant( - const jlm::rvsdg::node * node, + const rvsdg::Node * node, const std::unordered_set & invariants) { JLM_ASSERT(is(node->region()->node())); @@ -294,7 +294,7 @@ is_invariant(const rvsdg::RegionArgument * argument) } static bool -is_movable_store(jlm::rvsdg::node * node) +is_movable_store(rvsdg::Node * node) { JLM_ASSERT(is(node->region()->node())); JLM_ASSERT(jlm::rvsdg::is(node)); @@ -324,7 +324,7 @@ is_movable_store(jlm::rvsdg::node * node) } static void -pushout_store(jlm::rvsdg::node * storenode) +pushout_store(rvsdg::Node * storenode) { JLM_ASSERT(is(storenode->region()->node())); JLM_ASSERT(jlm::rvsdg::is(storenode) && is_movable_store(storenode)); diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index 8c780e509..bcb64e4ec 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -51,8 +51,8 @@ class unrollinfo final private: inline unrollinfo( - jlm::rvsdg::node * cmpnode, - jlm::rvsdg::node * armnode, + 
rvsdg::Node * cmpnode, + rvsdg::Node * armnode, rvsdg::RegionArgument * idv, rvsdg::RegionArgument * step, rvsdg::RegionArgument * end) @@ -109,7 +109,7 @@ class unrollinfo final std::unique_ptr niterations() const noexcept; - inline jlm::rvsdg::node * + rvsdg::Node * cmpnode() const noexcept { return cmpnode_; @@ -121,7 +121,7 @@ class unrollinfo final return *static_cast(&cmpnode()->GetOperation()); } - inline jlm::rvsdg::node * + inline rvsdg::Node * armnode() const noexcept { return armnode_; @@ -227,8 +227,8 @@ class unrollinfo final rvsdg::RegionArgument * end_; rvsdg::RegionArgument * step_; - jlm::rvsdg::node * cmpnode_; - jlm::rvsdg::node * armnode_; + rvsdg::Node * cmpnode_; + rvsdg::Node * armnode_; rvsdg::RegionArgument * idv_; }; diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index 2c7104549..d08fc86ae 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -71,8 +71,8 @@ JlmToMlirConverter::ConvertRegion(rvsdg::Region & region, ::mlir::Block & block) // Create an MLIR operation for each RVSDG node and store each pair in a // hash map for easy lookup of corresponding MLIR operation - std::unordered_map operationsMap; - for (rvsdg::node * rvsdgNode : rvsdg::topdown_traverser(®ion)) + std::unordered_map operationsMap; + for (rvsdg::Node * rvsdgNode : rvsdg::topdown_traverser(®ion)) { ::llvm::SmallVector<::mlir::Value> inputs = GetConvertedInputs(*rvsdgNode, operationsMap, block); @@ -114,8 +114,8 @@ JlmToMlirConverter::ConvertRegion(rvsdg::Region & region, ::mlir::Block & block) ::llvm::SmallVector<::mlir::Value> JlmToMlirConverter::GetConvertedInputs( - const rvsdg::node & node, - const std::unordered_map & operationsMap, + const rvsdg::Node & node, + const std::unordered_map & operationsMap, ::mlir::Block & block) { ::llvm::SmallVector<::mlir::Value> inputs; @@ -148,7 +148,7 @@ JlmToMlirConverter::GetConvertedInputs( ::mlir::Operation * 
JlmToMlirConverter::ConvertNode( - const rvsdg::node & node, + const rvsdg::Node & node, ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs) { diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index b5ff91471..35b0dcf12 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -96,8 +96,8 @@ class JlmToMlirConverter final */ static ::llvm::SmallVector<::mlir::Value> GetConvertedInputs( - const rvsdg::node & node, - const std::unordered_map & operationsMap, + const rvsdg::Node & node, + const std::unordered_map & operationsMap, ::mlir::Block & block); /** @@ -109,7 +109,7 @@ class JlmToMlirConverter final */ ::mlir::Operation * ConvertNode( - const rvsdg::node & node, + const rvsdg::Node & node, ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs); diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index 726434ac6..63bc82e80 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -53,7 +53,7 @@ MlirToJlmConverter::ConvertRegion(::mlir::Region & region, rvsdg::Region & rvsdg ::llvm::SmallVector MlirToJlmConverter::GetConvertedInputs( ::mlir::Operation & mlirOp, - const std::unordered_map<::mlir::Operation *, rvsdg::node *> & operationsMap, + const std::unordered_map<::mlir::Operation *, rvsdg::Node *> & operationsMap, const rvsdg::Region & rvsdgRegion) { ::llvm::SmallVector inputs; @@ -83,7 +83,7 @@ MlirToJlmConverter::ConvertBlock(::mlir::Block & block, rvsdg::Region & rvsdgReg // Create an RVSDG node for each MLIR operation and store each pair in a // hash map for easy lookup of corresponding RVSDG nodes - std::unordered_map<::mlir::Operation *, rvsdg::node *> operationsMap; + std::unordered_map<::mlir::Operation *, rvsdg::Node *> operationsMap; for (auto & mlirOp : block.getOperations()) { ::llvm::SmallVector inputs = @@ -101,7 +101,7 @@ 
MlirToJlmConverter::ConvertBlock(::mlir::Block & block, rvsdg::Region & rvsdgReg return GetConvertedInputs(*terminator, operationsMap, rvsdgRegion); } -rvsdg::node * +rvsdg::Node * MlirToJlmConverter::ConvertCmpIOp( ::mlir::arith::CmpIOp & CompOp, const ::llvm::SmallVector & inputs, @@ -153,7 +153,7 @@ MlirToJlmConverter::ConvertCmpIOp( } } -rvsdg::node * +rvsdg::Node * MlirToJlmConverter::ConvertBitBinaryNode( const ::mlir::Operation & mlirOperation, const ::llvm::SmallVector & inputs) @@ -255,7 +255,7 @@ MlirToJlmConverter::ConvertBitBinaryNode( return nullptr; } -rvsdg::node * +rvsdg::Node * MlirToJlmConverter::ConvertOperation( ::mlir::Operation & mlirOperation, rvsdg::Region & rvsdgRegion, @@ -428,7 +428,7 @@ MlirToJlmConverter::ConvertOmega(::mlir::Operation & mlirOmega, rvsdg::Region & ConvertRegion(mlirOmega.getRegion(0), rvsdgRegion); } -jlm::rvsdg::node * +rvsdg::Node * MlirToJlmConverter::ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::Region & rvsdgRegion) { // Get the name of the function diff --git a/jlm/mlir/frontend/MlirToJlmConverter.hpp b/jlm/mlir/frontend/MlirToJlmConverter.hpp index 65755af7a..46b432e7b 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.hpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.hpp @@ -110,7 +110,7 @@ class MlirToJlmConverter final static ::llvm::SmallVector GetConvertedInputs( ::mlir::Operation & mlirOp, - const std::unordered_map<::mlir::Operation *, rvsdg::node *> & operationsMap, + const std::unordered_map<::mlir::Operation *, rvsdg::Node *> & operationsMap, const rvsdg::Region & rvsdgRegion); /** @@ -120,7 +120,7 @@ class MlirToJlmConverter final * \param nbits The number of bits in the comparison. * \result The converted RVSDG node. */ - rvsdg::node * + rvsdg::Node * ConvertCmpIOp( ::mlir::arith::CmpIOp & CompOp, const ::llvm::SmallVector & inputs, @@ -132,7 +132,7 @@ class MlirToJlmConverter final * \param inputs The inputs for the RVSDG node. 
* \result The converted RVSDG node OR nullptr if the operation cannot be casted to an operation */ - rvsdg::node * + rvsdg::Node * ConvertBitBinaryNode( const ::mlir::Operation & mlirOperation, const ::llvm::SmallVector & inputs); @@ -144,7 +144,7 @@ class MlirToJlmConverter final * \param inputs The inputs for the RVSDG node. * \result The converted RVSDG node. */ - rvsdg::node * + rvsdg::Node * ConvertOperation( ::mlir::Operation & mlirOperation, rvsdg::Region & rvsdgRegion, @@ -164,7 +164,7 @@ class MlirToJlmConverter final * \param rvsdgRegion The RVSDG region that the lambda node will reside in. * \result The converted Lambda node. */ - rvsdg::node * + rvsdg::Node * ConvertLambda(::mlir::Operation & mlirLambda, rvsdg::Region & rvsdgRegion); /** diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 1072a2916..7f0a79918 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -77,7 +77,7 @@ binary_normal_form::binary_normal_form( } bool -binary_normal_form::normalize_node(jlm::rvsdg::node * node) const +binary_normal_form::normalize_node(Node * node) const { const operation & base_op = node->GetOperation(); const auto & op = *static_cast(&base_op); @@ -86,7 +86,7 @@ binary_normal_form::normalize_node(jlm::rvsdg::node * node) const } bool -binary_normal_form::normalize_node(jlm::rvsdg::node * node, const binary_op & op) const +binary_normal_form::normalize_node(Node * node, const binary_op & op) const { if (!get_mutable()) { @@ -286,7 +286,7 @@ flattened_binary_normal_form::flattened_binary_normal_form( {} bool -flattened_binary_normal_form::normalize_node(jlm::rvsdg::node * node) const +flattened_binary_normal_form::normalize_node(Node * node) const { const auto & op = static_cast(node->GetOperation()); const auto & bin_op = op.bin_operation(); @@ -441,10 +441,6 @@ flattened_binary_op::reduce( } -/* node class */ - -/* node class inheritable methods */ - jlm::rvsdg::node_normal_form * binary_operation_get_default_normal_form_( const 
std::type_info & operator_class, diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index dd5e66705..5daad18d8 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -30,7 +30,7 @@ class binary_normal_form final : public simple_normal_form Graph * graph); virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(Node * node) const override; virtual std::vector normalized_create( @@ -85,7 +85,7 @@ class binary_normal_form final : public simple_normal_form private: bool - normalize_node(jlm::rvsdg::node * node, const binary_op & op) const; + normalize_node(Node * node, const binary_op & op) const; bool enable_reducible_; bool enable_reorder_; @@ -107,7 +107,7 @@ class flattened_binary_normal_form final : public simple_normal_form Graph * graph); virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(Node * node) const override; virtual std::vector normalized_create( diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 38f018993..6f4def471 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -93,7 +93,7 @@ class concat_normal_form final : public simple_normal_form {} virtual bool - normalize_node(jlm::rvsdg::node * node) const override + normalize_node(Node * node) const override { if (!get_mutable()) { diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 43dba7bf6..c53eaadf1 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -170,7 +170,7 @@ gamma_normal_form::gamma_normal_form( } bool -gamma_normal_form::normalize_node(jlm::rvsdg::node * node_) const +gamma_normal_form::normalize_node(Node * node_) const { auto node = util::AssertedCast(node_); @@ -273,15 +273,15 @@ GammaNode::~GammaNode() noexcept = default; GammaNode::GammaNode(rvsdg::output * predicate, size_t nalternatives) : StructuralNode(GammaOperation(nalternatives), predicate->region(), nalternatives) { - node::add_input(std::unique_ptr( - new 
rvsdg::StructuralInput(this, predicate, ControlType::Create(nalternatives)))); + add_input(std::unique_ptr( + new StructuralInput(this, predicate, ControlType::Create(nalternatives)))); } GammaNode::EntryVar GammaNode::AddEntryVar(rvsdg::output * origin) { auto gammaInput = new StructuralInput(this, origin, origin->Type()); - node::add_input(std::unique_ptr(gammaInput)); + add_input(std::unique_ptr(gammaInput)); EntryVar ev; ev.input = gammaInput; @@ -341,8 +341,8 @@ GammaNode::AddExitVar(std::vector values) throw jlm::util::error("Incorrect number of values."); const auto & type = values[0]->Type(); - auto output = static_cast( - node::add_output(std::make_unique(this, type))); + auto output = + static_cast(add_output(std::make_unique(this, type))); std::vector branchResults; for (size_t n = 0; n < nsubregions(); n++) @@ -379,7 +379,7 @@ GammaNode::MapOutputExitVar(const rvsdg::output & output) const { branchResults.push_back(subregion(k)->result(output.index())); } - return ExitVar{ std::move(branchResults), node::output(output.index()) }; + return ExitVar{ std::move(branchResults), Node::output(output.index()) }; } GammaNode::ExitVar @@ -391,7 +391,7 @@ GammaNode::MapBranchResultExitVar(const rvsdg::input & input) const { branchResults.push_back(subregion(k)->result(input.index())); } - return ExitVar{ std::move(branchResults), node::output(input.index()) }; + return ExitVar{ std::move(branchResults), Node::output(input.index()) }; } GammaNode * diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index d842f10f4..8e3878a4c 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -30,7 +30,7 @@ class gamma_normal_form final : public structural_normal_form Graph * graph) noexcept; virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(Node * node) const override; virtual void set_predicate_reduction(bool enable); diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index 9d00b8e50..c7286cf1f 100644 --- 
a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -64,7 +64,7 @@ Graph::node_normal_form(const std::type_info & type) noexcept return result; } -std::vector +std::vector Graph::ExtractTailNodes(const Graph & rvsdg) { auto IsOnlyExported = [](const rvsdg::output & output) @@ -89,7 +89,7 @@ Graph::ExtractTailNodes(const Graph & rvsdg) auto & rootRegion = *rvsdg.root(); - std::vector nodes; + std::vector nodes; for (auto & bottomNode : rootRegion.BottomNodes()) { nodes.push_back(&bottomNode); diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index a24813142..939151112 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -107,7 +107,7 @@ class Graph * @param rvsdg The RVSDG from which to extract the tail nodes. * @return A vector of tail nodes. */ - static std::vector + static std::vector ExtractTailNodes(const Graph & rvsdg); private: diff --git a/jlm/rvsdg/node-normal-form.cpp b/jlm/rvsdg/node-normal-form.cpp index 3a99d30e5..a808f65d3 100644 --- a/jlm/rvsdg/node-normal-form.cpp +++ b/jlm/rvsdg/node-normal-form.cpp @@ -16,7 +16,7 @@ node_normal_form::~node_normal_form() noexcept {} bool -node_normal_form::normalize_node(jlm::rvsdg::node * node) const +node_normal_form::normalize_node(Node * node) const { return true; } diff --git a/jlm/rvsdg/node-normal-form.hpp b/jlm/rvsdg/node-normal-form.hpp index 79e0da003..61b5256bb 100644 --- a/jlm/rvsdg/node-normal-form.hpp +++ b/jlm/rvsdg/node-normal-form.hpp @@ -23,7 +23,7 @@ namespace jlm::rvsdg { class Graph; -class node; +class Node; class operation; class output; class Region; @@ -50,7 +50,7 @@ class node_normal_form } virtual bool - normalize_node(jlm::rvsdg::node * node) const; + normalize_node(Node * node) const; inline node_normal_form * parent() const noexcept diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 0efc8a9bf..7cbb8dc7b 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -69,7 +69,7 @@ input::divert_to(jlm::rvsdg::output * new_origin) on_input_change(this, old_origin, 
new_origin); } -rvsdg::node * +Node * input::GetNode(const rvsdg::input & input) noexcept { auto nodeInput = dynamic_cast(&input); @@ -95,7 +95,7 @@ output::debug_string() const return jlm::util::strfmt(index()); } -rvsdg::node * +Node * output::GetNode(const rvsdg::output & output) noexcept { auto nodeOutput = dynamic_cast(&output); @@ -161,13 +161,13 @@ namespace jlm::rvsdg node_input::node_input( jlm::rvsdg::output * origin, - jlm::rvsdg::node * node, + Node * node, std::shared_ptr type) : jlm::rvsdg::input(origin, node->region(), std::move(type)), node_(node) {} -[[nodiscard]] std::variant +[[nodiscard]] std::variant node_input::GetOwner() const noexcept { return node_; @@ -175,12 +175,12 @@ node_input::GetOwner() const noexcept /* node_output class */ -node_output::node_output(jlm::rvsdg::node * node, std::shared_ptr type) +node_output::node_output(Node * node, std::shared_ptr type) : jlm::rvsdg::output(node->region(), std::move(type)), node_(node) {} -[[nodiscard]] std::variant +[[nodiscard]] std::variant node_output::GetOwner() const noexcept { return node_; @@ -188,7 +188,7 @@ node_output::GetOwner() const noexcept /* node class */ -node::node(std::unique_ptr op, rvsdg::Region * region) +Node::Node(std::unique_ptr op, rvsdg::Region * region) : depth_(0), graph_(region->graph()), region_(region), @@ -202,7 +202,7 @@ node::node(std::unique_ptr op, rvsdg::Region * region) JLM_ASSERT(wasAdded); } -node::~node() +Node::~Node() { outputs_.clear(); bool wasRemoved = region()->RemoveBottomNode(*this); @@ -220,7 +220,7 @@ node::~node() } node_input * -node::add_input(std::unique_ptr input) +Node::add_input(std::unique_ptr input) { auto producer = output::GetNode(*input->origin()); @@ -242,7 +242,7 @@ node::add_input(std::unique_ptr input) } void -node::RemoveInput(size_t index) +Node::RemoveInput(size_t index) { JLM_ASSERT(index < ninputs()); auto producer = output::GetNode(*input(index)->origin()); @@ -275,7 +275,7 @@ node::RemoveInput(size_t index) } void 
-node::RemoveOutput(size_t index) +Node::RemoveOutput(size_t index) { JLM_ASSERT(index < noutputs()); @@ -288,7 +288,7 @@ node::RemoveOutput(size_t index) } void -node::recompute_depth() noexcept +Node::recompute_depth() noexcept { /* FIXME: This function is inefficient, as it can visit the @@ -322,8 +322,8 @@ node::recompute_depth() noexcept } } -jlm::rvsdg::node * -node::copy(rvsdg::Region * region, const std::vector & operands) const +Node * +Node::copy(rvsdg::Region * region, const std::vector & operands) const { SubstitutionMap smap; @@ -334,7 +334,7 @@ node::copy(rvsdg::Region * region, const std::vector & ope return copy(region, smap); } -jlm::rvsdg::node * +Node * producer(const jlm::rvsdg::output * output) noexcept { if (auto node = output::GetNode(*output)) @@ -354,7 +354,7 @@ producer(const jlm::rvsdg::output * output) noexcept } bool -normalize(jlm::rvsdg::node * node) +normalize(Node * node) { const auto & op = node->GetOperation(); auto nf = node->graph()->node_normal_form(typeid(op)); diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 0950d6e56..5c820c003 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -35,7 +35,7 @@ class SubstitutionMap; class input { - friend class jlm::rvsdg::node; + friend class Node; friend class rvsdg::Region; public: @@ -99,10 +99,10 @@ class input * @return The node associated with \p input if input is derived from jlm::rvsdg::node_input, * otherwise nullptr. 
*/ - [[nodiscard]] static rvsdg::node * + [[nodiscard]] static Node * GetNode(const rvsdg::input & input) noexcept; - [[nodiscard]] virtual std::variant + [[nodiscard]] virtual std::variant GetOwner() const noexcept = 0; template @@ -288,7 +288,7 @@ is(const jlm::rvsdg::input & input) noexcept class output { friend input; - friend class jlm::rvsdg::node; + friend class Node; friend class rvsdg::Region; typedef std::unordered_set::const_iterator user_iterator; @@ -378,7 +378,7 @@ class output virtual std::string debug_string() const; - [[nodiscard]] virtual std::variant + [[nodiscard]] virtual std::variant GetOwner() const noexcept = 0; /** @@ -389,7 +389,7 @@ class output * @return The node associated with \p output if output is derived from jlm::rvsdg::node_output, * otherwise nullptr. */ - [[nodiscard]] static rvsdg::node * + [[nodiscard]] static Node * GetNode(const rvsdg::output & output) noexcept; template @@ -581,22 +581,19 @@ is(const jlm::rvsdg::output * output) noexcept class node_input : public jlm::rvsdg::input { public: - node_input( - jlm::rvsdg::output * origin, - jlm::rvsdg::node * node, - std::shared_ptr type); + node_input(jlm::rvsdg::output * origin, Node * node, std::shared_ptr type); - jlm::rvsdg::node * + Node * node() const noexcept { return node_; } - [[nodiscard]] std::variant + [[nodiscard]] std::variant GetOwner() const noexcept override; private: - jlm::rvsdg::node * node_; + Node * node_; }; /* node_output class */ @@ -604,36 +601,36 @@ class node_input : public jlm::rvsdg::input class node_output : public jlm::rvsdg::output { public: - node_output(jlm::rvsdg::node * node, std::shared_ptr type); + node_output(Node * node, std::shared_ptr type); - jlm::rvsdg::node * + [[nodiscard]] Node * node() const noexcept { return node_; } - static jlm::rvsdg::node * + static Node * node(const jlm::rvsdg::output * output) { auto no = dynamic_cast(output); return no != nullptr ? 
no->node() : nullptr; } - [[nodiscard]] std::variant + [[nodiscard]] std::variant GetOwner() const noexcept override; private: - jlm::rvsdg::node * node_; + Node * node_; }; /* node class */ -class node +class Node { public: - virtual ~node(); + virtual ~Node(); - node(std::unique_ptr op, rvsdg::Region * region); + Node(std::unique_ptr op, Region * region); [[nodiscard]] virtual const operation & GetOperation() const noexcept @@ -764,7 +761,7 @@ class node // iterate backwards to avoid the invalidation of 'n' by RemoveInput() for (size_t n = ninputs() - 1; n != static_cast(-1); n--) { - auto & input = *node::input(n); + auto & input = *Node::input(n); if (match(input)) { RemoveInput(n); @@ -813,7 +810,7 @@ class node // iterate backwards to avoid the invalidation of 'n' by RemoveOutput() for (size_t n = noutputs() - 1; n != static_cast(-1); n--) { - auto & output = *node::output(n); + auto & output = *Node::output(n); if (output.nusers() == 0 && match(output)) { RemoveOutput(n); @@ -834,7 +831,7 @@ class node return region_; } - virtual jlm::rvsdg::node * + virtual Node * copy(rvsdg::Region * region, const std::vector & operands) const; /** @@ -853,7 +850,7 @@ class node corresponding outputs of the newly created node in subsequent \ref copy operations. 
*/ - virtual jlm::rvsdg::node * + virtual Node * copy(rvsdg::Region * region, SubstitutionMap & smap) const = 0; inline size_t @@ -863,24 +860,21 @@ class node } private: - jlm::util::intrusive_list_anchor region_node_list_anchor_; + util::intrusive_list_anchor region_node_list_anchor_; - jlm::util::intrusive_list_anchor region_top_node_list_anchor_; + util::intrusive_list_anchor region_top_node_list_anchor_; - jlm::util::intrusive_list_anchor region_bottom_node_list_anchor_; + util::intrusive_list_anchor region_bottom_node_list_anchor_; public: - typedef jlm::util:: - intrusive_list_accessor - region_node_list_accessor; + typedef util::intrusive_list_accessor + region_node_list_accessor; - typedef jlm::util:: - intrusive_list_accessor - region_top_node_list_accessor; + typedef util::intrusive_list_accessor + region_top_node_list_accessor; - typedef jlm::util:: - intrusive_list_accessor - region_bottom_node_list_accessor; + typedef util::intrusive_list_accessor + region_bottom_node_list_accessor; private: size_t depth_; @@ -916,7 +910,7 @@ inline NodeType * TryGetOwnerNode(const rvsdg::input & input) noexcept { auto owner = input.GetOwner(); - if (auto node = std::get_if(&owner)) + if (const auto node = std::get_if(&owner)) { return dynamic_cast(*node); } @@ -951,7 +945,7 @@ inline NodeType * TryGetOwnerNode(const rvsdg::output & output) noexcept { auto owner = output.GetOwner(); - if (auto node = std::get_if(&owner)) + if (const auto node = std::get_if(&owner)) { return dynamic_cast(*node); } @@ -1050,7 +1044,7 @@ TryGetOwnerRegion(const rvsdg::output & output) noexcept } static inline std::vector -operands(const jlm::rvsdg::node * node) +operands(const Node * node) { std::vector operands; for (size_t n = 0; n < node->ninputs(); n++) @@ -1059,7 +1053,7 @@ operands(const jlm::rvsdg::node * node) } static inline std::vector -outputs(const jlm::rvsdg::node * node) +outputs(const Node * node) { std::vector outputs; for (size_t n = 0; n < node->noutputs(); n++) @@ 
-1068,7 +1062,7 @@ outputs(const jlm::rvsdg::node * node) } static inline void -divert_users(jlm::rvsdg::node * node, const std::vector & outputs) +divert_users(Node * node, const std::vector & outputs) { JLM_ASSERT(node->noutputs() == outputs.size()); @@ -1078,7 +1072,7 @@ divert_users(jlm::rvsdg::node * node, const std::vector & template static inline bool -is(const jlm::rvsdg::node * node) noexcept +is(const Node * node) noexcept { if (!node) return false; @@ -1086,11 +1080,11 @@ is(const jlm::rvsdg::node * node) noexcept return is(node->GetOperation()); } -jlm::rvsdg::node * +Node * producer(const jlm::rvsdg::output * output) noexcept; bool -normalize(jlm::rvsdg::node * node); +normalize(Node * node); } diff --git a/jlm/rvsdg/notifiers.cpp b/jlm/rvsdg/notifiers.cpp index 00f7dfa3a..b99d8e08a 100644 --- a/jlm/rvsdg/notifiers.cpp +++ b/jlm/rvsdg/notifiers.cpp @@ -11,9 +11,9 @@ namespace jlm::rvsdg jlm::util::notifier on_region_create; jlm::util::notifier on_region_destroy; -jlm::util::notifier on_node_create; -jlm::util::notifier on_node_destroy; -jlm::util::notifier on_node_depth_change; +util::notifier on_node_create; +util::notifier on_node_destroy; +util::notifier on_node_depth_change; jlm::util::notifier on_input_create; jlm::util::notifier< diff --git a/jlm/rvsdg/notifiers.hpp b/jlm/rvsdg/notifiers.hpp index 78257cf9e..6902d821f 100644 --- a/jlm/rvsdg/notifiers.hpp +++ b/jlm/rvsdg/notifiers.hpp @@ -12,16 +12,16 @@ namespace jlm::rvsdg { class input; -class node; +class Node; class output; class Region; extern jlm::util::notifier on_region_create; extern jlm::util::notifier on_region_destroy; -extern jlm::util::notifier on_node_create; -extern jlm::util::notifier on_node_destroy; -extern jlm::util::notifier on_node_depth_change; +extern util::notifier on_node_create; +extern util::notifier on_node_destroy; +extern util::notifier on_node_depth_change; extern jlm::util::notifier on_input_create; extern jlm::util::notifier< diff --git a/jlm/rvsdg/operation.hpp 
b/jlm/rvsdg/operation.hpp index ddafe793f..e9d3bea66 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -17,7 +17,7 @@ namespace jlm::rvsdg { class Graph; -class node; +class Node; class node_normal_form; class output; class Region; diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 2a79aaeaa..815949db3 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -43,7 +43,7 @@ RegionArgument::RegionArgument( } } -[[nodiscard]] std::variant +[[nodiscard]] std::variant RegionArgument::GetOwner() const noexcept { return region(); @@ -96,7 +96,7 @@ RegionResult::RegionResult( } } -[[nodiscard]] std::variant +[[nodiscard]] std::variant RegionResult::GetOwner() const noexcept { return region(); @@ -220,13 +220,13 @@ Region::RemoveResult(size_t index) } void -Region::remove_node(jlm::rvsdg::node * node) +Region::remove_node(Node * node) { delete node; } bool -Region::AddTopNode(rvsdg::node & node) +Region::AddTopNode(Node & node) { if (node.region() != this) return false; @@ -241,7 +241,7 @@ Region::AddTopNode(rvsdg::node & node) } bool -Region::AddBottomNode(rvsdg::node & node) +Region::AddBottomNode(Node & node) { if (node.region() != this) return false; @@ -256,7 +256,7 @@ Region::AddBottomNode(rvsdg::node & node) } bool -Region::AddNode(rvsdg::node & node) +Region::AddNode(Node & node) { if (node.region() != this) return false; @@ -267,7 +267,7 @@ Region::AddNode(rvsdg::node & node) } bool -Region::RemoveBottomNode(rvsdg::node & node) +Region::RemoveBottomNode(Node & node) { auto numBottomNodes = NumBottomNodes(); BottomNodes_.erase(&node); @@ -275,7 +275,7 @@ Region::RemoveBottomNode(rvsdg::node & node) } bool -Region::RemoveTopNode(rvsdg::node & node) +Region::RemoveTopNode(Node & node) { auto numTopNodes = NumTopNodes(); TopNodes_.erase(&node); @@ -283,7 +283,7 @@ Region::RemoveTopNode(rvsdg::node & node) } bool -Region::RemoveNode(rvsdg::node & node) +Region::RemoveNode(Node & node) { auto numNodes = nnodes(); 
Nodes_.erase(&node); @@ -296,7 +296,7 @@ Region::copy(Region * target, SubstitutionMap & smap, bool copy_arguments, bool smap.insert(this, target); // order nodes top-down - std::vector> context(nnodes()); + std::vector> context(nnodes()); for (const auto & node : Nodes()) { JLM_ASSERT(node.depth() < context.size()); diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index e1efedda5..f77a21ddc 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -23,7 +23,7 @@ class AnnotationMap; namespace jlm::rvsdg { -class node; +class Node; class simple_node; class SimpleOperation; class StructuralInput; @@ -82,7 +82,7 @@ class RegionArgument : public output virtual RegionArgument & Copy(Region & region, StructuralInput * input); - [[nodiscard]] std::variant + [[nodiscard]] std::variant GetOwner() const noexcept override; /** @@ -163,7 +163,7 @@ class RegionResult : public input virtual RegionResult & Copy(rvsdg::output & origin, StructuralOutput * output); - [[nodiscard]] std::variant + [[nodiscard]] std::variant GetOwner() const noexcept override; /** @@ -214,16 +214,12 @@ class RegionResult : public input */ class Region { - typedef jlm::util::intrusive_list - region_nodes_list; + typedef util::intrusive_list region_nodes_list; - typedef jlm::util:: - intrusive_list - region_top_node_list; + typedef util::intrusive_list region_top_node_list; - typedef jlm::util:: - intrusive_list - region_bottom_node_list; + typedef util::intrusive_list + region_bottom_node_list; using RegionArgumentIterator = std::vector::iterator; using RegionArgumentConstIterator = std::vector::const_iterator; @@ -529,7 +525,7 @@ class Region } void - remove_node(jlm::rvsdg::node * node); + remove_node(Node * node); /** * \brief Adds \p node to the top nodes of the region. @@ -545,7 +541,7 @@ class Region * no need to invoke it manually. */ bool - AddTopNode(rvsdg::node & node); + AddTopNode(Node & node); /** * \brief Adds \p node to the bottom nodes of the region. 
@@ -561,7 +557,7 @@ class Region * no need to invoke it manually. */ bool - AddBottomNode(rvsdg::node & node); + AddBottomNode(Node & node); /** * \brief Adds \p node to the region. @@ -575,7 +571,7 @@ class Region * it manually. */ bool - AddNode(rvsdg::node & node); + AddNode(Node & node); /** * Removes \p node from the top nodes in the region. @@ -587,7 +583,7 @@ class Region * invoke it manually. */ bool - RemoveTopNode(rvsdg::node & node); + RemoveTopNode(Node & node); /** * Removes \p node from the bottom nodes in the region. @@ -599,7 +595,7 @@ class Region * invoke it manually. */ bool - RemoveBottomNode(rvsdg::node & node); + RemoveBottomNode(Node & node); /** * Remove \p node from the region. @@ -611,7 +607,7 @@ class Region * it manually. */ bool - RemoveNode(rvsdg::node & node); + RemoveNode(Node & node); /** \brief Copy a region with substitutions @@ -743,7 +739,7 @@ class Region }; static inline void -remove(jlm::rvsdg::node * node) +remove(Node * node) { return node->region()->remove_node(node); } diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 5e53ed561..5d4d5180c 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -49,7 +49,7 @@ simple_node::simple_node( rvsdg::Region * region, const SimpleOperation & op, const std::vector & operands) - : node(op.copy(), region) + : Node(op.copy(), region) { if (simple_node::GetOperation().narguments() != operands.size()) throw jlm::util::error(jlm::util::strfmt( @@ -74,10 +74,10 @@ simple_node::simple_node( const SimpleOperation & simple_node::GetOperation() const noexcept { - return *util::AssertedCast(&node::GetOperation()); + return *util::AssertedCast(&Node::GetOperation()); } -jlm::rvsdg::node * +Node * simple_node::copy(rvsdg::Region * region, const std::vector & operands) const { auto node = create(region, GetOperation(), operands); @@ -85,7 +85,7 @@ simple_node::copy(rvsdg::Region * region, const std::vector operands; diff --git a/jlm/rvsdg/simple-node.hpp 
b/jlm/rvsdg/simple-node.hpp index c6da042dd..1a7a8c02c 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -20,7 +20,7 @@ class simple_output; /* simple nodes */ -class simple_node : public node +class simple_node : public Node { public: virtual ~simple_node(); @@ -41,10 +41,10 @@ class simple_node : public node [[nodiscard]] const SimpleOperation & GetOperation() const noexcept override; - virtual jlm::rvsdg::node * + Node * copy(rvsdg::Region * region, const std::vector & operands) const override; - virtual jlm::rvsdg::node * + Node * copy(rvsdg::Region * region, SubstitutionMap & smap) const override; static inline jlm::rvsdg::simple_node * @@ -113,13 +113,13 @@ class simple_output final : public node_output inline jlm::rvsdg::simple_input * simple_node::input(size_t index) const noexcept { - return static_cast(node::input(index)); + return static_cast(Node::input(index)); } inline jlm::rvsdg::simple_output * simple_node::output(size_t index) const noexcept { - return static_cast(node::output(index)); + return static_cast(Node::output(index)); } } diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 2990d8d1b..249ec3dc1 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -6,13 +6,13 @@ #include #include -static jlm::rvsdg::node * +static jlm::rvsdg::Node * node_cse( jlm::rvsdg::Region * region, const jlm::rvsdg::operation & op, const std::vector & arguments) { - auto cse_test = [&](const jlm::rvsdg::node * node) + auto cse_test = [&](const jlm::rvsdg::Node * node) { return node->GetOperation() == op && arguments == operands(node); }; @@ -59,7 +59,7 @@ simple_normal_form::simple_normal_form( } bool -simple_normal_form::normalize_node(jlm::rvsdg::node * node) const +simple_normal_form::normalize_node(Node * node) const { if (!get_mutable()) return true; @@ -85,7 +85,7 @@ simple_normal_form::normalized_create( const SimpleOperation & op, const std::vector & arguments) const 
{ - jlm::rvsdg::node * node = nullptr; + Node * node = nullptr; if (get_mutable() && get_cse()) node = node_cse(region, op, arguments); if (!node) diff --git a/jlm/rvsdg/simple-normal-form.hpp b/jlm/rvsdg/simple-normal-form.hpp index 18bc3f615..f1d322ce2 100644 --- a/jlm/rvsdg/simple-normal-form.hpp +++ b/jlm/rvsdg/simple-normal-form.hpp @@ -24,7 +24,7 @@ class simple_normal_form : public node_normal_form Graph * graph) noexcept; virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(Node * node) const override; virtual std::vector normalized_create( diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 96706d09e..3967fb89f 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -37,7 +37,7 @@ mux_op::copy() const /* mux normal form */ -static jlm::rvsdg::node * +static Node * is_mux_mux_reducible(const std::vector & ops) { std::unordered_set operands(ops.begin(), ops.end()); @@ -81,7 +81,7 @@ perform_multiple_origin_reduction( static std::vector perform_mux_mux_reduction( const jlm::rvsdg::mux_op & op, - const jlm::rvsdg::node * muxnode, + const Node * muxnode, const std::vector & old_operands) { JLM_ASSERT(is_mux_op(muxnode->GetOperation())); @@ -121,7 +121,7 @@ mux_normal_form::mux_normal_form( } bool -mux_normal_form::normalize_node(jlm::rvsdg::node * node) const +mux_normal_form::normalize_node(Node * node) const { JLM_ASSERT(dynamic_cast(&node->GetOperation())); auto op = static_cast(&node->GetOperation()); diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index 5d57a453a..d0c39fb0e 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -27,7 +27,7 @@ class mux_normal_form final : public simple_normal_form Graph * graph) noexcept; virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(Node * node) const override; virtual std::vector normalized_create( diff --git a/jlm/rvsdg/structural-node.cpp b/jlm/rvsdg/structural-node.cpp index 
3446b2220..964335c88 100644 --- a/jlm/rvsdg/structural-node.cpp +++ b/jlm/rvsdg/structural-node.cpp @@ -57,7 +57,7 @@ StructuralNode::StructuralNode( const StructuralOperation & op, rvsdg::Region * region, size_t nsubregions) - : node(op.copy(), region) + : Node(op.copy(), region) { if (nsubregions == 0) throw jlm::util::error("Number of subregions must be greater than zero."); diff --git a/jlm/rvsdg/structural-node.hpp b/jlm/rvsdg/structural-node.hpp index 2f639c942..a10e7a8c5 100644 --- a/jlm/rvsdg/structural-node.hpp +++ b/jlm/rvsdg/structural-node.hpp @@ -18,7 +18,7 @@ class StructuralInput; class StructuralOperation; class StructuralOutput; -class StructuralNode : public node +class StructuralNode : public Node { public: ~StructuralNode() noexcept override; @@ -56,9 +56,9 @@ class StructuralNode : public node StructuralOutput * append_output(std::unique_ptr output); - using node::RemoveInput; + using Node::RemoveInput; - using node::RemoveOutput; + using Node::RemoveOutput; private: std::vector> subregions_; @@ -135,13 +135,13 @@ class StructuralOutput : public node_output inline StructuralInput * StructuralNode::input(size_t index) const noexcept { - return static_cast(node::input(index)); + return static_cast(Node::input(index)); } inline StructuralOutput * StructuralNode::output(size_t index) const noexcept { - return static_cast(node::output(index)); + return static_cast(Node::output(index)); } template diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 088df296d..621054abe 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -100,8 +100,8 @@ ThetaNode::loopvar_iterator::operator++() noexcept ThetaOutput * ThetaNode::add_loopvar(jlm::rvsdg::output * origin) { - node::add_input(std::make_unique(this, origin, origin->Type())); - node::add_output(std::make_unique(this, origin->Type())); + Node::add_input(std::make_unique(this, origin, origin->Type())); + Node::add_output(std::make_unique(this, origin->Type())); auto input = 
ThetaNode::input(ninputs() - 1); auto output = ThetaNode::output(noutputs() - 1); diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 407ddc8a4..1acac7be1 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -432,13 +432,13 @@ is_invariant(const ThetaOutput * output) noexcept inline ThetaInput * ThetaNode::input(size_t index) const noexcept { - return static_cast(node::input(index)); + return static_cast(Node::input(index)); } inline ThetaOutput * ThetaNode::output(size_t index) const noexcept { - return static_cast(node::output(index)); + return static_cast(Node::output(index)); } template diff --git a/jlm/rvsdg/tracker.cpp b/jlm/rvsdg/tracker.cpp index b5a1a3fe7..aaed8ec17 100644 --- a/jlm/rvsdg/tracker.cpp +++ b/jlm/rvsdg/tracker.cpp @@ -178,7 +178,7 @@ tracker::tracker(Graph * graph, size_t nstates) } void -tracker::node_depth_change(jlm::rvsdg::node * node, size_t old_depth) +tracker::node_depth_change(Node * node, size_t old_depth) { auto nstate = nodestate(node); if (nstate->state() < states_.size()) @@ -189,7 +189,7 @@ tracker::node_depth_change(jlm::rvsdg::node * node, size_t old_depth) } void -tracker::node_destroy(jlm::rvsdg::node * node) +tracker::node_destroy(Node * node) { auto nstate = nodestate(node); if (nstate->state() < states_.size()) @@ -199,13 +199,13 @@ tracker::node_destroy(jlm::rvsdg::node * node) } ssize_t -tracker::get_nodestate(jlm::rvsdg::node * node) +tracker::get_nodestate(Node * node) { return nodestate(node)->state(); } void -tracker::set_nodestate(jlm::rvsdg::node * node, size_t state) +tracker::set_nodestate(Node * node, size_t state) { auto nstate = nodestate(node); if (nstate->state() != state) @@ -219,7 +219,7 @@ tracker::set_nodestate(jlm::rvsdg::node * node, size_t state) } } -jlm::rvsdg::node * +Node * tracker::peek_top(size_t state) const { JLM_ASSERT(state < states_.size()); @@ -234,7 +234,7 @@ tracker::peek_top(size_t state) const return nullptr; } -jlm::rvsdg::node * +Node * tracker::peek_bottom(size_t 
state) const { JLM_ASSERT(state < states_.size()); @@ -250,7 +250,7 @@ tracker::peek_bottom(size_t state) const } jlm::rvsdg::tracker_nodestate * -tracker::nodestate(jlm::rvsdg::node * node) +tracker::nodestate(Node * node) { auto it = nodestates_.find(node); if (it != nodestates_.end()) diff --git a/jlm/rvsdg/tracker.hpp b/jlm/rvsdg/tracker.hpp index 834163778..9c24794cb 100644 --- a/jlm/rvsdg/tracker.hpp +++ b/jlm/rvsdg/tracker.hpp @@ -20,7 +20,7 @@ namespace jlm::rvsdg static const size_t tracker_nodestate_none = (size_t)-1; class Graph; -class node; +class Node; class Region; class tracker_depth_state; class tracker_nodestate; @@ -40,18 +40,18 @@ struct tracker /* get state of the node */ ssize_t - get_nodestate(jlm::rvsdg::node * node); + get_nodestate(Node * node); /* set state of the node */ void - set_nodestate(jlm::rvsdg::node * node, size_t state); + set_nodestate(Node * node, size_t state); /* get one of the top nodes for the given state */ - jlm::rvsdg::node * + Node * peek_top(size_t state) const; /* get one of the bottom nodes for the given state */ - jlm::rvsdg::node * + Node * peek_bottom(size_t state) const; [[nodiscard]] Graph * @@ -62,13 +62,13 @@ struct tracker private: jlm::rvsdg::tracker_nodestate * - nodestate(jlm::rvsdg::node * node); + nodestate(Node * node); void - node_depth_change(jlm::rvsdg::node * node, size_t old_depth); + node_depth_change(Node * node, size_t old_depth); void - node_destroy(jlm::rvsdg::node * node); + node_destroy(Node * node); jlm::rvsdg::Graph * graph_; @@ -77,8 +77,7 @@ struct tracker jlm::util::callback depth_callback_, destroy_callback_; - std::unordered_map> - nodestates_; + std::unordered_map> nodestates_; }; class tracker_nodestate @@ -86,7 +85,7 @@ class tracker_nodestate friend tracker; public: - inline tracker_nodestate(jlm::rvsdg::node * node) + inline tracker_nodestate(Node * node) : state_(tracker_nodestate_none), node_(node) {} @@ -101,7 +100,7 @@ class tracker_nodestate tracker_nodestate & 
operator=(tracker_nodestate &&) = delete; - inline jlm::rvsdg::node * + [[nodiscard]] Node * node() const noexcept { return node_; @@ -115,7 +114,7 @@ class tracker_nodestate private: size_t state_; - jlm::rvsdg::node * node_; + Node * node_; }; } diff --git a/jlm/rvsdg/traverser.cpp b/jlm/rvsdg/traverser.cpp index e763f470e..379c98ac1 100644 --- a/jlm/rvsdg/traverser.cpp +++ b/jlm/rvsdg/traverser.cpp @@ -48,7 +48,7 @@ topdown_traverser::topdown_traverser(rvsdg::Region * region) } bool -topdown_traverser::predecessors_visited(const jlm::rvsdg::node * node) noexcept +topdown_traverser::predecessors_visited(const Node * node) noexcept { for (size_t n = 0; n < node->ninputs(); n++) { @@ -63,10 +63,10 @@ topdown_traverser::predecessors_visited(const jlm::rvsdg::node * node) noexcept return true; } -jlm::rvsdg::node * +Node * topdown_traverser::next() { - jlm::rvsdg::node * node = tracker_.peek_top(); + Node * node = tracker_.peek_top(); if (!node) return nullptr; @@ -88,7 +88,7 @@ topdown_traverser::next() } void -topdown_traverser::node_create(jlm::rvsdg::node * node) +topdown_traverser::node_create(Node * node) { if (node->region() != region()) return; @@ -148,7 +148,7 @@ bottomup_traverser::bottomup_traverser(rvsdg::Region * region, bool revisit) on_input_change.connect(std::bind(&bottomup_traverser::input_change, this, _1, _2, _3))); } -jlm::rvsdg::node * +Node * bottomup_traverser::next() { auto node = tracker_.peek_bottom(); @@ -166,7 +166,7 @@ bottomup_traverser::next() } void -bottomup_traverser::node_create(jlm::rvsdg::node * node) +bottomup_traverser::node_create(Node * node) { if (node->region() != region()) return; @@ -175,7 +175,7 @@ bottomup_traverser::node_create(jlm::rvsdg::node * node) } void -bottomup_traverser::node_destroy(jlm::rvsdg::node * node) +bottomup_traverser::node_destroy(Node * node) { if (node->region() != region()) return; diff --git a/jlm/rvsdg/traverser.hpp b/jlm/rvsdg/traverser.hpp index a65702828..a201469f1 100644 --- 
a/jlm/rvsdg/traverser.hpp +++ b/jlm/rvsdg/traverser.hpp @@ -27,12 +27,12 @@ class traverser_iterator { public: typedef std::input_iterator_tag iterator_category; - typedef jlm::rvsdg::node * value_type; + typedef Node * value_type; typedef ssize_t difference_type; typedef value_type * pointer; typedef value_type & reference; - constexpr traverser_iterator(T * traverser = nullptr, jlm::rvsdg::node * node = nullptr) noexcept + constexpr traverser_iterator(T * traverser = nullptr, Node * node = nullptr) noexcept : traverser_(traverser), node_(node) {} @@ -70,7 +70,7 @@ class traverser_iterator private: T * traverser_; - jlm::rvsdg::node * node_; + Node * node_; }; } @@ -89,15 +89,15 @@ class traversal_tracker final inline traversal_tracker(Graph * graph); inline traversal_nodestate - get_nodestate(jlm::rvsdg::node * node); + get_nodestate(Node * node); inline void - set_nodestate(jlm::rvsdg::node * node, traversal_nodestate state); + set_nodestate(Node * node, traversal_nodestate state); - inline jlm::rvsdg::node * + inline Node * peek_top(); - inline jlm::rvsdg::node * + inline Node * peek_bottom(); private: @@ -143,7 +143,7 @@ class topdown_traverser final explicit topdown_traverser(rvsdg::Region * region); - jlm::rvsdg::node * + Node * next(); [[nodiscard]] rvsdg::Region * @@ -153,7 +153,7 @@ class topdown_traverser final } typedef detail::traverser_iterator iterator; - typedef jlm::rvsdg::node * value_type; + typedef Node * value_type; inline iterator begin() @@ -169,10 +169,10 @@ class topdown_traverser final private: bool - predecessors_visited(const jlm::rvsdg::node * node) noexcept; + predecessors_visited(const Node * node) noexcept; void - node_create(jlm::rvsdg::node * node); + node_create(Node * node); void input_change(input * in, output * old_origin, output * new_origin); @@ -189,7 +189,7 @@ class bottomup_traverser final explicit bottomup_traverser(rvsdg::Region * region, bool revisit = false); - jlm::rvsdg::node * + Node * next(); [[nodiscard]] 
rvsdg::Region * @@ -199,7 +199,7 @@ class bottomup_traverser final } typedef detail::traverser_iterator iterator; - typedef jlm::rvsdg::node * value_type; + typedef Node * value_type; inline iterator begin() @@ -215,10 +215,10 @@ class bottomup_traverser final private: void - node_create(jlm::rvsdg::node * node); + node_create(Node * node); void - node_destroy(jlm::rvsdg::node * node); + node_destroy(Node * node); void input_change(input * in, output * old_origin, output * new_origin); @@ -236,24 +236,24 @@ traversal_tracker::traversal_tracker(Graph * graph) {} traversal_nodestate -traversal_tracker::get_nodestate(jlm::rvsdg::node * node) +traversal_tracker::get_nodestate(Node * node) { return static_cast(tracker_.get_nodestate(node)); } void -traversal_tracker::set_nodestate(jlm::rvsdg::node * node, traversal_nodestate state) +traversal_tracker::set_nodestate(Node * node, traversal_nodestate state) { tracker_.set_nodestate(node, static_cast(state)); } -jlm::rvsdg::node * +Node * traversal_tracker::peek_top() { return tracker_.peek_top(static_cast(traversal_nodestate::frontier)); } -jlm::rvsdg::node * +Node * traversal_tracker::peek_bottom() { return tracker_.peek_bottom(static_cast(traversal_nodestate::frontier)); diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 5f21495e7..e2f4a4304 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -29,7 +29,7 @@ unary_normal_form::unary_normal_form( } bool -unary_normal_form::normalize_node(jlm::rvsdg::node * node) const +unary_normal_form::normalize_node(Node * node) const { if (!get_mutable()) { diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index be813b611..f26e61dd3 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -28,7 +28,7 @@ class unary_normal_form final : public simple_normal_form Graph * graph); virtual bool - normalize_node(jlm::rvsdg::node * node) const override; + normalize_node(Node * node) const override; virtual std::vector normalized_create( diff --git 
a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index be3e9a8ab..6dc3bf388 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -35,7 +35,7 @@ create_port_name( static std::string node_to_string( - const jlm::rvsdg::node * node, + const Node * node, size_t depth, std::unordered_map & map) { @@ -94,7 +94,7 @@ region_body( size_t depth, std::unordered_map & map) { - std::vector> context; + std::vector> context; for (const auto & node : region->Nodes()) { if (node.depth() >= context.size()) @@ -194,7 +194,7 @@ id(const jlm::rvsdg::input * port) } static inline std::string -id(const jlm::rvsdg::node * node) +id(const Node * node) { return jlm::util::strfmt("n", (intptr_t)node); } @@ -260,7 +260,7 @@ edge_tag(const std::string & srcid, const std::string & dstid) } static inline std::string -type(const jlm::rvsdg::node * n) +type(const Node * n) { if (dynamic_cast(&n->GetOperation())) return "gamma"; @@ -322,7 +322,7 @@ convert_structural_node(const rvsdg::StructuralNode * node) } static inline std::string -convert_node(const jlm::rvsdg::node * node) +convert_node(const Node * node) { if (auto n = dynamic_cast(node)) return convert_simple_node(n); diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index 0bc3339fb..dd37d7ee6 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -83,12 +83,12 @@ class StoreTest1 final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * size; + rvsdg::Node * size; - jlm::rvsdg::node * alloca_a; - jlm::rvsdg::node * alloca_b; - jlm::rvsdg::node * alloca_c; - jlm::rvsdg::node * alloca_d; + rvsdg::Node * alloca_a; + rvsdg::Node * alloca_b; + rvsdg::Node * alloca_c; + rvsdg::Node * alloca_d; }; /** \brief StoreTest2 class @@ -121,13 +121,13 @@ class StoreTest2 final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * size; + rvsdg::Node * size; - jlm::rvsdg::node * alloca_a; - jlm::rvsdg::node * alloca_b; - jlm::rvsdg::node * alloca_x; - jlm::rvsdg::node * alloca_y; - 
jlm::rvsdg::node * alloca_p; + rvsdg::Node * alloca_a; + rvsdg::Node * alloca_b; + rvsdg::Node * alloca_x; + rvsdg::Node * alloca_y; + rvsdg::Node * alloca_p; }; /** \brief LoadTest1 class @@ -155,8 +155,8 @@ class LoadTest1 final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * load_p; - jlm::rvsdg::node * load_x; + rvsdg::Node * load_p; + rvsdg::Node * load_x; }; /** \brief LoadTest2 class @@ -189,16 +189,16 @@ class LoadTest2 final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * size; + rvsdg::Node * size; - jlm::rvsdg::node * alloca_a; - jlm::rvsdg::node * alloca_b; - jlm::rvsdg::node * alloca_x; - jlm::rvsdg::node * alloca_y; - jlm::rvsdg::node * alloca_p; + rvsdg::Node * alloca_a; + rvsdg::Node * alloca_b; + rvsdg::Node * alloca_x; + rvsdg::Node * alloca_y; + rvsdg::Node * alloca_p; - jlm::rvsdg::node * load_x; - jlm::rvsdg::node * load_a; + rvsdg::Node * load_x; + rvsdg::Node * load_a; }; /** \brief LoadFromUndefTest class @@ -229,7 +229,7 @@ class LoadFromUndefTest final : public RvsdgTest return *Lambda_; } - [[nodiscard]] const jlm::rvsdg::node * + [[nodiscard]] const rvsdg::Node * UndefValueNode() const noexcept { return UndefValueNode_; @@ -237,7 +237,7 @@ class LoadFromUndefTest final : public RvsdgTest private: jlm::llvm::lambda::node * Lambda_; - jlm::rvsdg::node * UndefValueNode_; + rvsdg::Node * UndefValueNode_; }; /** \brief GetElementPtrTest class @@ -268,8 +268,8 @@ class GetElementPtrTest final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * getElementPtrX; - jlm::rvsdg::node * getElementPtrY; + rvsdg::Node * getElementPtrX; + rvsdg::Node * getElementPtrY; }; /** \brief BitCastTest class @@ -292,7 +292,7 @@ class BitCastTest final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * bitCast; + rvsdg::Node * bitCast; }; /** \brief Bits2PtrTest class @@ -336,7 +336,7 @@ class Bits2PtrTest final : public RvsdgTest 
return *CallNode_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetBitsToPtrNode() const noexcept { return *BitsToPtrNode_; @@ -349,7 +349,7 @@ class Bits2PtrTest final : public RvsdgTest jlm::llvm::lambda::node * LambdaBits2Ptr_; jlm::llvm::lambda::node * LambdaTest_; - jlm::rvsdg::node * BitsToPtrNode_; + rvsdg::Node * BitsToPtrNode_; jlm::llvm::CallNode * CallNode_; }; @@ -374,7 +374,7 @@ class ConstantPointerNullTest final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; - jlm::rvsdg::node * constantPointerNullNode; + rvsdg::Node * constantPointerNullNode; }; /** \brief CallTest1 class @@ -428,9 +428,9 @@ class CallTest1 final : public RvsdgTest jlm::llvm::lambda::node * lambda_g; jlm::llvm::lambda::node * lambda_h; - jlm::rvsdg::node * alloca_x; - jlm::rvsdg::node * alloca_y; - jlm::rvsdg::node * alloca_z; + rvsdg::Node * alloca_x; + rvsdg::Node * alloca_y; + rvsdg::Node * alloca_z; private: jlm::llvm::CallNode * CallF_; @@ -499,8 +499,8 @@ class CallTest2 final : public RvsdgTest jlm::llvm::lambda::node * lambda_destroy; jlm::llvm::lambda::node * lambda_test; - jlm::rvsdg::node * malloc; - jlm::rvsdg::node * free; + rvsdg::Node * malloc; + rvsdg::Node * free; private: std::unique_ptr @@ -1038,31 +1038,31 @@ class GammaTest2 final : public RvsdgTest return *CallFromH_; } - [[nodiscard]] rvsdg::node & + [[nodiscard]] rvsdg::Node & GetAllocaXFromG() const noexcept { return *AllocaXFromG_; } - [[nodiscard]] rvsdg::node & + [[nodiscard]] rvsdg::Node & GetAllocaYFromG() const noexcept { return *AllocaYFromG_; } - [[nodiscard]] rvsdg::node & + [[nodiscard]] rvsdg::Node & GetAllocaXFromH() const noexcept { return *AllocaXFromH_; } - [[nodiscard]] rvsdg::node & + [[nodiscard]] rvsdg::Node & GetAllocaYFromH() const noexcept { return *AllocaYFromH_; } - [[nodiscard]] rvsdg::node & + [[nodiscard]] rvsdg::Node & GetAllocaZ() const noexcept { return *AllocaZ_; @@ -1081,11 +1081,11 @@ class GammaTest2 final : public RvsdgTest 
llvm::CallNode * CallFromG_; llvm::CallNode * CallFromH_; - rvsdg::node * AllocaXFromG_; - rvsdg::node * AllocaYFromG_; - rvsdg::node * AllocaXFromH_; - rvsdg::node * AllocaYFromH_; - rvsdg::node * AllocaZ_; + rvsdg::Node * AllocaXFromG_; + rvsdg::Node * AllocaYFromG_; + rvsdg::Node * AllocaXFromH_; + rvsdg::Node * AllocaYFromH_; + rvsdg::Node * AllocaZ_; }; /** \brief ThetaTest class @@ -1114,7 +1114,7 @@ class ThetaTest final : public RvsdgTest public: jlm::llvm::lambda::node * lambda; jlm::rvsdg::ThetaNode * theta; - jlm::rvsdg::node * gep; + rvsdg::Node * gep; }; /** \brief DeltaTest1 class @@ -1155,7 +1155,7 @@ class DeltaTest1 final : public RvsdgTest jlm::llvm::delta::node * delta_f; - jlm::rvsdg::node * constantFive; + rvsdg::Node * constantFive; private: std::unique_ptr @@ -1390,7 +1390,7 @@ class PhiTest1 final : public RvsdgTest jlm::llvm::phi::node * phi; - jlm::rvsdg::node * alloca; + rvsdg::Node * alloca; private: std::unique_ptr @@ -1767,8 +1767,8 @@ class EscapedMemoryTest2 final : public RvsdgTest jlm::llvm::CallNode * ExternalFunction1Call; jlm::llvm::CallNode * ExternalFunction2Call; - jlm::rvsdg::node * ReturnAddressMalloc; - jlm::rvsdg::node * CallExternalFunction1Malloc; + rvsdg::Node * ReturnAddressMalloc; + rvsdg::Node * CallExternalFunction1Malloc; jlm::rvsdg::RegionArgument * ExternalFunction1Import; jlm::rvsdg::RegionArgument * ExternalFunction2Import; @@ -1871,7 +1871,7 @@ class MemcpyTest final : public RvsdgTest return *CallF_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & Memcpy() const noexcept { return *Memcpy_; @@ -1889,7 +1889,7 @@ class MemcpyTest final : public RvsdgTest jlm::llvm::CallNode * CallF_; - jlm::rvsdg::node * Memcpy_; + rvsdg::Node * Memcpy_; }; /** @@ -1946,7 +1946,7 @@ class MemcpyTest2 final : public RvsdgTest return *CallG_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & Memcpy() const noexcept { JLM_ASSERT(Memcpy_ != nullptr); @@ -1962,7 +1962,7 @@ 
class MemcpyTest2 final : public RvsdgTest jlm::llvm::CallNode * CallG_ = {}; - jlm::rvsdg::node * Memcpy_ = {}; + rvsdg::Node * Memcpy_ = {}; }; /** @@ -1994,14 +1994,14 @@ class MemcpyTest3 final : public RvsdgTest return *Lambda_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & Alloca() const noexcept { JLM_ASSERT(Alloca_ != nullptr); return *Alloca_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & Memcpy() const noexcept { JLM_ASSERT(Memcpy_ != nullptr); @@ -2014,9 +2014,9 @@ class MemcpyTest3 final : public RvsdgTest jlm::llvm::lambda::node * Lambda_ = {}; - jlm::rvsdg::node * Alloca_ = {}; + rvsdg::Node * Alloca_ = {}; - jlm::rvsdg::node * Memcpy_ = {}; + rvsdg::Node * Memcpy_ = {}; }; /** \brief LinkedListTest class @@ -2043,7 +2043,7 @@ class MemcpyTest3 final : public RvsdgTest class LinkedListTest final : public RvsdgTest { public: - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetAlloca() const noexcept { return *Alloca_; @@ -2069,7 +2069,7 @@ class LinkedListTest final : public RvsdgTest jlm::llvm::lambda::node * LambdaNext_; - jlm::rvsdg::node * Alloca_; + rvsdg::Node * Alloca_; }; /** \brief RVSDG module with one of each memory node type. 
@@ -2129,7 +2129,7 @@ class AllMemoryNodesTest final : public RvsdgTest return *Lambda_->output(); } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetAllocaNode() const noexcept { JLM_ASSERT(Alloca_); @@ -2143,7 +2143,7 @@ class AllMemoryNodesTest final : public RvsdgTest return *Alloca_->output(0); } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetMallocNode() const noexcept { JLM_ASSERT(Malloc_); @@ -2167,9 +2167,9 @@ class AllMemoryNodesTest final : public RvsdgTest jlm::llvm::lambda::node * Lambda_ = {}; - jlm::rvsdg::node * Alloca_ = {}; + rvsdg::Node * Alloca_ = {}; - jlm::rvsdg::node * Malloc_ = {}; + rvsdg::Node * Malloc_ = {}; }; /** \brief RVSDG module with an arbitrary amount of alloca nodes. @@ -2201,7 +2201,7 @@ class NAllocaNodesTest final : public RvsdgTest return NumAllocaNodes_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetAllocaNode(size_t index) const noexcept { JLM_ASSERT(index < AllocaNodes_.size()); @@ -2228,7 +2228,7 @@ class NAllocaNodesTest final : public RvsdgTest size_t NumAllocaNodes_; - std::vector AllocaNodes_ = {}; + std::vector AllocaNodes_ = {}; jlm::llvm::lambda::node * Function_; }; @@ -2286,7 +2286,7 @@ class EscapingLocalFunctionTest final : public RvsdgTest return *LocalFuncParam_; } - [[nodiscard]] const jlm::rvsdg::node & + [[nodiscard]] const rvsdg::Node & GetLocalFunctionParamAllocaNode() const noexcept { JLM_ASSERT(LocalFuncParamAllocaNode_); @@ -2308,7 +2308,7 @@ class EscapingLocalFunctionTest final : public RvsdgTest jlm::llvm::lambda::node * LocalFunc_ = {}; jlm::rvsdg::output * LocalFuncParam_ = {}; jlm::rvsdg::output * LocalFuncRegister_ = {}; - jlm::rvsdg::node * LocalFuncParamAllocaNode_ = {}; + rvsdg::Node * LocalFuncParamAllocaNode_ = {}; jlm::llvm::lambda::node * ExportedFunc_ = {}; }; @@ -2452,7 +2452,7 @@ class VariadicFunctionTest1 final : public RvsdgTest return *CallH_; } - [[nodiscard]] rvsdg::node & + 
[[nodiscard]] rvsdg::Node & GetAllocaNode() const noexcept { JLM_ASSERT(AllocaNode_ != nullptr); @@ -2470,7 +2470,7 @@ class VariadicFunctionTest1 final : public RvsdgTest llvm::CallNode * CallH_ = {}; - rvsdg::node * AllocaNode_ = {}; + rvsdg::Node * AllocaNode_ = {}; }; /** @@ -2520,7 +2520,7 @@ class VariadicFunctionTest2 final : public RvsdgTest return *LambdaG_; } - [[nodiscard]] rvsdg::node & + [[nodiscard]] rvsdg::Node & GetAllocaNode() const noexcept { JLM_ASSERT(AllocaNode_ != nullptr); @@ -2534,7 +2534,7 @@ class VariadicFunctionTest2 final : public RvsdgTest jlm::llvm::lambda::node * LambdaFst_ = {}; jlm::llvm::lambda::node * LambdaG_ = {}; - rvsdg::node * AllocaNode_ = {}; + rvsdg::Node * AllocaNode_ = {}; }; } diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 85c2162f1..41a48dd6b 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -67,7 +67,7 @@ ValidateTest(std::function validateEncoding) template static bool -is(const jlm::rvsdg::node & node, size_t numInputs, size_t numOutputs) +is(const jlm::rvsdg::Node & node, size_t numInputs, size_t numOutputs) { return jlm::rvsdg::is(&node) && node.ninputs() == numInputs && node.noutputs() == numOutputs; } @@ -1051,8 +1051,8 @@ ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithThree().input(3)->origin()); assert(is(*callEntryMerge, 6, 1)); - const jlm::rvsdg::node * storeNode = nullptr; - const jlm::rvsdg::node * lambdaEntrySplit = nullptr; + const jlm::rvsdg::Node * storeNode = nullptr; + const jlm::rvsdg::Node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); @@ -1088,8 +1088,8 @@ 
ValidateIndirectCallTest2SteensgaardRegionAware(const jlm::tests::IndirectCallTe auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithFour().input(3)->origin()); assert(is(*callEntryMerge, 6, 1)); - const jlm::rvsdg::node * storeNode = nullptr; - const jlm::rvsdg::node * lambdaEntrySplit = nullptr; + const jlm::rvsdg::Node * storeNode = nullptr; + const jlm::rvsdg::Node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); @@ -1209,8 +1209,8 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithThree().input(3)->origin()); assert(is(*callEntryMerge, 13, 1)); - const jlm::rvsdg::node * storeNode = nullptr; - const jlm::rvsdg::node * lambdaEntrySplit = nullptr; + const jlm::rvsdg::Node * storeNode = nullptr; + const jlm::rvsdg::Node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); @@ -1246,9 +1246,9 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa auto callEntryMerge = jlm::rvsdg::output::GetNode(*test.GetCallIWithFour().input(3)->origin()); assert(is(*callEntryMerge, 13, 1)); - jlm::rvsdg::node * undefNode = nullptr; - const jlm::rvsdg::node * storeNode = nullptr; - const jlm::rvsdg::node * lambdaEntrySplit = nullptr; + jlm::rvsdg::Node * undefNode = nullptr; + const jlm::rvsdg::Node * storeNode = nullptr; + const jlm::rvsdg::Node * lambdaEntrySplit = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); @@ -1294,7 +1294,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa auto callXExitSplit = jlm::rvsdg::input::GetNode(**test.GetTestCallX().output(2)->begin()); 
assert(is(*callXExitSplit, 1, 13)); - jlm::rvsdg::node * undefNode = nullptr; + jlm::rvsdg::Node * undefNode = nullptr; for (auto & node : test.GetLambdaTest().subregion()->Nodes()) { if (is(&node)) @@ -1330,7 +1330,7 @@ ValidateIndirectCallTest2SteensgaardAgnosticTopDown(const jlm::tests::IndirectCa auto callXExitSplit = jlm::rvsdg::input::GetNode(**test.GetTest2CallX().output(2)->begin()); assert(is(*callXExitSplit, 1, 13)); - jlm::rvsdg::node * undefNode = nullptr; + jlm::rvsdg::Node * undefNode = nullptr; for (auto & node : test.GetLambdaTest2().subregion()->Nodes()) { if (is(&node)) @@ -1679,7 +1679,7 @@ ValidateDeltaTest3SteensgaardAgnostic(const jlm::tests::DeltaTest3 & test) auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadG1Node->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); - jlm::rvsdg::node * storeG2Node = nullptr; + jlm::rvsdg::Node * storeG2Node = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto input = lambdaExitMerge->input(n); @@ -1722,7 +1722,7 @@ ValidateDeltaTest3SteensgaardRegionAware(const jlm::tests::DeltaTest3 & test) auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadG1Node->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 2)); - jlm::rvsdg::node * storeG2Node = nullptr; + jlm::rvsdg::Node * storeG2Node = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto input = lambdaExitMerge->input(n); @@ -1765,7 +1765,7 @@ ValidateDeltaTest3SteensgaardAgnosticTopDown(const jlm::tests::DeltaTest3 & test auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loadG1Node->input(1)->origin()); assert(is(*lambdaEntrySplit, 1, 5)); - jlm::rvsdg::node * storeG2Node = nullptr; + jlm::rvsdg::Node * storeG2Node = nullptr; for (size_t n = 0; n < lambdaExitMerge->ninputs(); n++) { auto input = lambdaExitMerge->input(n); @@ -2039,7 +2039,7 @@ ValidateMemcpySteensgaardAgnostic(const jlm::tests::MemcpyTest & test) auto callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); 
assert(is(*callEntryMerge, 5, 1)); - jlm::rvsdg::node * memcpy = nullptr; + jlm::rvsdg::Node * memcpy = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); @@ -2138,7 +2138,7 @@ ValidateMemcpyTestSteensgaardAgnosticTopDown(const jlm::tests::MemcpyTest & test auto callEntryMerge = jlm::rvsdg::output::GetNode(*call->input(2)->origin()); assert(is(*callEntryMerge, 5, 1)); - jlm::rvsdg::node * memcpy = nullptr; + jlm::rvsdg::Node * memcpy = nullptr; for (size_t n = 0; n < callEntryMerge->ninputs(); n++) { auto node = jlm::rvsdg::output::GetNode(*callEntryMerge->input(n)->origin()); diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 5f455411c..bf0ab491e 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -279,7 +279,7 @@ TestDivOperation() assert( lambdaResultOriginNodeOuput = dynamic_cast( convertedLambda->subregion()->result(0)->origin())); - jlm::rvsdg::node * lambdaResultOriginNode = lambdaResultOriginNodeOuput->node(); + Node * lambdaResultOriginNode = lambdaResultOriginNodeOuput->node(); assert(is(lambdaResultOriginNode->GetOperation())); assert(lambdaResultOriginNode->ninputs() == 2); @@ -296,7 +296,7 @@ TestDivOperation() assert( DivInput1NodeOuput = dynamic_cast(lambdaResultOriginNode->input(1)->origin())); - jlm::rvsdg::node * DivInput1Node = DivInput1NodeOuput->node(); + Node * DivInput1Node = DivInput1NodeOuput->node(); assert(is(DivInput1Node->GetOperation())); const jlm::rvsdg::bitconstant_op * DivInput1Constant = dynamic_cast(&DivInput1Node->GetOperation()); @@ -458,7 +458,7 @@ TestCompZeroExt() assert( lambdaResultOriginNodeOuput = dynamic_cast( convertedLambda->subregion()->result(0)->origin())); - jlm::rvsdg::node * ZExtNode = lambdaResultOriginNodeOuput->node(); + Node * ZExtNode = lambdaResultOriginNodeOuput->node(); 
assert(is(ZExtNode->GetOperation())); assert(ZExtNode->ninputs() == 1); @@ -472,7 +472,7 @@ TestCompZeroExt() std::cout << "Testing input 0\n"; jlm::rvsdg::node_output * ZExtInput0; assert(ZExtInput0 = dynamic_cast(ZExtNode->input(0)->origin())); - jlm::rvsdg::node * BitEqNode = ZExtInput0->node(); + Node * BitEqNode = ZExtInput0->node(); assert(is(BitEqNode->GetOperation())); // Check BitEq @@ -484,14 +484,14 @@ TestCompZeroExt() // Check BitEq input 0 jlm::rvsdg::node_output * AddOuput; assert(AddOuput = dynamic_cast(BitEqNode->input(0)->origin())); - jlm::rvsdg::node * AddNode = AddOuput->node(); + Node * AddNode = AddOuput->node(); assert(is(AddNode->GetOperation())); assert(AddNode->ninputs() == 2); // Check BitEq input 1 jlm::rvsdg::node_output * Const2Ouput; assert(Const2Ouput = dynamic_cast(BitEqNode->input(1)->origin())); - jlm::rvsdg::node * Const2Node = Const2Ouput->node(); + Node * Const2Node = Const2Ouput->node(); assert(is(Const2Node->GetOperation())); // Check Const2 @@ -515,7 +515,7 @@ TestCompZeroExt() // Check add input1 jlm::rvsdg::node_output * Const1Output; assert(Const1Output = dynamic_cast(AddNode->input(1)->origin())); - jlm::rvsdg::node * Const1Node = Const1Output->node(); + Node * Const1Node = Const1Output->node(); assert(is(Const1Node->GetOperation())); // Check Const1 @@ -667,7 +667,7 @@ TestMatchOp() jlm::rvsdg::node_output * matchOutput; assert( matchOutput = dynamic_cast(lambdaRegion->result(0)->origin())); - jlm::rvsdg::node * matchNode = matchOutput->node(); + Node * matchNode = matchOutput->node(); assert(is(matchNode->GetOperation())); auto matchOp = dynamic_cast(&matchNode->GetOperation()); @@ -843,7 +843,7 @@ TestGammaOp() jlm::rvsdg::node_output * gammaOutput; assert( gammaOutput = dynamic_cast(lambdaRegion->result(0)->origin())); - jlm::rvsdg::node * gammaNode = gammaOutput->node(); + Node * gammaNode = gammaOutput->node(); assert(is(gammaNode->GetOperation())); std::cout << "Checking gamma operation" << std::endl; @@ -992,7 
+992,7 @@ TestThetaOp() jlm::rvsdg::node_output * thetaOutput; assert( thetaOutput = dynamic_cast(lambdaRegion->result(0)->origin())); - jlm::rvsdg::node * node = thetaOutput->node(); + Node * node = thetaOutput->node(); assert(is(node->GetOperation())); auto thetaNode = dynamic_cast(node); diff --git a/tests/jlm/rvsdg/test-bottomup.cpp b/tests/jlm/rvsdg/test-bottomup.cpp index ef437d6da..3f86e8dac 100644 --- a/tests/jlm/rvsdg/test-bottomup.cpp +++ b/tests/jlm/rvsdg/test-bottomup.cpp @@ -44,7 +44,7 @@ test_basic_traversal() jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { - jlm::rvsdg::node * tmp; + jlm::rvsdg::Node * tmp; jlm::rvsdg::bottomup_traverser trav(graph.root()); tmp = trav.next(); assert(tmp == n2); @@ -66,7 +66,7 @@ test_order_enforcement_traversal() auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0), n1->output(1) }, { type }); - jlm::rvsdg::node * tmp; + jlm::rvsdg::Node * tmp; { jlm::rvsdg::bottomup_traverser trav(graph.root()); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index 0cc0ff9ae..f34cc9e08 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -14,7 +14,7 @@ #include static bool -region_contains_node(const jlm::rvsdg::Region * region, const jlm::rvsdg::node * n) +region_contains_node(const jlm::rvsdg::Region * region, const jlm::rvsdg::Node * n) { for (const auto & node : region->Nodes()) { diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index be5d58454..2da66e3da 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -117,7 +117,7 @@ test_node_depth() } /** - * Test node::RemoveOutputsWhere() + * Test Node::RemoveOutputsWhere() */ static void TestRemoveOutputsWhere() @@ -180,7 +180,7 @@ TestRemoveOutputsWhere() } /** - * Test node::RemoveInputsWhere() + * Test Node::RemoveInputsWhere() */ static void 
TestRemoveInputsWhere() diff --git a/tests/jlm/rvsdg/test-topdown.cpp b/tests/jlm/rvsdg/test-topdown.cpp index 45dfa7bdf..d69c2ba22 100644 --- a/tests/jlm/rvsdg/test-topdown.cpp +++ b/tests/jlm/rvsdg/test-topdown.cpp @@ -55,7 +55,7 @@ test_basic_traversal() jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { - jlm::rvsdg::node * tmp; + jlm::rvsdg::Node * tmp; jlm::rvsdg::topdown_traverser trav(graph.root()); tmp = trav.next(); @@ -80,7 +80,7 @@ test_order_enforcement_traversal() auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0), n1->output(1) }, { type }); { - jlm::rvsdg::node * tmp; + jlm::rvsdg::Node * tmp; jlm::rvsdg::topdown_traverser trav(graph.root()); tmp = trav.next(); @@ -108,7 +108,7 @@ test_traversal_insertion() jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { - jlm::rvsdg::node * node; + jlm::rvsdg::Node * node; jlm::rvsdg::topdown_traverser trav(graph.root()); node = trav.next(); @@ -155,9 +155,9 @@ static void test_mutable_traverse() { auto test = [](jlm::rvsdg::Graph * graph, - jlm::rvsdg::node * n1, - jlm::rvsdg::node * n2, - jlm::rvsdg::node * n3) + jlm::rvsdg::Node * n1, + jlm::rvsdg::Node * n2, + jlm::rvsdg::Node * n3) { bool seen_n1 = false; bool seen_n2 = false; diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index d7bb5d801..2849fd03b 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -94,7 +94,7 @@ class unary_op final : public rvsdg::unary_op virtual std::unique_ptr copy() const override; - static inline rvsdg::node * + static rvsdg::Node * create( rvsdg::Region * region, std::shared_ptr srctype, @@ -125,7 +125,7 @@ is_unary_op(const rvsdg::operation & op) noexcept } static inline bool -is_unary_node(const rvsdg::node * node) noexcept +is_unary_node(const rvsdg::Node * node) noexcept { return jlm::rvsdg::is(node); } @@ -165,7 +165,7 @@ class binary_op final : public rvsdg::binary_op virtual std::unique_ptr copy() const override; - static inline rvsdg::node * + static 
rvsdg::Node * create( const std::shared_ptr & srctype, std::shared_ptr dsttype, @@ -419,9 +419,9 @@ class SimpleNode final : public rvsdg::simple_node {} public: - using rvsdg::node::RemoveInputsWhere; + using Node::RemoveInputsWhere; - using rvsdg::node::RemoveOutputsWhere; + using Node::RemoveOutputsWhere; static SimpleNode & Create( From e3988cf8ffd7531e058bdf0972908c5f948ccf59 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 4 Dec 2024 07:10:26 +0100 Subject: [PATCH 128/170] Rename operation class to Operation (#676) --- jlm/hls/ir/hls.cpp | 2 +- jlm/hls/ir/hls.hpp | 136 +++++----- .../InterProceduralGraphConversion.cpp | 2 +- .../frontend/LlvmInstructionConversion.cpp | 14 +- jlm/llvm/ir/operators/GetElementPtr.cpp | 6 +- jlm/llvm/ir/operators/GetElementPtr.hpp | 4 +- jlm/llvm/ir/operators/Load.cpp | 12 +- jlm/llvm/ir/operators/Load.hpp | 8 +- jlm/llvm/ir/operators/MemCpy.cpp | 12 +- jlm/llvm/ir/operators/MemCpy.hpp | 8 +- .../ir/operators/MemoryStateOperations.cpp | 36 +-- .../ir/operators/MemoryStateOperations.hpp | 24 +- jlm/llvm/ir/operators/Phi.cpp | 6 +- jlm/llvm/ir/operators/Phi.hpp | 2 +- jlm/llvm/ir/operators/Store.cpp | 12 +- jlm/llvm/ir/operators/Store.hpp | 8 +- jlm/llvm/ir/operators/alloca.cpp | 6 +- jlm/llvm/ir/operators/alloca.hpp | 4 +- jlm/llvm/ir/operators/call.cpp | 6 +- jlm/llvm/ir/operators/call.hpp | 4 +- jlm/llvm/ir/operators/delta.cpp | 6 +- jlm/llvm/ir/operators/delta.hpp | 4 +- jlm/llvm/ir/operators/lambda.cpp | 8 +- jlm/llvm/ir/operators/lambda.hpp | 4 +- jlm/llvm/ir/operators/operators.cpp | 236 +++++++++--------- jlm/llvm/ir/operators/operators.hpp | 170 ++++++------- jlm/llvm/ir/operators/sext.cpp | 8 +- jlm/llvm/ir/operators/sext.hpp | 4 +- jlm/llvm/ir/tac.hpp | 2 +- jlm/llvm/opt/unroll.cpp | 4 +- jlm/rvsdg/binary.cpp | 9 +- jlm/rvsdg/binary.hpp | 4 +- jlm/rvsdg/bitstring/arithmetic-impl.hpp | 8 +- jlm/rvsdg/bitstring/arithmetic.hpp | 8 +- jlm/rvsdg/bitstring/bitoperation-classes.cpp | 6 - 
jlm/rvsdg/bitstring/comparison-impl.hpp | 4 +- jlm/rvsdg/bitstring/comparison.hpp | 4 +- jlm/rvsdg/bitstring/concat.cpp | 6 +- jlm/rvsdg/bitstring/concat.hpp | 4 +- jlm/rvsdg/bitstring/slice.cpp | 6 +- jlm/rvsdg/bitstring/slice.hpp | 4 +- jlm/rvsdg/control.cpp | 6 +- jlm/rvsdg/control.hpp | 10 +- jlm/rvsdg/gamma.cpp | 8 +- jlm/rvsdg/gamma.hpp | 4 +- jlm/rvsdg/node-normal-form.hpp | 2 +- jlm/rvsdg/node.cpp | 4 +- jlm/rvsdg/node.hpp | 6 +- jlm/rvsdg/nullary.hpp | 6 +- jlm/rvsdg/operation.cpp | 13 +- jlm/rvsdg/operation.hpp | 24 +- jlm/rvsdg/simple-normal-form.cpp | 2 +- jlm/rvsdg/statemux.cpp | 6 +- jlm/rvsdg/statemux.hpp | 6 +- jlm/rvsdg/theta.cpp | 8 +- jlm/rvsdg/theta.hpp | 2 +- tests/TestRvsdgs.cpp | 80 +++--- .../rvsdg2rhls/MemoryConverterTests.cpp | 8 +- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 4 +- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 4 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 2 +- .../rvsdg2rhls/test-loop-passthrough.cpp | 2 +- .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 6 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 8 +- tests/jlm/llvm/ir/operators/TestLambda.cpp | 4 +- tests/jlm/llvm/opt/test-cne.cpp | 18 +- tests/jlm/llvm/opt/test-unroll.cpp | 12 +- tests/jlm/mlir/TestJlmToMlirToJlm.cpp | 2 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 12 +- tests/test-operation.cpp | 28 +-- tests/test-operation.hpp | 16 +- 71 files changed, 551 insertions(+), 583 deletions(-) diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index 1edfe456e..bafa688cb 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -101,7 +101,7 @@ loop_node::add_loopconst(jlm::rvsdg::output * origin) loop_node * loop_node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { - auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph()->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto loop = create(region, false); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 
b348df8a3..8306c70f5 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -35,7 +35,7 @@ class branch_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); // check predicate and value @@ -49,10 +49,10 @@ class branch_op final : public rvsdg::SimpleOperation return "HLS_BRANCH"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new branch_op(*this)); + return std::make_unique(*this); } static std::vector @@ -114,7 +114,7 @@ class fork_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto forkOp = dynamic_cast(&other); // check predicate and value @@ -132,10 +132,10 @@ class fork_op final : public rvsdg::SimpleOperation return IsConstant() ? 
"HLS_CFORK" : "HLS_FORK"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new fork_op(*this)); + return std::make_unique(*this); } /** @@ -183,7 +183,7 @@ class merge_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); return ot && ot->narguments() == narguments() && *ot->argument(0) == *argument(0); @@ -195,10 +195,10 @@ class merge_op final : public rvsdg::SimpleOperation return "HLS_MERGE"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new merge_op(*this)); + return std::make_unique(*this); } static std::vector @@ -230,7 +230,7 @@ class mux_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); // check predicate and value @@ -244,10 +244,10 @@ class mux_op final : public rvsdg::SimpleOperation return discarding ? 
"HLS_DMUX" : "HLS_NDMUX"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new mux_op(*this)); + return std::make_unique(*this); } static std::vector @@ -297,7 +297,7 @@ class sink_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); return ot && *ot->argument(0) == *argument(0); @@ -309,10 +309,10 @@ class sink_op final : public rvsdg::SimpleOperation return "HLS_SINK"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new sink_op(*this)); + return std::make_unique(*this); } static std::vector @@ -335,7 +335,7 @@ class predicate_buffer_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); return ot && *ot->result(0) == *result(0); @@ -347,10 +347,10 @@ class predicate_buffer_op final : public rvsdg::SimpleOperation return "HLS_PRED_BUF"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new predicate_buffer_op(*this)); + return std::make_unique(*this); } static std::vector @@ -378,7 +378,7 @@ class loop_constant_buffer_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); return ot && *ot->result(0) == *result(0) && *ot->argument(0) == *argument(0); @@ -390,10 +390,10 @@ class loop_constant_buffer_op final : public rvsdg::SimpleOperation return "HLS_LOOP_CONST_BUF"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new loop_constant_buffer_op(*this)); + return 
std::make_unique(*this); } static std::vector @@ -424,7 +424,7 @@ class buffer_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); return ot && ot->capacity == capacity && ot->pass_through == pass_through @@ -437,10 +437,10 @@ class buffer_op final : public rvsdg::SimpleOperation return util::strfmt("HLS_BUF_", (pass_through ? "P_" : ""), capacity); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new buffer_op(*this)); + return std::make_unique(*this); } static std::vector @@ -498,7 +498,7 @@ class trigger_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); // check predicate and value @@ -511,10 +511,10 @@ class trigger_op final : public rvsdg::SimpleOperation return "HLS_TRIGGER"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new trigger_op(*this)); + return std::make_unique(*this); } static std::vector @@ -546,7 +546,7 @@ class print_op final : public rvsdg::SimpleOperation } bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // auto ot = dynamic_cast(&other); // check predicate and value @@ -568,10 +568,10 @@ class print_op final : public rvsdg::SimpleOperation return _id; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new print_op(*this)); + return std::make_unique(*this); } static std::vector @@ -596,10 +596,10 @@ class loop_op final : public rvsdg::StructuralOperation return "HLS_LOOP"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return 
std::unique_ptr(new loop_op(*this)); + return std::make_unique(*this); } }; @@ -885,7 +885,7 @@ class load_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -925,10 +925,10 @@ class load_op final : public rvsdg::SimpleOperation return "HLS_LOAD_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new load_op(*this)); + return std::make_unique(*this); } static std::vector @@ -977,7 +977,7 @@ class addr_queue_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1010,10 +1010,10 @@ class addr_queue_op final : public rvsdg::SimpleOperation return "HLS_ADDR_QUEUE_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new addr_queue_op(*this)); + return std::make_unique(*this); } static jlm::rvsdg::output * @@ -1045,7 +1045,7 @@ class state_gate_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); // check predicate and value @@ -1069,10 +1069,10 @@ class state_gate_op final : public rvsdg::SimpleOperation return "HLS_STATE_GATE_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new state_gate_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1098,7 +1098,7 @@ class decoupled_load_op final : public rvsdg::SimpleOperation {} bool - operator==(const 
jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto ot = dynamic_cast(&other); // check predicate and value @@ -1127,10 +1127,10 @@ class decoupled_load_op final : public rvsdg::SimpleOperation return "HLS_DEC_LOAD_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new decoupled_load_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1167,7 +1167,7 @@ class mem_resp_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1208,10 +1208,10 @@ class mem_resp_op final : public rvsdg::SimpleOperation return "HLS_MEM_RESP"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new mem_resp_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1254,7 +1254,7 @@ class mem_req_op final : public rvsdg::SimpleOperation mem_req_op(const mem_req_op & other) = default; bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1310,10 +1310,10 @@ class mem_req_op final : public rvsdg::SimpleOperation return "HLS_MEM_REQ"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new mem_req_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1374,7 +1374,7 @@ class store_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1411,10 +1411,10 @@ class store_op final : public rvsdg::SimpleOperation 
return "HLS_STORE_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new store_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1455,7 +1455,7 @@ class local_mem_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: // auto ot = dynamic_cast(&other); @@ -1476,10 +1476,10 @@ class local_mem_op final : public rvsdg::SimpleOperation return "HLS_LOCAL_MEM_" + result(0)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new local_mem_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1501,7 +1501,7 @@ class local_mem_resp_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1522,10 +1522,10 @@ class local_mem_resp_op final : public rvsdg::SimpleOperation return "HLS_LOCAL_MEM_RESP"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new local_mem_resp_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1549,7 +1549,7 @@ class local_load_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1587,10 +1587,10 @@ class local_load_op final : public rvsdg::SimpleOperation return "HLS_LOCAL_LOAD_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new local_load_op(*this)); + return std::make_unique(*this); } static 
std::vector @@ -1627,7 +1627,7 @@ class local_store_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1664,10 +1664,10 @@ class local_store_op final : public rvsdg::SimpleOperation return "HLS_LOCAL_STORE_" + argument(narguments() - 1)->debug_string(); } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new local_store_op(*this)); + return std::make_unique(*this); } static std::vector @@ -1707,7 +1707,7 @@ class local_mem_req_op final : public rvsdg::SimpleOperation {} bool - operator==(const jlm::rvsdg::operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { // TODO: auto ot = dynamic_cast(&other); @@ -1742,10 +1742,10 @@ class local_mem_req_op final : public rvsdg::SimpleOperation return "HLS_LOCAL_MEM_REQ"; } - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new local_mem_req_op(*this)); + return std::make_unique(*this); } static std::vector diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 687b78e18..752d2a41e 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -1200,7 +1200,7 @@ ConvertInterProceduralGraphModule( std::move(interProceduralGraphModule.ReleaseStructTypeDeclarations())); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); /* FIXME: we currently cannot handle flattened_binary_op in jlm2llvm pass */ diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index 3d9461a09..f3ec20ccb 100644 
--- a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -444,7 +444,7 @@ convert_icmp_instruction(::llvm::Instruction * instruction, tacsvector_t & tacs, auto t = i->getOperand(0)->getType(); static std:: - unordered_map (*)(size_t)> + unordered_map (*)(size_t)> map({ { ::llvm::CmpInst::ICMP_SLT, [](size_t nbits) { @@ -518,7 +518,7 @@ convert_icmp_instruction(::llvm::Instruction * instruction, tacsvector_t & tacs, auto op1 = ConvertValue(i->getOperand(0), tacs, ctx); auto op2 = ConvertValue(i->getOperand(1), tacs, ctx); - std::unique_ptr binop; + std::unique_ptr binop; if (t->isIntegerTy() || (t->isVectorTy() && t->getScalarType()->isIntegerTy())) { @@ -930,7 +930,7 @@ convert_binary_operator(::llvm::Instruction * instruction, tacsvector_t & tacs, static std::unordered_map< const ::llvm::Instruction::BinaryOps, - std::unique_ptr (*)(size_t)> + std::unique_ptr (*)(size_t)> bitmap({ { ::llvm::Instruction::Add, [](size_t nbits) { @@ -1024,7 +1024,7 @@ convert_binary_operator(::llvm::Instruction * instruction, tacsvector_t & tacs, { ::llvm::Type::X86_FP80TyID, fpsize::x86fp80 }, { ::llvm::Type::FP128TyID, fpsize::fp128 } }); - std::unique_ptr operation; + std::unique_ptr operation; auto t = i->getType()->isVectorTy() ? 
i->getType()->getScalarType() : i->getType(); if (t->isIntegerTy()) { @@ -1159,10 +1159,10 @@ convert(::llvm::UnaryOperator * unaryOperator, tacsvector_t & threeAddressCodeVe } template -static std::unique_ptr +static std::unique_ptr create_unop(std::shared_ptr st, std::shared_ptr dt) { - return std::unique_ptr(new OP(std::move(st), std::move(dt))); + return std::unique_ptr(new OP(std::move(st), std::move(dt))); } static const variable * @@ -1174,7 +1174,7 @@ convert_cast_instruction(::llvm::Instruction * i, tacsvector_t & tacs, context & static std::unordered_map< unsigned, - std::unique_ptr (*)( + std::unique_ptr (*)( std::shared_ptr, std::shared_ptr)> map({ { ::llvm::Instruction::Trunc, create_unop }, diff --git a/jlm/llvm/ir/operators/GetElementPtr.cpp b/jlm/llvm/ir/operators/GetElementPtr.cpp index c1706bbb9..9d95826f2 100644 --- a/jlm/llvm/ir/operators/GetElementPtr.cpp +++ b/jlm/llvm/ir/operators/GetElementPtr.cpp @@ -11,7 +11,7 @@ namespace jlm::llvm GetElementPtrOperation::~GetElementPtrOperation() noexcept = default; bool -GetElementPtrOperation::operator==(const operation & other) const noexcept +GetElementPtrOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); @@ -38,10 +38,10 @@ GetElementPtrOperation::debug_string() const return "GetElementPtr"; } -std::unique_ptr +std::unique_ptr GetElementPtrOperation::copy() const { - return std::unique_ptr(new GetElementPtrOperation(*this)); + return std::make_unique(*this); } } diff --git a/jlm/llvm/ir/operators/GetElementPtr.hpp b/jlm/llvm/ir/operators/GetElementPtr.hpp index e04d2b4c1..bfda9dd3c 100644 --- a/jlm/llvm/ir/operators/GetElementPtr.hpp +++ b/jlm/llvm/ir/operators/GetElementPtr.hpp @@ -38,12 +38,12 @@ class GetElementPtrOperation final : public rvsdg::SimpleOperation GetElementPtrOperation(GetElementPtrOperation && other) noexcept = default; bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const 
noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] const rvsdg::ValueType & diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index ee4e68af3..6f7ae6346 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -20,7 +20,7 @@ LoadNode::GetOperation() const noexcept LoadNonVolatileOperation::~LoadNonVolatileOperation() noexcept = default; bool -LoadNonVolatileOperation::operator==(const operation & other) const noexcept +LoadNonVolatileOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->narguments() == narguments() @@ -34,10 +34,10 @@ LoadNonVolatileOperation::debug_string() const return "Load"; } -std::unique_ptr +std::unique_ptr LoadNonVolatileOperation::copy() const { - return std::unique_ptr(new LoadNonVolatileOperation(*this)); + return std::make_unique(*this); } size_t @@ -96,7 +96,7 @@ LoadNonVolatileNode::copy(rvsdg::Region * region, const std::vector(&other); return operation && operation->narguments() == narguments() @@ -110,10 +110,10 @@ LoadVolatileOperation::debug_string() const return "LoadVolatile"; } -std::unique_ptr +std::unique_ptr LoadVolatileOperation::copy() const { - return std::unique_ptr(new LoadVolatileOperation(*this)); + return std::make_unique(*this); } size_t diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index a78baef00..c83600b29 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -200,12 +200,12 @@ class LoadVolatileOperation final : public LoadOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const 
override; [[nodiscard]] size_t @@ -450,12 +450,12 @@ class LoadNonVolatileOperation final : public LoadOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] size_t diff --git a/jlm/llvm/ir/operators/MemCpy.cpp b/jlm/llvm/ir/operators/MemCpy.cpp index 2c109ba1c..9347fe5e3 100644 --- a/jlm/llvm/ir/operators/MemCpy.cpp +++ b/jlm/llvm/ir/operators/MemCpy.cpp @@ -11,7 +11,7 @@ namespace jlm::llvm MemCpyNonVolatileOperation::~MemCpyNonVolatileOperation() = default; bool -MemCpyNonVolatileOperation::operator==(const operation & other) const noexcept +MemCpyNonVolatileOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->LengthType() == LengthType() @@ -24,10 +24,10 @@ MemCpyNonVolatileOperation::debug_string() const return "MemCpy"; } -std::unique_ptr +std::unique_ptr MemCpyNonVolatileOperation::copy() const { - return std::unique_ptr(new MemCpyNonVolatileOperation(*this)); + return std::make_unique(*this); } size_t @@ -39,7 +39,7 @@ MemCpyNonVolatileOperation::NumMemoryStates() const noexcept MemCpyVolatileOperation::~MemCpyVolatileOperation() noexcept = default; bool -MemCpyVolatileOperation::operator==(const operation & other) const noexcept +MemCpyVolatileOperation::operator==(const Operation & other) const noexcept { // Avoid common node elimination for memcpy operator return this == &other; @@ -51,10 +51,10 @@ MemCpyVolatileOperation::debug_string() const return "MemCpyVolatile"; } -std::unique_ptr +std::unique_ptr MemCpyVolatileOperation::copy() const { - return std::unique_ptr(new MemCpyVolatileOperation(*this)); + return std::make_unique(*this); } size_t diff --git a/jlm/llvm/ir/operators/MemCpy.hpp b/jlm/llvm/ir/operators/MemCpy.hpp index 
14e01ac01..d7b431193 100644 --- a/jlm/llvm/ir/operators/MemCpy.hpp +++ b/jlm/llvm/ir/operators/MemCpy.hpp @@ -79,12 +79,12 @@ class MemCpyNonVolatileOperation final : public MemCpyOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] size_t @@ -158,12 +158,12 @@ class MemCpyVolatileOperation final : public MemCpyOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] size_t diff --git a/jlm/llvm/ir/operators/MemoryStateOperations.cpp b/jlm/llvm/ir/operators/MemoryStateOperations.cpp index 3fe2f3be0..37ce22e97 100644 --- a/jlm/llvm/ir/operators/MemoryStateOperations.cpp +++ b/jlm/llvm/ir/operators/MemoryStateOperations.cpp @@ -11,7 +11,7 @@ namespace jlm::llvm MemoryStateMergeOperation::~MemoryStateMergeOperation() noexcept = default; bool -MemoryStateMergeOperation::operator==(const rvsdg::operation & other) const noexcept +MemoryStateMergeOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->narguments() == narguments(); @@ -23,16 +23,16 @@ MemoryStateMergeOperation::debug_string() const return "MemoryStateMerge"; } -std::unique_ptr +std::unique_ptr MemoryStateMergeOperation::copy() const { - return std::unique_ptr(new MemoryStateMergeOperation(*this)); + return std::make_unique(*this); } MemoryStateSplitOperation::~MemoryStateSplitOperation() noexcept = default; bool -MemoryStateSplitOperation::operator==(const rvsdg::operation & other) const noexcept +MemoryStateSplitOperation::operator==(const 
Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->nresults() == nresults(); @@ -44,16 +44,16 @@ MemoryStateSplitOperation::debug_string() const return "MemoryStateSplit"; } -std::unique_ptr +std::unique_ptr MemoryStateSplitOperation::copy() const { - return std::unique_ptr(new MemoryStateSplitOperation(*this)); + return std::make_unique(*this); } LambdaEntryMemoryStateSplitOperation::~LambdaEntryMemoryStateSplitOperation() noexcept = default; bool -LambdaEntryMemoryStateSplitOperation::operator==(const rvsdg::operation & other) const noexcept +LambdaEntryMemoryStateSplitOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->nresults() == nresults(); @@ -65,16 +65,16 @@ LambdaEntryMemoryStateSplitOperation::debug_string() const return "LambdaEntryMemoryStateSplit"; } -std::unique_ptr +std::unique_ptr LambdaEntryMemoryStateSplitOperation::copy() const { - return std::unique_ptr(new LambdaEntryMemoryStateSplitOperation(*this)); + return std::make_unique(*this); } LambdaExitMemoryStateMergeOperation::~LambdaExitMemoryStateMergeOperation() noexcept = default; bool -LambdaExitMemoryStateMergeOperation::operator==(const rvsdg::operation & other) const noexcept +LambdaExitMemoryStateMergeOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->narguments() == narguments(); @@ -86,16 +86,16 @@ LambdaExitMemoryStateMergeOperation::debug_string() const return "LambdaExitMemoryStateMerge"; } -std::unique_ptr +std::unique_ptr LambdaExitMemoryStateMergeOperation::copy() const { - return std::unique_ptr(new LambdaExitMemoryStateMergeOperation(*this)); + return std::make_unique(*this); } CallEntryMemoryStateMergeOperation::~CallEntryMemoryStateMergeOperation() noexcept = default; bool -CallEntryMemoryStateMergeOperation::operator==(const rvsdg::operation & other) const 
noexcept +CallEntryMemoryStateMergeOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->narguments() == narguments(); @@ -107,16 +107,16 @@ CallEntryMemoryStateMergeOperation::debug_string() const return "CallEntryMemoryStateMerge"; } -std::unique_ptr +std::unique_ptr CallEntryMemoryStateMergeOperation::copy() const { - return std::unique_ptr(new CallEntryMemoryStateMergeOperation(*this)); + return std::make_unique(*this); } CallExitMemoryStateSplitOperation::~CallExitMemoryStateSplitOperation() noexcept = default; bool -CallExitMemoryStateSplitOperation::operator==(const rvsdg::operation & other) const noexcept +CallExitMemoryStateSplitOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->nresults() == nresults(); @@ -128,10 +128,10 @@ CallExitMemoryStateSplitOperation::debug_string() const return "CallExitMemoryStateSplit"; } -std::unique_ptr +std::unique_ptr CallExitMemoryStateSplitOperation::copy() const { - return std::unique_ptr(new CallExitMemoryStateSplitOperation(*this)); + return std::make_unique(*this); } } diff --git a/jlm/llvm/ir/operators/MemoryStateOperations.hpp b/jlm/llvm/ir/operators/MemoryStateOperations.hpp index 0f2c6e79f..597b6ed03 100644 --- a/jlm/llvm/ir/operators/MemoryStateOperations.hpp +++ b/jlm/llvm/ir/operators/MemoryStateOperations.hpp @@ -42,12 +42,12 @@ class MemoryStateMergeOperation final : public MemoryStateOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static rvsdg::output * @@ -88,12 +88,12 @@ class MemoryStateSplitOperation final : public MemoryStateOperation {} bool - operator==(const operation & other) const noexcept override; + 
operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::vector @@ -127,12 +127,12 @@ class LambdaEntryMemoryStateSplitOperation final : public MemoryStateOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::vector @@ -164,12 +164,12 @@ class LambdaExitMemoryStateMergeOperation final : public MemoryStateOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static rvsdg::output & @@ -200,12 +200,12 @@ class CallEntryMemoryStateMergeOperation final : public MemoryStateOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static rvsdg::output & @@ -236,12 +236,12 @@ class CallExitMemoryStateSplitOperation final : public MemoryStateOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::vector diff --git a/jlm/llvm/ir/operators/Phi.cpp b/jlm/llvm/ir/operators/Phi.cpp index 8c5c7e957..a58276dc8 100644 --- a/jlm/llvm/ir/operators/Phi.cpp +++ b/jlm/llvm/ir/operators/Phi.cpp 
@@ -13,8 +13,6 @@ namespace jlm::llvm namespace phi { -/* phi operation class */ - operation::~operation() {} @@ -24,10 +22,10 @@ operation::debug_string() const return "PHI"; } -std::unique_ptr +std::unique_ptr operation::copy() const { - return std::unique_ptr(new phi::operation(*this)); + return std::make_unique(*this); } /* phi node class */ diff --git a/jlm/llvm/ir/operators/Phi.hpp b/jlm/llvm/ir/operators/Phi.hpp index b079113c0..2fa1a4b57 100644 --- a/jlm/llvm/ir/operators/Phi.hpp +++ b/jlm/llvm/ir/operators/Phi.hpp @@ -34,7 +34,7 @@ class operation final : public rvsdg::StructuralOperation virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; }; diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 6ade5042e..781ef4257 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -19,7 +19,7 @@ StoreNode::GetOperation() const noexcept StoreNonVolatileOperation::~StoreNonVolatileOperation() noexcept = default; bool -StoreNonVolatileOperation::operator==(const operation & other) const noexcept +StoreNonVolatileOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->narguments() == narguments() @@ -33,10 +33,10 @@ StoreNonVolatileOperation::debug_string() const return "Store"; } -std::unique_ptr +std::unique_ptr StoreNonVolatileOperation::copy() const { - return std::unique_ptr(new StoreNonVolatileOperation(*this)); + return std::make_unique(*this); } [[nodiscard]] size_t @@ -94,7 +94,7 @@ StoreNonVolatileNode::copy(rvsdg::Region * region, const std::vector(&other); return operation && operation->NumMemoryStates() == NumMemoryStates() @@ -108,10 +108,10 @@ StoreVolatileOperation::debug_string() const return "StoreVolatile"; } -std::unique_ptr +std::unique_ptr StoreVolatileOperation::copy() const { - return std::unique_ptr(new 
StoreVolatileOperation(*this)); + return std::make_unique(*this); } [[nodiscard]] size_t diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index d310b080e..891f2a582 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -154,12 +154,12 @@ class StoreNonVolatileOperation final : public StoreOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] size_t @@ -420,12 +420,12 @@ class StoreVolatileOperation final : public StoreOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] size_t diff --git a/jlm/llvm/ir/operators/alloca.cpp b/jlm/llvm/ir/operators/alloca.cpp index 246f5fbfd..22df73a1b 100644 --- a/jlm/llvm/ir/operators/alloca.cpp +++ b/jlm/llvm/ir/operators/alloca.cpp @@ -14,7 +14,7 @@ alloca_op::~alloca_op() noexcept {} bool -alloca_op::operator==(const operation & other) const noexcept +alloca_op::operator==(const Operation & other) const noexcept { /* Avoid CNE for alloca operators */ return this == &other; @@ -26,10 +26,10 @@ alloca_op::debug_string() const return "ALLOCA[" + value_type().debug_string() + "]"; } -std::unique_ptr +std::unique_ptr alloca_op::copy() const { - return std::unique_ptr(new alloca_op(*this)); + return std::make_unique(*this); } } diff --git a/jlm/llvm/ir/operators/alloca.hpp b/jlm/llvm/ir/operators/alloca.hpp index 101c8a126..8236ddc43 100644 --- a/jlm/llvm/ir/operators/alloca.hpp +++ b/jlm/llvm/ir/operators/alloca.hpp @@ -37,12 +37,12 @@ class alloca_op final : public 
rvsdg::SimpleOperation alloca_op(alloca_op && other) noexcept = default; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline const rvsdg::bittype & diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index 5557292d7..cfd934bef 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -130,7 +130,7 @@ invariantInput(const rvsdg::output & output) CallOperation::~CallOperation() = default; bool -CallOperation::operator==(const operation & other) const noexcept +CallOperation::operator==(const Operation & other) const noexcept { auto callOperation = dynamic_cast(&other); return callOperation && FunctionType_ == callOperation->FunctionType_; @@ -142,10 +142,10 @@ CallOperation::debug_string() const return "CALL"; } -std::unique_ptr +std::unique_ptr CallOperation::copy() const { - return std::unique_ptr(new CallOperation(*this)); + return std::make_unique(*this); } rvsdg::Node * diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index 163923b3a..2bef541d4 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -30,7 +30,7 @@ class CallOperation final : public jlm::rvsdg::SimpleOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; @@ -41,7 +41,7 @@ class CallOperation final : public jlm::rvsdg::SimpleOperation return FunctionType_; } - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::unique_ptr diff --git a/jlm/llvm/ir/operators/delta.cpp b/jlm/llvm/ir/operators/delta.cpp index a74a991c3..6d734c799 100644 --- a/jlm/llvm/ir/operators/delta.cpp +++ 
b/jlm/llvm/ir/operators/delta.cpp @@ -23,14 +23,14 @@ operation::debug_string() const return util::strfmt("DELTA[", name(), "]"); } -std::unique_ptr +std::unique_ptr operation::copy() const { - return std::unique_ptr(new delta::operation(*this)); + return std::make_unique(*this); } bool -operation::operator==(const jlm::rvsdg::operation & other) const noexcept +operation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->name_ == name_ && op->linkage_ == linkage_ && op->constant_ == constant_ diff --git a/jlm/llvm/ir/operators/delta.hpp b/jlm/llvm/ir/operators/delta.hpp index fb26c4c62..4000e585c 100644 --- a/jlm/llvm/ir/operators/delta.hpp +++ b/jlm/llvm/ir/operators/delta.hpp @@ -51,11 +51,11 @@ class operation final : public rvsdg::StructuralOperation virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual bool - operator==(const rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; const std::string & name() const noexcept diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index a4546b400..f0471ab59 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -13,8 +13,6 @@ namespace jlm::llvm::lambda { -/* lambda operation class */ - operation::~operation() = default; std::string @@ -24,17 +22,17 @@ operation::debug_string() const } bool -operation::operator==(const jlm::rvsdg::operation & other) const noexcept +operation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->type() == type() && op->name() == name() && op->linkage() == linkage() && op->attributes() == attributes(); } -std::unique_ptr +std::unique_ptr operation::copy() const { - return std::unique_ptr(new operation(*this)); + return std::make_unique(*this); } /* lambda node class */ diff --git 
a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index df6e4b7f1..b9a3a579b 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -90,9 +90,9 @@ class operation final : public rvsdg::StructuralOperation debug_string() const override; bool - operator==(const jlm::rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; private: diff --git a/jlm/llvm/ir/operators/operators.cpp b/jlm/llvm/ir/operators/operators.cpp index 14166f5ea..d76cf1076 100644 --- a/jlm/llvm/ir/operators/operators.cpp +++ b/jlm/llvm/ir/operators/operators.cpp @@ -17,7 +17,7 @@ phi_op::~phi_op() noexcept {} bool -phi_op::operator==(const operation & other) const noexcept +phi_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->nodes_ == nodes_ && op->result(0) == result(0); @@ -38,10 +38,10 @@ phi_op::debug_string() const return "PHI" + str; } -std::unique_ptr +std::unique_ptr phi_op::copy() const { - return std::unique_ptr(new phi_op(*this)); + return std::make_unique(*this); } /* assignment operator */ @@ -50,7 +50,7 @@ assignment_op::~assignment_op() noexcept {} bool -assignment_op::operator==(const operation & other) const noexcept +assignment_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0); @@ -62,10 +62,10 @@ assignment_op::debug_string() const return "ASSIGN"; } -std::unique_ptr +std::unique_ptr assignment_op::copy() const { - return std::unique_ptr(new assignment_op(*this)); + return std::make_unique(*this); } /* select operator */ @@ -74,7 +74,7 @@ select_op::~select_op() noexcept {} bool -select_op::operator==(const operation & other) const noexcept +select_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op 
&& op->result(0) == result(0); @@ -86,10 +86,10 @@ select_op::debug_string() const return "SELECT"; } -std::unique_ptr +std::unique_ptr select_op::copy() const { - return std::unique_ptr(new select_op(*this)); + return std::make_unique(*this); } /* vectorselect operator */ @@ -98,7 +98,7 @@ vectorselect_op::~vectorselect_op() noexcept {} bool -vectorselect_op::operator==(const operation & other) const noexcept +vectorselect_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->type() == type(); @@ -110,10 +110,10 @@ vectorselect_op::debug_string() const return "VECTORSELECT"; } -std::unique_ptr +std::unique_ptr vectorselect_op::copy() const { - return std::unique_ptr(new vectorselect_op(*this)); + return std::make_unique(*this); } /* fp2ui operator */ @@ -122,7 +122,7 @@ fp2ui_op::~fp2ui_op() noexcept {} bool -fp2ui_op::operator==(const operation & other) const noexcept +fp2ui_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -134,10 +134,10 @@ fp2ui_op::debug_string() const return "FP2UI"; } -std::unique_ptr +std::unique_ptr fp2ui_op::copy() const { - return std::unique_ptr(new fp2ui_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -158,7 +158,7 @@ fp2si_op::~fp2si_op() noexcept {} bool -fp2si_op::operator==(const operation & other) const noexcept +fp2si_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -170,10 +170,10 @@ fp2si_op::debug_string() const return "FP2UI"; } -std::unique_ptr +std::unique_ptr fp2si_op::copy() const { - return std::unique_ptr(new fp2si_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -194,7 +194,7 @@ ctl2bits_op::~ctl2bits_op() noexcept {} bool -ctl2bits_op::operator==(const operation & other) 
const noexcept +ctl2bits_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -206,10 +206,10 @@ ctl2bits_op::debug_string() const return "CTL2BITS"; } -std::unique_ptr +std::unique_ptr ctl2bits_op::copy() const { - return std::unique_ptr(new ctl2bits_op(*this)); + return std::make_unique(*this); } /* branch operator */ @@ -218,7 +218,7 @@ branch_op::~branch_op() noexcept {} bool -branch_op::operator==(const operation & other) const noexcept +branch_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0); @@ -230,16 +230,16 @@ branch_op::debug_string() const return "BRANCH"; } -std::unique_ptr +std::unique_ptr branch_op::copy() const { - return std::unique_ptr(new branch_op(*this)); + return std::make_unique(*this); } ConstantPointerNullOperation::~ConstantPointerNullOperation() noexcept = default; bool -ConstantPointerNullOperation::operator==(const operation & other) const noexcept +ConstantPointerNullOperation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->GetPointerType() == GetPointerType(); @@ -251,10 +251,10 @@ ConstantPointerNullOperation::debug_string() const return "ConstantPointerNull"; } -std::unique_ptr +std::unique_ptr ConstantPointerNullOperation::copy() const { - return std::unique_ptr(new ConstantPointerNullOperation(*this)); + return std::make_unique(*this); } /* bits2ptr operator */ @@ -263,7 +263,7 @@ bits2ptr_op::~bits2ptr_op() {} bool -bits2ptr_op::operator==(const operation & other) const noexcept +bits2ptr_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -275,10 +275,10 @@ bits2ptr_op::debug_string() const return "BITS2PTR"; } -std::unique_ptr +std::unique_ptr bits2ptr_op::copy() 
const { - return std::unique_ptr(new bits2ptr_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -299,7 +299,7 @@ ptr2bits_op::~ptr2bits_op() {} bool -ptr2bits_op::operator==(const operation & other) const noexcept +ptr2bits_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -311,10 +311,10 @@ ptr2bits_op::debug_string() const return "PTR2BITS"; } -std::unique_ptr +std::unique_ptr ptr2bits_op::copy() const { - return std::unique_ptr(new ptr2bits_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -333,7 +333,7 @@ ConstantDataArray::~ConstantDataArray() {} bool -ConstantDataArray::operator==(const rvsdg::operation & other) const noexcept +ConstantDataArray::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->result(0) == result(0); @@ -345,10 +345,10 @@ ConstantDataArray::debug_string() const return "ConstantDataArray"; } -std::unique_ptr +std::unique_ptr ConstantDataArray::copy() const { - return std::unique_ptr(new ConstantDataArray(*this)); + return std::make_unique(*this); } /* pointer compare operator */ @@ -357,7 +357,7 @@ ptrcmp_op::~ptrcmp_op() {} bool -ptrcmp_op::operator==(const operation & other) const noexcept +ptrcmp_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->cmp_ == cmp_; @@ -377,10 +377,10 @@ ptrcmp_op::debug_string() const return "PTRCMP " + map[cmp()]; } -std::unique_ptr +std::unique_ptr ptrcmp_op::copy() const { - return std::unique_ptr(new ptrcmp_op(*this)); + return std::make_unique(*this); } rvsdg::binop_reduction_path_t @@ -405,7 +405,7 @@ zext_op::~zext_op() {} bool -zext_op::operator==(const operation & other) const noexcept +zext_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op 
&& op->argument(0) == argument(0) && op->result(0) == result(0); @@ -417,10 +417,10 @@ zext_op::debug_string() const return util::strfmt("ZEXT[", nsrcbits(), " -> ", ndstbits(), "]"); } -std::unique_ptr +std::unique_ptr zext_op::copy() const { - return std::unique_ptr(new zext_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -452,7 +452,7 @@ ConstantFP::~ConstantFP() {} bool -ConstantFP::operator==(const operation & other) const noexcept +ConstantFP::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && size() == op->size() && constant().bitwiseIsEqual(op->constant()); @@ -472,10 +472,10 @@ ConstantFP::debug_string() const return s; } -std::unique_ptr +std::unique_ptr ConstantFP::copy() const { - return std::unique_ptr(new ConstantFP(*this)); + return std::make_unique(*this); } /* floating point comparison operator */ @@ -484,7 +484,7 @@ fpcmp_op::~fpcmp_op() {} bool -fpcmp_op::operator==(const operation & other) const noexcept +fpcmp_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->cmp_ == cmp_; @@ -512,10 +512,10 @@ fpcmp_op::debug_string() const return "FPCMP " + map[cmp()]; } -std::unique_ptr +std::unique_ptr fpcmp_op::copy() const { - return std::unique_ptr(new fpcmp_op(*this)); + return std::make_unique(*this); } rvsdg::binop_reduction_path_t @@ -537,7 +537,7 @@ fpcmp_op::reduce_operand_pair( UndefValueOperation::~UndefValueOperation() noexcept = default; bool -UndefValueOperation::operator==(const operation & other) const noexcept +UndefValueOperation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->GetType() == GetType(); @@ -549,16 +549,16 @@ UndefValueOperation::debug_string() const return "undef"; } -std::unique_ptr +std::unique_ptr UndefValueOperation::copy() const { - return std::unique_ptr(new UndefValueOperation(*this)); + return 
std::make_unique(*this); } PoisonValueOperation::~PoisonValueOperation() noexcept = default; bool -PoisonValueOperation::operator==(const operation & other) const noexcept +PoisonValueOperation::operator==(const Operation & other) const noexcept { auto operation = dynamic_cast(&other); return operation && operation->GetType() == GetType(); @@ -570,10 +570,10 @@ PoisonValueOperation::debug_string() const return "poison"; } -std::unique_ptr +std::unique_ptr PoisonValueOperation::copy() const { - return std::unique_ptr(new PoisonValueOperation(*this)); + return std::make_unique(*this); } /* floating point arithmetic operator */ @@ -582,7 +582,7 @@ fpbin_op::~fpbin_op() {} bool -fpbin_op::operator==(const operation & other) const noexcept +fpbin_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->fpop() == fpop() && op->size() == size(); @@ -601,10 +601,10 @@ fpbin_op::debug_string() const return "FPOP " + map[fpop()]; } -std::unique_ptr +std::unique_ptr fpbin_op::copy() const { - return std::unique_ptr(new fpbin_op(*this)); + return std::make_unique(*this); } rvsdg::binop_reduction_path_t @@ -629,7 +629,7 @@ fpext_op::~fpext_op() {} bool -fpext_op::operator==(const operation & other) const noexcept +fpext_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->srcsize() == srcsize() && op->dstsize() == dstsize(); @@ -641,10 +641,10 @@ fpext_op::debug_string() const return "fpext"; } -std::unique_ptr +std::unique_ptr fpext_op::copy() const { - return std::unique_ptr(new fpext_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -665,7 +665,7 @@ fpneg_op::~fpneg_op() {} bool -fpneg_op::operator==(const operation & other) const noexcept +fpneg_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->size() == size(); @@ -677,10 +677,10 @@ fpneg_op::debug_string() const return "fpneg"; } 
-std::unique_ptr +std::unique_ptr fpneg_op::copy() const { - return std::unique_ptr(new fpneg_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -701,7 +701,7 @@ fptrunc_op::~fptrunc_op() {} bool -fptrunc_op::operator==(const operation & other) const noexcept +fptrunc_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->srcsize() == srcsize() && op->dstsize() == dstsize(); @@ -713,10 +713,10 @@ fptrunc_op::debug_string() const return "fptrunc"; } -std::unique_ptr +std::unique_ptr fptrunc_op::copy() const { - return std::unique_ptr(new fptrunc_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -737,7 +737,7 @@ valist_op::~valist_op() {} bool -valist_op::operator==(const operation & other) const noexcept +valist_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); if (!op || op->narguments() != narguments()) @@ -758,10 +758,10 @@ valist_op::debug_string() const return "VALIST"; } -std::unique_ptr +std::unique_ptr valist_op::copy() const { - return std::unique_ptr(new valist_op(*this)); + return std::make_unique(*this); } /* bitcast operator */ @@ -770,7 +770,7 @@ bitcast_op::~bitcast_op() {} bool -bitcast_op::operator==(const operation & other) const noexcept +bitcast_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -787,10 +787,10 @@ bitcast_op::debug_string() const "]"); } -std::unique_ptr +std::unique_ptr bitcast_op::copy() const { - return std::unique_ptr(new bitcast_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -811,7 +811,7 @@ ConstantStruct::~ConstantStruct() {} bool -ConstantStruct::operator==(const operation & other) const noexcept +ConstantStruct::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->result(0) == 
result(0); @@ -823,10 +823,10 @@ ConstantStruct::debug_string() const return "ConstantStruct"; } -std::unique_ptr +std::unique_ptr ConstantStruct::copy() const { - return std::unique_ptr(new ConstantStruct(*this)); + return std::make_unique(*this); } /* trunc operator */ @@ -835,7 +835,7 @@ trunc_op::~trunc_op() {} bool -trunc_op::operator==(const operation & other) const noexcept +trunc_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -847,10 +847,10 @@ trunc_op::debug_string() const return util::strfmt("TRUNC[", nsrcbits(), " -> ", ndstbits(), "]"); } -std::unique_ptr +std::unique_ptr trunc_op::copy() const { - return std::unique_ptr(new trunc_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t @@ -871,7 +871,7 @@ uitofp_op::~uitofp_op() {} bool -uitofp_op::operator==(const operation & other) const noexcept +uitofp_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -883,7 +883,7 @@ uitofp_op::debug_string() const return "UITOFP"; } -std::unique_ptr +std::unique_ptr uitofp_op::copy() const { return std::make_unique(*this); @@ -907,7 +907,7 @@ sitofp_op::~sitofp_op() {} bool -sitofp_op::operator==(const operation & other) const noexcept +sitofp_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -919,7 +919,7 @@ sitofp_op::debug_string() const return "SITOFP"; } -std::unique_ptr +std::unique_ptr sitofp_op::copy() const { return std::make_unique(*this); @@ -943,7 +943,7 @@ ConstantArray::~ConstantArray() {} bool -ConstantArray::operator==(const operation & other) const noexcept +ConstantArray::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && 
op->result(0) == result(0); @@ -955,10 +955,10 @@ ConstantArray::debug_string() const return "ConstantArray"; } -std::unique_ptr +std::unique_ptr ConstantArray::copy() const { - return std::unique_ptr(new ConstantArray(*this)); + return std::make_unique(*this); } /* ConstantAggregateZero operator */ @@ -967,7 +967,7 @@ ConstantAggregateZero::~ConstantAggregateZero() {} bool -ConstantAggregateZero::operator==(const operation & other) const noexcept +ConstantAggregateZero::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->result(0) == result(0); @@ -979,10 +979,10 @@ ConstantAggregateZero::debug_string() const return "ConstantAggregateZero"; } -std::unique_ptr +std::unique_ptr ConstantAggregateZero::copy() const { - return std::unique_ptr(new ConstantAggregateZero(*this)); + return std::make_unique(*this); } /* extractelement operator */ @@ -991,7 +991,7 @@ extractelement_op::~extractelement_op() {} bool -extractelement_op::operator==(const operation & other) const noexcept +extractelement_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->argument(1) == argument(1); @@ -1003,10 +1003,10 @@ extractelement_op::debug_string() const return "EXTRACTELEMENT"; } -std::unique_ptr +std::unique_ptr extractelement_op::copy() const { - return std::unique_ptr(new extractelement_op(*this)); + return std::make_unique(*this); } /* shufflevector operator */ @@ -1015,7 +1015,7 @@ shufflevector_op::~shufflevector_op() {} bool -shufflevector_op::operator==(const operation & other) const noexcept +shufflevector_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->Mask() == Mask(); @@ -1027,10 +1027,10 @@ shufflevector_op::debug_string() const return "SHUFFLEVECTOR"; } -std::unique_ptr +std::unique_ptr shufflevector_op::copy() const { - return std::unique_ptr(new 
shufflevector_op(*this)); + return std::make_unique(*this); } /* constantvector operator */ @@ -1039,7 +1039,7 @@ constantvector_op::~constantvector_op() {} bool -constantvector_op::operator==(const operation & other) const noexcept +constantvector_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->result(0) == result(0); @@ -1051,10 +1051,10 @@ constantvector_op::debug_string() const return "CONSTANTVECTOR"; } -std::unique_ptr +std::unique_ptr constantvector_op::copy() const { - return std::unique_ptr(new constantvector_op(*this)); + return std::make_unique(*this); } /* insertelement operator */ @@ -1063,7 +1063,7 @@ insertelement_op::~insertelement_op() {} bool -insertelement_op::operator==(const operation & other) const noexcept +insertelement_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->argument(1) == argument(1) @@ -1076,10 +1076,10 @@ insertelement_op::debug_string() const return "INSERTELEMENT"; } -std::unique_ptr +std::unique_ptr insertelement_op::copy() const { - return std::unique_ptr(new insertelement_op(*this)); + return std::make_unique(*this); } /* vectorunary operator */ @@ -1088,7 +1088,7 @@ vectorunary_op::~vectorunary_op() {} bool -vectorunary_op::operator==(const rvsdg::operation & other) const noexcept +vectorunary_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->operation() == operation(); @@ -1100,10 +1100,10 @@ vectorunary_op::debug_string() const return util::strfmt("VEC", operation().debug_string()); } -std::unique_ptr +std::unique_ptr vectorunary_op::copy() const { - return std::unique_ptr(new vectorunary_op(*this)); + return std::make_unique(*this); } /* vectorbinary operator */ @@ -1112,7 +1112,7 @@ vectorbinary_op::~vectorbinary_op() {} bool -vectorbinary_op::operator==(const rvsdg::operation & other) const noexcept 
+vectorbinary_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->operation() == operation(); @@ -1124,10 +1124,10 @@ vectorbinary_op::debug_string() const return util::strfmt("VEC", operation().debug_string()); } -std::unique_ptr +std::unique_ptr vectorbinary_op::copy() const { - return std::unique_ptr(new vectorbinary_op(*this)); + return std::make_unique(*this); } /* const data vector operator */ @@ -1136,7 +1136,7 @@ constant_data_vector_op::~constant_data_vector_op() {} bool -constant_data_vector_op::operator==(const rvsdg::operation & other) const noexcept +constant_data_vector_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->result(0) == result(0); @@ -1148,10 +1148,10 @@ constant_data_vector_op::debug_string() const return "CONSTANTDATAVECTOR"; } -std::unique_ptr +std::unique_ptr constant_data_vector_op::copy() const { - return std::unique_ptr(new constant_data_vector_op(*this)); + return std::make_unique(*this); } /* extractvalue operator */ @@ -1160,7 +1160,7 @@ ExtractValue::~ExtractValue() {} bool -ExtractValue::operator==(const rvsdg::operation & other) const noexcept +ExtractValue::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->indices_ == indices_ && op->type() == type(); @@ -1172,10 +1172,10 @@ ExtractValue::debug_string() const return "ExtractValue"; } -std::unique_ptr +std::unique_ptr ExtractValue::copy() const { - return std::unique_ptr(new ExtractValue(*this)); + return std::make_unique(*this); } /* malloc operator */ @@ -1184,7 +1184,7 @@ malloc_op::~malloc_op() {} bool -malloc_op::operator==(const operation & other) const noexcept +malloc_op::operator==(const Operation & other) const noexcept { /* Avoid CNE for malloc operator @@ -1198,10 +1198,10 @@ malloc_op::debug_string() const return "MALLOC"; } -std::unique_ptr +std::unique_ptr malloc_op::copy() const { - return 
std::unique_ptr(new malloc_op(*this)); + return std::make_unique(*this); } /* free operator */ @@ -1209,7 +1209,7 @@ malloc_op::copy() const FreeOperation::~FreeOperation() noexcept = default; bool -FreeOperation::operator==(const operation & other) const noexcept +FreeOperation::operator==(const Operation & other) const noexcept { // Avoid CNE for free operator return this == &other; @@ -1221,10 +1221,10 @@ FreeOperation::debug_string() const return "FREE"; } -std::unique_ptr +std::unique_ptr FreeOperation::copy() const { - return std::unique_ptr(new FreeOperation(*this)); + return std::make_unique(*this); } } diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index 3e20383a8..fee9c06b7 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -47,12 +47,12 @@ class phi_op final : public rvsdg::SimpleOperation operator=(phi_op &&) = delete; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline const jlm::rvsdg::Type & @@ -111,12 +111,12 @@ class assignment_op final : public rvsdg::SimpleOperation assignment_op(assignment_op &&) = default; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::unique_ptr @@ -141,12 +141,12 @@ class select_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; 
[[nodiscard]] const jlm::rvsdg::Type & @@ -185,12 +185,12 @@ class vectorselect_op final : public rvsdg::SimpleOperation public: virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] const rvsdg::Type & @@ -268,12 +268,12 @@ class fp2ui_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -331,12 +331,12 @@ class fp2si_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -376,12 +376,12 @@ class ctl2bits_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::unique_ptr @@ -412,12 +412,12 @@ class branch_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline size_t @@ -448,12 +448,12 @@ class ConstantPointerNullOperation final : 
public rvsdg::SimpleOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] const PointerType & @@ -515,12 +515,12 @@ class bits2ptr_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -595,12 +595,12 @@ class ptr2bits_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -647,12 +647,12 @@ class ConstantDataArray final : public rvsdg::SimpleOperation } virtual bool - operator==(const jlm::rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; size_t @@ -724,12 +724,12 @@ class ptrcmp_op final : public jlm::rvsdg::binary_op {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual jlm::rvsdg::binop_reduction_path_t @@ -804,12 +804,12 @@ class zext_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & 
other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual jlm::rvsdg::unop_reduction_path_t @@ -882,12 +882,12 @@ class ConstantFP final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline const ::llvm::APFloat & @@ -957,12 +957,12 @@ class fpcmp_op final : public jlm::rvsdg::binary_op {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::binop_reduction_path_t @@ -1024,12 +1024,12 @@ class UndefValueOperation final : public rvsdg::SimpleOperation operator=(UndefValueOperation &&) = delete; bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; [[nodiscard]] const rvsdg::Type & @@ -1096,12 +1096,12 @@ class PoisonValueOperation final : public rvsdg::SimpleOperation operator=(PoisonValueOperation &&) = delete; bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; std::string debug_string() const override; - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; const jlm::rvsdg::ValueType & @@ -1166,12 +1166,12 @@ class fpbin_op final : public jlm::rvsdg::binary_op {} virtual bool - operator==(const 
operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::binop_reduction_path_t @@ -1252,12 +1252,12 @@ class fpext_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -1311,12 +1311,12 @@ class fpneg_op final : public jlm::rvsdg::unary_op {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -1387,12 +1387,12 @@ class fptrunc_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -1450,12 +1450,12 @@ class valist_op final : public rvsdg::SimpleOperation operator=(valist_op &&) = delete; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::unique_ptr @@ -1505,21 +1505,21 @@ class bitcast_op final : public jlm::rvsdg::unary_op bitcast_op(const bitcast_op &) = default; - bitcast_op(jlm::rvsdg::operation &&) = delete; + 
bitcast_op(Operation &&) = delete; bitcast_op & - operator=(const jlm::rvsdg::operation &) = delete; + operator=(const Operation &) = delete; bitcast_op & - operator=(jlm::rvsdg::operation &&) = delete; + operator=(Operation &&) = delete; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -1579,12 +1579,12 @@ class ConstantStruct final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; const StructType & @@ -1673,12 +1673,12 @@ class trunc_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual jlm::rvsdg::unop_reduction_path_t @@ -1755,12 +1755,12 @@ class uitofp_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual jlm::rvsdg::unop_reduction_path_t @@ -1814,12 +1814,12 @@ class sitofp_op final : public jlm::rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + 
[[nodiscard]] std::unique_ptr copy() const override; jlm::rvsdg::unop_reduction_path_t @@ -1860,12 +1860,12 @@ class ConstantArray final : public rvsdg::SimpleOperation } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; size_t @@ -1929,12 +1929,12 @@ class ConstantAggregateZero final : public rvsdg::SimpleOperation } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::unique_ptr @@ -1966,12 +1966,12 @@ class extractelement_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static inline std::unique_ptr @@ -2010,12 +2010,12 @@ class shufflevector_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; const ::llvm::ArrayRef @@ -2061,12 +2061,12 @@ class constantvector_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static inline std::unique_ptr @@ 
-2105,12 +2105,12 @@ class insertelement_op final : public rvsdg::SimpleOperation } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static inline std::unique_ptr @@ -2197,12 +2197,12 @@ class vectorunary_op final : public rvsdg::SimpleOperation } virtual bool - operator==(const jlm::rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static inline std::unique_ptr @@ -2221,7 +2221,7 @@ class vectorunary_op final : public rvsdg::SimpleOperation } private: - std::unique_ptr op_; + std::unique_ptr op_; }; /* vectorbinary operator */ @@ -2292,12 +2292,12 @@ class vectorbinary_op final : public rvsdg::SimpleOperation } virtual bool - operator==(const jlm::rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static inline std::unique_ptr @@ -2318,7 +2318,7 @@ class vectorbinary_op final : public rvsdg::SimpleOperation } private: - std::unique_ptr op_; + std::unique_ptr op_; }; /* constant data vector operator */ @@ -2335,12 +2335,12 @@ class constant_data_vector_op final : public rvsdg::SimpleOperation public: virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; size_t @@ -2390,12 +2390,12 @@ class ExtractValue final : public rvsdg::SimpleOperation } virtual 
bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; const_iterator @@ -2468,12 +2468,12 @@ class malloc_op final : public rvsdg::SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; const jlm::rvsdg::bittype & @@ -2527,12 +2527,12 @@ class FreeOperation final : public rvsdg::SimpleOperation {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; [[nodiscard]] std::string debug_string() const override; - [[nodiscard]] std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static std::unique_ptr diff --git a/jlm/llvm/ir/operators/sext.cpp b/jlm/llvm/ir/operators/sext.cpp index 9e54460c6..d19651345 100644 --- a/jlm/llvm/ir/operators/sext.cpp +++ b/jlm/llvm/ir/operators/sext.cpp @@ -9,8 +9,6 @@ namespace jlm::llvm { -/* sext operation */ - static const rvsdg::unop_reduction_path_t sext_reduction_bitunary = 128; static const rvsdg::unop_reduction_path_t sext_reduction_bitbinary = 129; @@ -78,7 +76,7 @@ sext_op::~sext_op() {} bool -sext_op::operator==(const operation & other) const noexcept +sext_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -90,10 +88,10 @@ sext_op::debug_string() const return util::strfmt("SEXT[", nsrcbits(), " -> ", ndstbits(), "]"); } -std::unique_ptr +std::unique_ptr sext_op::copy() const { - return std::unique_ptr(new sext_op(*this)); + return std::make_unique(*this); } rvsdg::unop_reduction_path_t 
diff --git a/jlm/llvm/ir/operators/sext.hpp b/jlm/llvm/ir/operators/sext.hpp index 1e623666c..3bf1af6e5 100644 --- a/jlm/llvm/ir/operators/sext.hpp +++ b/jlm/llvm/ir/operators/sext.hpp @@ -47,12 +47,12 @@ class sext_op final : public rvsdg::unary_op } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual rvsdg::unop_reduction_path_t diff --git a/jlm/llvm/ir/tac.hpp b/jlm/llvm/ir/tac.hpp index 463a2233e..849f62dd9 100644 --- a/jlm/llvm/ir/tac.hpp +++ b/jlm/llvm/ir/tac.hpp @@ -178,7 +178,7 @@ class tac final } std::vector operands_; - std::unique_ptr operation_; + std::unique_ptr operation_; std::vector> results_; }; diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 4088d82a8..c9b7c4825 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -47,7 +47,7 @@ class unrollstat final : public util::Statistics /* helper functions */ static bool -is_eqcmp(const jlm::rvsdg::operation & op) +is_eqcmp(const rvsdg::Operation & op) { return dynamic_cast(&op) || dynamic_cast(&op) @@ -470,7 +470,7 @@ unroll(rvsdg::ThetaNode * otheta, size_t factor) if (!ui) return; - auto nf = otheta->graph()->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = otheta->graph()->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); if (ui->is_known() && ui->niterations()) diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 7f0a79918..2aa8d6a80 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -79,7 +79,7 @@ binary_normal_form::binary_normal_form( bool binary_normal_form::normalize_node(Node * node) const { - const operation & base_op = node->GetOperation(); + const Operation & base_op = node->GetOperation(); const auto & op = *static_cast(&base_op); return normalize_node(node, op); @@ -325,7 +325,7 
@@ flattened_binary_op::~flattened_binary_op() noexcept {} bool -flattened_binary_op::operator==(const operation & other) const noexcept +flattened_binary_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->bin_operation() == bin_operation() && op->narguments() == narguments(); @@ -337,12 +337,11 @@ flattened_binary_op::debug_string() const return jlm::util::strfmt("FLATTENED[", op_->debug_string(), "]"); } -std::unique_ptr +std::unique_ptr flattened_binary_op::copy() const { std::unique_ptr copied_op(static_cast(op_->copy().release())); - return std::unique_ptr( - new flattened_binary_op(std::move(copied_op), narguments())); + return std::make_unique(std::move(copied_op), narguments()); } /* diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index 5daad18d8..be2ef06c2 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -193,12 +193,12 @@ class flattened_binary_op final : public SimpleOperation } virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline const binary_op & diff --git a/jlm/rvsdg/bitstring/arithmetic-impl.hpp b/jlm/rvsdg/bitstring/arithmetic-impl.hpp index d81ca7fcc..44daa6db9 100644 --- a/jlm/rvsdg/bitstring/arithmetic-impl.hpp +++ b/jlm/rvsdg/bitstring/arithmetic-impl.hpp @@ -18,7 +18,7 @@ MakeBitUnaryOperation::~MakeBitUnaryOperation() noexcept template bool -MakeBitUnaryOperation::operator==(const operation & other) const noexcept +MakeBitUnaryOperation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->type() == type(); @@ -39,7 +39,7 @@ MakeBitUnaryOperation::debug_string() const } template -std::unique_ptr +std::unique_ptr MakeBitUnaryOperation::copy() const { return std::make_unique(*this); @@ -58,7 +58,7 
@@ MakeBitBinaryOperation::~MakeBitBinaryOperation() noex template bool -MakeBitBinaryOperation::operator==(const operation & other) const noexcept +MakeBitBinaryOperation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->type() == type(); @@ -88,7 +88,7 @@ MakeBitBinaryOperation::debug_string() const } template -std::unique_ptr +std::unique_ptr MakeBitBinaryOperation::copy() const { return std::make_unique(*this); diff --git a/jlm/rvsdg/bitstring/arithmetic.hpp b/jlm/rvsdg/bitstring/arithmetic.hpp index 1ed32dc01..d91340df3 100644 --- a/jlm/rvsdg/bitstring/arithmetic.hpp +++ b/jlm/rvsdg/bitstring/arithmetic.hpp @@ -23,7 +23,7 @@ class MakeBitUnaryOperation final : public bitunary_op {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; bitvalue_repr reduce_constant(const bitvalue_repr & arg) const override; @@ -31,7 +31,7 @@ class MakeBitUnaryOperation final : public bitunary_op std::string debug_string() const override; - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; std::unique_ptr @@ -55,7 +55,7 @@ class MakeBitBinaryOperation final : public bitbinary_op {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; enum binary_op::flags flags() const noexcept override; @@ -66,7 +66,7 @@ class MakeBitBinaryOperation final : public bitbinary_op std::string debug_string() const override; - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; std::unique_ptr diff --git a/jlm/rvsdg/bitstring/bitoperation-classes.cpp b/jlm/rvsdg/bitstring/bitoperation-classes.cpp index d5f2f266f..4c4a7054b 100644 --- a/jlm/rvsdg/bitstring/bitoperation-classes.cpp +++ b/jlm/rvsdg/bitstring/bitoperation-classes.cpp @@ -10,8 +10,6 @@ namespace jlm::rvsdg { -/* bitunary operation */ - bitunary_op::~bitunary_op() noexcept {} @@ -37,8 +35,6 @@ 
bitunary_op::reduce_operand(unop_reduction_path_t path, jlm::rvsdg::output * arg return nullptr; } -/* bitbinary operation */ - bitbinary_op::~bitbinary_op() noexcept {} @@ -69,8 +65,6 @@ bitbinary_op::reduce_operand_pair( return nullptr; } -/* bitcompare operation */ - bitcompare_op::~bitcompare_op() noexcept {} diff --git a/jlm/rvsdg/bitstring/comparison-impl.hpp b/jlm/rvsdg/bitstring/comparison-impl.hpp index b77c88a05..9910fb923 100644 --- a/jlm/rvsdg/bitstring/comparison-impl.hpp +++ b/jlm/rvsdg/bitstring/comparison-impl.hpp @@ -18,7 +18,7 @@ MakeBitComparisonOperation::~MakeBitComparisonOperatio template bool MakeBitComparisonOperation::operator==( - const operation & other) const noexcept + const Operation & other) const noexcept { auto op = dynamic_cast *>(&other); return op && op->type() == type(); @@ -56,7 +56,7 @@ MakeBitComparisonOperation::debug_string() const } template -std::unique_ptr +std::unique_ptr MakeBitComparisonOperation::copy() const { return std::make_unique(*this); diff --git a/jlm/rvsdg/bitstring/comparison.hpp b/jlm/rvsdg/bitstring/comparison.hpp index 36cdb0862..bd1799166 100644 --- a/jlm/rvsdg/bitstring/comparison.hpp +++ b/jlm/rvsdg/bitstring/comparison.hpp @@ -24,7 +24,7 @@ class MakeBitComparisonOperation final : public bitcompare_op {} bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; enum binary_op::flags flags() const noexcept override; @@ -35,7 +35,7 @@ class MakeBitComparisonOperation final : public bitcompare_op std::string debug_string() const override; - std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; std::unique_ptr diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 6f4def471..428f0335f 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -262,7 +262,7 @@ bitconcat_op::~bitconcat_op() noexcept {} bool -bitconcat_op::operator==(const jlm::rvsdg::operation & 
other) const noexcept +bitconcat_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); if (!op || op->narguments() != narguments()) @@ -364,10 +364,10 @@ bitconcat_op::debug_string() const return "BITCONCAT"; } -std::unique_ptr +std::unique_ptr bitconcat_op::copy() const { - return std::unique_ptr(new bitconcat_op(*this)); + return std::make_unique(*this); } } diff --git a/jlm/rvsdg/bitstring/concat.hpp b/jlm/rvsdg/bitstring/concat.hpp index 1338c9574..b8226c0a4 100644 --- a/jlm/rvsdg/bitstring/concat.hpp +++ b/jlm/rvsdg/bitstring/concat.hpp @@ -26,7 +26,7 @@ class bitconcat_op final : public jlm::rvsdg::binary_op {} virtual bool - operator==(const jlm::rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual binop_reduction_path_t can_reduce_operand_pair(const jlm::rvsdg::output * arg1, const jlm::rvsdg::output * arg2) @@ -44,7 +44,7 @@ class bitconcat_op final : public jlm::rvsdg::binary_op virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; private: diff --git a/jlm/rvsdg/bitstring/slice.cpp b/jlm/rvsdg/bitstring/slice.cpp index 651cebc46..7ad4901d6 100644 --- a/jlm/rvsdg/bitstring/slice.cpp +++ b/jlm/rvsdg/bitstring/slice.cpp @@ -16,7 +16,7 @@ bitslice_op::~bitslice_op() noexcept {} bool -bitslice_op::operator==(const operation & other) const noexcept +bitslice_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->low() == low() && op->high() == high() && op->argument(0) == argument(0); @@ -97,10 +97,10 @@ bitslice_op::reduce_operand(unop_reduction_path_t path, jlm::rvsdg::output * arg return nullptr; } -std::unique_ptr +std::unique_ptr bitslice_op::copy() const { - return std::unique_ptr(new bitslice_op(*this)); + return std::make_unique(*this); } jlm::rvsdg::output * diff --git a/jlm/rvsdg/bitstring/slice.hpp 
b/jlm/rvsdg/bitstring/slice.hpp index 4c5e5734f..0ed48823c 100644 --- a/jlm/rvsdg/bitstring/slice.hpp +++ b/jlm/rvsdg/bitstring/slice.hpp @@ -28,7 +28,7 @@ class bitslice_op : public jlm::rvsdg::unary_op {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; @@ -51,7 +51,7 @@ class bitslice_op : public jlm::rvsdg::unary_op return low_ + std::static_pointer_cast(result(0))->nbits(); } - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline const Type & diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index 2c8311150..6810f1a31 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -98,7 +98,7 @@ match_op::match_op( {} bool -match_op::operator==(const operation & other) const noexcept +match_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->default_alternative_ == default_alternative_ && op->mapping_ == mapping_ @@ -140,10 +140,10 @@ match_op::debug_string() const return "MATCH" + str; } -std::unique_ptr +std::unique_ptr match_op::copy() const { - return std::unique_ptr(new match_op(*this)); + return std::make_unique(*this); } jlm::rvsdg::output * diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index 7a243b249..5a5feb3a4 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -126,13 +126,13 @@ typedef domain_const_op(&op) != nullptr; } static inline const ctlconstant_op & -to_ctlconstant_op(const jlm::rvsdg::operation & op) noexcept +to_ctlconstant_op(const Operation & op) noexcept { JLM_ASSERT(is_ctlconstant_op(op)); return *static_cast(&op); @@ -154,7 +154,7 @@ class match_op final : public jlm::rvsdg::unary_op size_t nalternatives); virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual unop_reduction_path_t 
can_reduce_operand(const jlm::rvsdg::output * arg) const noexcept override; @@ -165,7 +165,7 @@ class match_op final : public jlm::rvsdg::unary_op virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; inline uint64_t @@ -253,7 +253,7 @@ extern template class domain_const_op< ctltype_of_value>; static inline const match_op & -to_match_op(const jlm::rvsdg::operation & op) noexcept +to_match_op(const Operation & op) noexcept { JLM_ASSERT(is(op)); return *static_cast(&op); diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index c53eaadf1..ea2b71e9f 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -242,8 +242,6 @@ gamma_normal_form::set_control_constant_reduction(bool enable) graph()->mark_denormalized(); } -/* gamma operation */ - GammaOperation::~GammaOperation() noexcept {} @@ -253,14 +251,14 @@ GammaOperation::debug_string() const return "GAMMA"; } -std::unique_ptr +std::unique_ptr GammaOperation::copy() const { - return std::unique_ptr(new GammaOperation(*this)); + return std::make_unique(*this); } bool -GammaOperation::operator==(const operation & other) const noexcept +GammaOperation::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->nalternatives_ == nalternatives_; diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 8e3878a4c..244d3e7ab 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -89,11 +89,11 @@ class GammaOperation final : public StructuralOperation virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; static jlm::rvsdg::gamma_normal_form * normal_form(Graph * graph) noexcept diff --git a/jlm/rvsdg/node-normal-form.hpp b/jlm/rvsdg/node-normal-form.hpp index 
61b5256bb..f23a72272 100644 --- a/jlm/rvsdg/node-normal-form.hpp +++ b/jlm/rvsdg/node-normal-form.hpp @@ -24,7 +24,7 @@ namespace jlm::rvsdg class Graph; class Node; -class operation; +class Operation; class output; class Region; diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 7cbb8dc7b..f09dca107 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -150,7 +150,7 @@ static void __attribute__((constructor)) register_node_normal_form(void) { jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::operation), + typeid(jlm::rvsdg::Operation), node_get_default_normal_form_); } @@ -188,7 +188,7 @@ node_output::GetOwner() const noexcept /* node class */ -Node::Node(std::unique_ptr op, rvsdg::Region * region) +Node::Node(std::unique_ptr op, Region * region) : depth_(0), graph_(region->graph()), region_(region), diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 5c820c003..5baf927bc 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -630,9 +630,9 @@ class Node public: virtual ~Node(); - Node(std::unique_ptr op, Region * region); + Node(std::unique_ptr op, Region * region); - [[nodiscard]] virtual const operation & + [[nodiscard]] virtual const Operation & GetOperation() const noexcept { return *operation_; @@ -880,7 +880,7 @@ class Node size_t depth_; Graph * graph_; rvsdg::Region * region_; - std::unique_ptr operation_; + std::unique_ptr operation_; std::vector> inputs_; std::vector> outputs_; }; diff --git a/jlm/rvsdg/nullary.hpp b/jlm/rvsdg/nullary.hpp index 235bf6a71..7e48bdf2f 100644 --- a/jlm/rvsdg/nullary.hpp +++ b/jlm/rvsdg/nullary.hpp @@ -70,7 +70,7 @@ class domain_const_op final : public nullary_op inline domain_const_op(domain_const_op && other) = default; virtual bool - operator==(const operation & other) const noexcept override + operator==(const Operation & other) const noexcept override { auto op = dynamic_cast(&other); return op && op->value_ == value_; @@ -88,10 +88,10 @@ class domain_const_op final : public 
nullary_op return value_; } - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override { - return std::unique_ptr(new domain_const_op(*this)); + return std::make_unique(*this); } static inline jlm::rvsdg::output * diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index d5159f72d..957b1fc81 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -11,17 +11,14 @@ namespace jlm::rvsdg { -operation::~operation() noexcept -{} +Operation::~Operation() noexcept = default; jlm::rvsdg::node_normal_form * -operation::normal_form(Graph * graph) noexcept +Operation::normal_form(Graph * graph) noexcept { - return graph->node_normal_form(typeid(operation)); + return graph->node_normal_form(typeid(Operation)); } -/* simple operation */ - SimpleOperation::~SimpleOperation() noexcept = default; size_t @@ -56,10 +53,8 @@ SimpleOperation::normal_form(Graph * graph) noexcept return static_cast(graph->node_normal_form(typeid(SimpleOperation))); } -/* structural operation */ - bool -StructuralOperation::operator==(const operation & other) const noexcept +StructuralOperation::operator==(const Operation & other) const noexcept { return typeid(*this) == typeid(other); } diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index e9d3bea66..f8c63cce5 100644 --- a/jlm/rvsdg/operation.hpp +++ b/jlm/rvsdg/operation.hpp @@ -24,22 +24,22 @@ class Region; class simple_normal_form; class structural_normal_form; -class operation +class Operation { public: - virtual ~operation() noexcept; + virtual ~Operation() noexcept; virtual bool - operator==(const operation & other) const noexcept = 0; + operator==(const Operation & other) const noexcept = 0; virtual std::string debug_string() const = 0; - virtual std::unique_ptr + [[nodiscard]] virtual std::unique_ptr copy() const = 0; inline bool - operator!=(const operation & other) const noexcept + operator!=(const Operation & other) const noexcept { return !(*this == other); } @@ -50,18 +50,16 @@ 
class operation template static inline bool -is(const jlm::rvsdg::operation & operation) noexcept +is(const Operation & operation) noexcept { static_assert( - std::is_base_of::value, + std::is_base_of::value, "Template parameter T must be derived from jlm::rvsdg::operation."); return dynamic_cast(&operation) != nullptr; } -/* simple operation */ - -class SimpleOperation : public operation +class SimpleOperation : public Operation { public: ~SimpleOperation() noexcept override; @@ -93,13 +91,11 @@ class SimpleOperation : public operation std::vector> results_; }; -/* structural operation */ - -class StructuralOperation : public operation +class StructuralOperation : public Operation { public: virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; static jlm::rvsdg::structural_normal_form * normal_form(Graph * graph) noexcept; diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 249ec3dc1..b305933ae 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -9,7 +9,7 @@ static jlm::rvsdg::Node * node_cse( jlm::rvsdg::Region * region, - const jlm::rvsdg::operation & op, + const jlm::rvsdg::Operation & op, const std::vector & arguments) { auto cse_test = [&](const jlm::rvsdg::Node * node) diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 3967fb89f..83e27ed92 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -16,7 +16,7 @@ mux_op::~mux_op() noexcept {} bool -mux_op::operator==(const operation & other) const noexcept +mux_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->narguments() == narguments() && op->nresults() == nresults() @@ -29,10 +29,10 @@ mux_op::debug_string() const return "STATEMUX"; } -std::unique_ptr +std::unique_ptr mux_op::copy() const { - return std::unique_ptr(new mux_op(*this)); + return std::make_unique(*this); } /* 
mux normal form */ diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index d0c39fb0e..4b688f0c8 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -70,12 +70,12 @@ class mux_op final : public SimpleOperation {} virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static jlm::rvsdg::mux_normal_form * @@ -86,7 +86,7 @@ class mux_op final : public SimpleOperation }; static inline bool -is_mux_op(const jlm::rvsdg::operation & op) +is_mux_op(const Operation & op) { return dynamic_cast(&op) != nullptr; } diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 621054abe..7cc13f345 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -10,8 +10,6 @@ namespace jlm::rvsdg { -/* theta operation */ - ThetaOperation::~ThetaOperation() noexcept = default; std::string @@ -20,10 +18,10 @@ ThetaOperation::debug_string() const return "THETA"; } -std::unique_ptr +std::unique_ptr ThetaOperation::copy() const { - return std::unique_ptr(new ThetaOperation(*this)); + return std::make_unique(*this); } ThetaNode::ThetaNode(rvsdg::Region & parent) @@ -116,7 +114,7 @@ ThetaNode::add_loopvar(jlm::rvsdg::output * origin) ThetaNode * ThetaNode::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { - auto nf = graph()->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph()->node_normal_form(typeid(Operation)); nf->set_mutable(false); rvsdg::SubstitutionMap rmap; diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 1acac7be1..4265e052f 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -23,7 +23,7 @@ class ThetaOperation final : public StructuralOperation virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; }; diff 
--git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 1df50e903..0b4bc4ded 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -19,7 +19,7 @@ StoreTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -73,7 +73,7 @@ StoreTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -135,7 +135,7 @@ LoadTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath("LoadTest1.c"), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -173,7 +173,7 @@ LoadTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -241,7 +241,7 @@ LoadFromUndefTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); 
nf->set_mutable(false); Lambda_ = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); @@ -272,7 +272,7 @@ GetElementPtrTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto & declaration = module->AddStructTypeDeclaration(StructType::Declaration::Create( @@ -336,7 +336,7 @@ BitCastTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -364,7 +364,7 @@ Bits2PtrTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto setupBit2PtrFunction = [&]() @@ -444,7 +444,7 @@ ConstantPointerNullTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -478,7 +478,7 @@ CallTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupF = [&]() @@ -634,7 +634,7 @@ CallTest2::SetupRvsdg() 
auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupCreate = [&]() @@ -772,7 +772,7 @@ IndirectCallTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupConstantFunction = [&](ssize_t n, const std::string & name) @@ -881,7 +881,7 @@ IndirectCallTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupG1 = [&]() @@ -1127,7 +1127,7 @@ ExternalCallTest1::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -1209,7 +1209,7 @@ ExternalCallTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -1326,7 +1326,7 @@ GammaTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); 
nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -1374,7 +1374,7 @@ GammaTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupLambdaF = [&]() @@ -1568,7 +1568,7 @@ ThetaTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -1620,7 +1620,7 @@ DeltaTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupGlobalF = [&]() @@ -1715,7 +1715,7 @@ DeltaTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupD1 = [&]() @@ -1825,7 +1825,7 @@ DeltaTest3::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupG1 = [&]() @@ -1934,7 +1934,7 @@ ImportTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = 
graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupF1 = [&](jlm::rvsdg::output * d1) @@ -2023,7 +2023,7 @@ PhiTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pbit64 = PointerType::Create(); @@ -2210,7 +2210,7 @@ PhiTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupEight = [&]() @@ -2542,7 +2542,7 @@ PhiWithDeltaTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -2593,7 +2593,7 @@ ExternalMemoryTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); /** @@ -2624,7 +2624,7 @@ EscapedMemoryTest1::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupDeltaA = [&]() @@ -2752,7 +2752,7 @@ EscapedMemoryTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg 
= &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -2929,7 +2929,7 @@ EscapedMemoryTest3::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3027,7 +3027,7 @@ MemcpyTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto arrayType = arraytype::Create(jlm::rvsdg::bittype::Create(32), 5); @@ -3178,7 +3178,7 @@ MemcpyTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3280,7 +3280,7 @@ MemcpyTest3::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3338,7 +3338,7 @@ LinkedListTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto 
pointerType = PointerType::Create(); @@ -3429,7 +3429,7 @@ AllMemoryNodesTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); // Create imported symbol "imported" @@ -3526,7 +3526,7 @@ NAllocaNodesTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); Function_ = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); @@ -3571,7 +3571,7 @@ EscapingLocalFunctionTest::SetupRvsdg() auto module = RvsdgModule::Create(util::filepath(""), "", ""); const auto graph = &module->Rvsdg(); - graph->node_normal_form(typeid(rvsdg::operation))->set_mutable(false); + graph->node_normal_form(typeid(rvsdg::Operation))->set_mutable(false); Global_ = delta::node::Create( graph->root(), @@ -3639,7 +3639,7 @@ FreeNullTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); LambdaMain_ = @@ -3667,7 +3667,7 @@ LambdaCallArgumentMismatch::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - rvsdg.node_normal_form(typeid(rvsdg::operation))->set_mutable(false); + rvsdg.node_normal_form(typeid(rvsdg::Operation))->set_mutable(false); auto setupLambdaG = [&]() { @@ -3750,7 +3750,7 @@ VariadicFunctionTest1::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - 
rvsdg.node_normal_form(typeid(rvsdg::operation))->set_mutable(false); + rvsdg.node_normal_form(typeid(rvsdg::Operation))->set_mutable(false); auto pointerType = PointerType::Create(); auto iOStateType = iostatetype::Create(); @@ -3835,7 +3835,7 @@ VariadicFunctionTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index 8b90435f3..415bc23fe 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -22,7 +22,7 @@ TestTraceArgument() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -82,7 +82,7 @@ TestLoad() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -164,7 +164,7 @@ TestLoadStore() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -251,7 +251,7 @@ TestThetaLoad() using namespace jlm::hls; auto rvsdgModule = 
RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 23e8a7fad..d739580fb 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -22,7 +22,7 @@ TestFork() auto ft = FunctionType::Create({ b32, b32, b32 }, { b32, b32, b32 }); RvsdgModule rm(util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::operation)); + auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); @@ -90,7 +90,7 @@ TestConstantFork() auto ft = FunctionType::Create({ b32 }, { b32 }); RvsdgModule rm(util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::operation)); + auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index 8b319291e..993b99433 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -21,7 +21,7 @@ TestWithMatch() auto ft = FunctionType::Create({ jlm::rvsdg::bittype::Create(1), vt, vt }, { vt }); RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); /* Setup graph */ @@ -58,7 +58,7 @@ TestWithoutMatch() auto ft = FunctionType::Create({ jlm::rvsdg::ControlType::Create(2), vt, vt }, 
{ vt }); RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); /* Setup graph */ diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index 201631fb2..2f1432e8f 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -20,7 +20,7 @@ TestUnknownBoundaries() auto ft = FunctionType::Create({ b32, b32, b32 }, { b32, b32, b32 }); RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp index d64edd77a..36122faca 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp @@ -39,7 +39,7 @@ test() { rvsdg::bittype::Create(8) }); jlm::llvm::RvsdgModule rm(util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::operation)); + auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::Operation)); nf->set_mutable(false); /* setup graph */ diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp index 80a862e38..0bb4425dd 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -29,7 +29,7 @@ GammaWithMatch() FunctionType::Create({ jlm::rvsdg::bittype::Create(1), valueType, valueType }, { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); - auto nf = 
rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambdaNode = lambda::node::create( @@ -84,7 +84,7 @@ GammaWithoutMatch() { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); - auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambdaNode = lambda::node::create( @@ -139,7 +139,7 @@ EmptyGammaWithThreeSubregions() { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); - auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambdaNode = lambda::node::create( diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index b6aa84eb7..6b1fa3a1a 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -112,7 +112,7 @@ TestCallTypeClassifierIndirectCall() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto SetupFunction = [&]() @@ -164,7 +164,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto vt = jlm::tests::valuetype::Create(); @@ -260,7 +260,7 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf 
= graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto vt = jlm::tests::valuetype::Create(); @@ -374,7 +374,7 @@ TestCallTypeClassifierRecursiveDirectCall() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto SetupFib = [&]() diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index 05e83b0f3..6a2195bc0 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -455,7 +455,7 @@ TestCallSummaryComputationFunctionPointerInDelta() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto valueType = jlm::tests::valuetype::Create(); @@ -493,7 +493,7 @@ TestCallSummaryComputationLambdaResult() // Arrange jlm::rvsdg::Graph rvsdg; - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index a00c8e1d1..ad8e4e3d3 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -28,7 +28,7 @@ test_simple() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); @@ -73,7 +73,7 @@ 
test_gamma() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -139,7 +139,7 @@ test_theta() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -192,7 +192,7 @@ test_theta2() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -236,7 +236,7 @@ test_theta3() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -295,7 +295,7 @@ test_theta4() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -348,7 +348,7 @@ test_theta5() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -395,7 +395,7 @@ test_lambda() RvsdgModule 
rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); @@ -430,7 +430,7 @@ test_phi() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 5f50451fe..2bbb9b6dc 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -93,7 +93,7 @@ test_unrollinfo() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto init0 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); @@ -147,7 +147,7 @@ test_known_boundaries() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); @@ -167,7 +167,7 @@ test_known_boundaries() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); @@ -187,7 +187,7 @@ test_known_boundaries() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); @@ -208,7 +208,7 @@ 
test_known_boundaries() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 100); @@ -292,7 +292,7 @@ test_nested_theta() jlm::llvm::RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); diff --git a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp index e30f32320..7e6e7307a 100644 --- a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp +++ b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp @@ -21,7 +21,7 @@ TestUndef() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { // Create an undef operation diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index a5cc1507c..105bd8497 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -21,7 +21,7 @@ TestLambda() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -140,7 +140,7 @@ TestAddOperation() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = 
graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -241,7 +241,7 @@ TestComZeroExt() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -387,7 +387,7 @@ TestMatch() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -494,7 +494,7 @@ TestGamma() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -613,7 +613,7 @@ TestTheta() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::operation)); + auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { // Create a theta operation diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 8589c4227..9d93fb77d 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -21,13 +21,11 @@ GraphExport::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) return GraphExport::Create(origin, Name()); } -/* unary operation */ - unary_op::~unary_op() noexcept {} bool -unary_op::operator==(const rvsdg::operation & other) const noexcept +unary_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -51,19 +49,17 @@ unary_op::debug_string() const return 
"UNARY_TEST_NODE"; } -std::unique_ptr +std::unique_ptr unary_op::copy() const { - return std::unique_ptr(new unary_op(*this)); + return std::make_unique(*this); } -/* binary operation */ - binary_op::~binary_op() noexcept {} bool -binary_op::operator==(const rvsdg::operation & other) const noexcept +binary_op::operator==(const Operation & other) const noexcept { auto op = dynamic_cast(&other); return op && op->argument(0) == argument(0) && op->result(0) == result(0); @@ -97,17 +93,17 @@ binary_op::debug_string() const return "BINARY_TEST_OP"; } -std::unique_ptr +std::unique_ptr binary_op::copy() const { - return std::unique_ptr(new binary_op(*this)); + return std::make_unique(*this); } test_op::~test_op() {} bool -test_op::operator==(const operation & o) const noexcept +test_op::operator==(const Operation & o) const noexcept { auto other = dynamic_cast(&o); if (!other) @@ -137,14 +133,12 @@ test_op::debug_string() const return "test_op"; } -std::unique_ptr +std::unique_ptr test_op::copy() const { - return std::unique_ptr(new test_op(*this)); + return std::make_unique(*this); } -/* structural operation */ - structural_op::~structural_op() noexcept {} @@ -154,10 +148,10 @@ structural_op::debug_string() const return "STRUCTURAL_TEST_NODE"; } -std::unique_ptr +std::unique_ptr structural_op::copy() const { - return std::unique_ptr(new structural_op(*this)); + return std::make_unique(*this); } structural_node::~structural_node() diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index 2849fd03b..f694ae623 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -80,7 +80,7 @@ class unary_op final : public rvsdg::unary_op {} virtual bool - operator==(const rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual rvsdg::unop_reduction_path_t can_reduce_operand(const rvsdg::output * operand) const noexcept override; @@ -91,7 +91,7 @@ class unary_op final : public rvsdg::unary_op 
virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static rvsdg::Node * @@ -119,7 +119,7 @@ class unary_op final : public rvsdg::unary_op }; static inline bool -is_unary_op(const rvsdg::operation & op) noexcept +is_unary_op(const rvsdg::Operation & op) noexcept { return dynamic_cast(&op); } @@ -146,7 +146,7 @@ class binary_op final : public rvsdg::binary_op {} virtual bool - operator==(const rvsdg::operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual rvsdg::binop_reduction_path_t can_reduce_operand_pair(const rvsdg::output * op1, const rvsdg::output * op2) @@ -162,7 +162,7 @@ class binary_op final : public rvsdg::binary_op virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static rvsdg::Node * @@ -201,7 +201,7 @@ class structural_op final : public rvsdg::StructuralOperation virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; }; @@ -374,12 +374,12 @@ class test_op final : public rvsdg::SimpleOperation test_op(const test_op &) = default; virtual bool - operator==(const operation & other) const noexcept override; + operator==(const Operation & other) const noexcept override; virtual std::string debug_string() const override; - virtual std::unique_ptr + [[nodiscard]] std::unique_ptr copy() const override; static rvsdg::simple_node * From 29d9755f06a3cba0a8515f6411d24a54bd74d716 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 6 Dec 2024 09:50:40 +0100 Subject: [PATCH 129/170] Fix duplicate state normalization for load nodes (#678) This PR does the following: 1. Splits up all load tests into their own unit tests 2. 
Fixes the duplicate state normalization for load nodes --- jlm/llvm/ir/operators/Load.cpp | 42 +++++----- tests/jlm/llvm/ir/operators/LoadTests.cpp | 95 +++++++++++++++-------- 2 files changed, 86 insertions(+), 51 deletions(-) diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 6f7ae6346..43f7eb556 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -7,6 +7,7 @@ #include #include #include +#include namespace jlm::llvm { @@ -286,8 +287,8 @@ is_load_store_state_reducible( static bool is_multiple_origin_reducible(const std::vector & operands) { - std::unordered_set states(std::next(operands.begin()), operands.end()); - return states.size() != operands.size() - 1; + const util::HashSet states(std::next(operands.begin()), operands.end()); + return states.Size() != operands.size() - 1; } // s2 = store_op a v1 s1 @@ -453,31 +454,32 @@ perform_multiple_origin_reduction( const LoadNonVolatileOperation & op, const std::vector & operands) { - std::vector new_loadstates; - std::unordered_set seen_state; - std::vector results(operands.size(), nullptr); + JLM_ASSERT(operands.size() > 1); + const auto address = operands[0]; + + std::vector newInputStates; + std::unordered_map stateIndexMap; for (size_t n = 1; n < operands.size(); n++) { auto state = operands[n]; - if (seen_state.find(state) != seen_state.end()) - results[n] = state; - else - new_loadstates.push_back(state); - - seen_state.insert(state); + if (stateIndexMap.find(state) == stateIndexMap.end()) + { + const size_t resultIndex = 1 + newInputStates.size(); // loaded value + states seen so far + newInputStates.push_back(state); + stateIndexMap[state] = resultIndex; + } } - auto ld = LoadNonVolatileNode::Create( - operands[0], - new_loadstates, - op.GetLoadedType(), - op.GetAlignment()); + const auto loadResults = + LoadNonVolatileNode::Create(address, newInputStates, op.GetLoadedType(), op.GetAlignment()); - results[0] = ld[0]; - for (size_t n = 1, s = 1; n 
< results.size(); n++) + std::vector results(operands.size(), nullptr); + results[0] = loadResults[0]; + for (size_t n = 1; n < operands.size(); n++) { - if (results[n] == nullptr) - results[n] = ld[s++]; + auto state = operands[n]; + JLM_ASSERT(stateIndexMap.find(state) != stateIndexMap.end()); + results[n] = loadResults[stateIndexMap[state]]; } return results; diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index 4f80f02e6..51c277ba8 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -46,7 +46,7 @@ JLM_UNIT_TEST_REGISTER( "jlm/llvm/ir/operators/LoadNonVolatileTests-OperationEquality", OperationEquality) -static void +static int TestCopy() { using namespace jlm::llvm; @@ -74,9 +74,13 @@ TestCopy() auto copiedLoadNode = dynamic_cast(copiedNode); assert(copiedLoadNode != nullptr); assert(loadNode->GetOperation() == copiedLoadNode->GetOperation()); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/operators/LoadNonVolatileTests-Copy", TestCopy) + +static int TestLoadAllocaReduction() { using namespace jlm::llvm; @@ -116,46 +120,73 @@ TestLoadAllocaReduction() assert(node->ninputs() == 3); assert(node->input(1)->origin() == alloca1[1]); assert(node->input(2)->origin() == mux[0]); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadAllocaReduction", + TestLoadAllocaReduction) + +static int TestMultipleOriginReduction() { using namespace jlm::llvm; // Arrange - auto mt = MemoryStateType::Create(); - auto vt = jlm::tests::valuetype::Create(); - auto pt = PointerType::Create(); + const auto memoryType = MemoryStateType::Create(); + const auto valueType = jlm::tests::valuetype::Create(); + const auto pointerType = PointerType::Create(); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); + const auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); 
nf->set_mutable(false); nf->set_multiple_origin_reducible(false); - auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); - auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); + const auto a = &jlm::tests::GraphImport::Create(graph, pointerType, "a"); + auto s1 = &jlm::tests::GraphImport::Create(graph, memoryType, "s1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, memoryType, "s2"); + auto s3 = &jlm::tests::GraphImport::Create(graph, memoryType, "s3"); - auto load = LoadNonVolatileNode::Create(a, { s, s, s, s }, vt, 4)[0]; + const auto loadResults = LoadNonVolatileNode::Create(a, { s1, s2, s1, s2, s3 }, valueType, 4); - auto & ex = GraphExport::Create(*load, "l"); + auto & exA = GraphExport::Create(*loadResults[0], "exA"); + auto & exS1 = GraphExport::Create(*loadResults[1], "exS1"); + auto & exS2 = GraphExport::Create(*loadResults[2], "exS2"); + auto & exS3 = GraphExport::Create(*loadResults[3], "exS3"); + auto & exS4 = GraphExport::Create(*loadResults[4], "exS4"); + auto & exS5 = GraphExport::Create(*loadResults[5], "exS5"); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); // Act nf->set_mutable(true); nf->set_multiple_origin_reducible(true); graph.normalize(); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::output::GetNode(*ex.origin()); + const auto node = jlm::rvsdg::output::GetNode(*exA.origin()); assert(is(node)); - assert(node->ninputs() == 2); + assert(node->ninputs() == 4); // 1 address + 3 states + assert(node->noutputs() == 4); // 1 loaded value + 3 states + + assert(exA.origin() == node->output(0)); + assert(exS1.origin() == node->output(1)); + assert(exS2.origin() == node->output(2)); + assert(exS3.origin() == node->output(1)); + assert(exS4.origin() == node->output(2)); + assert(exS5.origin() == node->output(3)); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-MultipleOriginReduction", + 
TestMultipleOriginReduction) + +static int TestLoadStoreStateReduction() { using namespace jlm::llvm; @@ -199,9 +230,15 @@ TestLoadStoreStateReduction() node = jlm::rvsdg::output::GetNode(*ex2.origin()); assert(is(node)); assert(node->ninputs() == 2); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadStoreStateReduction", + TestLoadStoreStateReduction) + +static int TestLoadStoreReduction() { using namespace jlm::llvm; @@ -239,9 +276,15 @@ TestLoadStoreReduction() assert(graph.root()->nnodes() == 1); assert(x1.origin() == v); assert(x2.origin() == s1); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadStoreReduction", + TestLoadStoreReduction) + +static int TestLoadLoadReduction() { using namespace jlm::llvm; @@ -298,23 +341,13 @@ TestLoadLoadReduction() assert(is(mx2) && mx2->ninputs() == 2); assert(mx2->input(0)->origin() == ld2[1] || mx2->input(0)->origin() == ld->output(3)); assert(mx2->input(1)->origin() == ld2[1] || mx2->input(1)->origin() == ld->output(3)); -} - -static int -TestLoad() -{ - TestCopy(); - - TestLoadAllocaReduction(); - TestMultipleOriginReduction(); - TestLoadStoreStateReduction(); - TestLoadStoreReduction(); - TestLoadLoadReduction(); return 0; } -JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/operators/LoadNonVolatileTests", TestLoad) +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadLoadReduction", + TestLoadLoadReduction) static int LoadVolatileOperationEquality() From bcd76d06c3bd9619343cb090cca5207a72168653 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Thu, 12 Dec 2024 09:15:34 +0100 Subject: [PATCH 130/170] Make small fixes to scripts + update commit hashes of CIRCT and hls-test-suite (#680) The updated hashes give us a smaller CIRCT build, and cleaner output from the hls-test-suite, plus disables the problem test `test_memory_5`. 
The other fixes include: - avoid crashing if `lit` is not found immediately (it can be supplied using the option instead) - avoid looping indefinitely when seeing unknown options - make the scripts a little more consistent --- .github/actions/BuildCirct/action.yml | 3 +-- README.md | 3 ++- scripts/build-circt.sh | 14 ++++++++++---- scripts/build-mlir.sh | 2 +- scripts/run-hls-test.sh | 6 +++--- scripts/run-llvm-test-suite.sh | 2 +- scripts/run-polybench.sh | 2 +- 7 files changed, 19 insertions(+), 13 deletions(-) diff --git a/.github/actions/BuildCirct/action.yml b/.github/actions/BuildCirct/action.yml index 07e280351..7fd2ac62b 100644 --- a/.github/actions/BuildCirct/action.yml +++ b/.github/actions/BuildCirct/action.yml @@ -30,8 +30,7 @@ runs: run: | ./scripts/build-circt.sh \ --build-path ${{ github.workspace }}/build-circt \ - --install-path ${{ github.workspace }}/build-circt/circt \ - --llvm-lit-path ~/.local/bin/lit + --install-path ${{ github.workspace }}/build-circt/circt shell: bash - name: "Save CIRCT to the cache" diff --git a/README.md b/README.md index bcf3c3d40..e2ce5843b 100644 --- a/README.md +++ b/README.md @@ -6,6 +6,7 @@ Regionalized Value State Dependence Graph (RVSDG) as intermediate representation ## Dependencies * Clang/LLVM 18 * Doxygen 1.9.1 +* `lit` 18 ### HLS dependencies * MLIR 18 @@ -24,7 +25,7 @@ Regionalized Value State Dependence Graph (RVSDG) as intermediate representation make all ``` -This presumes that the reight version of llvm-config can be found in $PATH. +This presumes that the right version of llvm-config can be found in $PATH. 
If that is not the case, you may need to explicitly configure it: ``` diff --git a/scripts/build-circt.sh b/scripts/build-circt.sh index 8fc1ad0f7..f5f3d419d 100755 --- a/scripts/build-circt.sh +++ b/scripts/build-circt.sh @@ -1,14 +1,15 @@ #!/bin/bash set -eu -GIT_COMMIT=2dc8240d91a0f993d616b152aa4d7520156862fe +GIT_REPOSITORY=https://github.com/EECS-NTNU/circt.git +GIT_COMMIT=c3c436b321db83dfabc9065e552a5da2f4694faa # Get the absolute path to this script and set default build and install paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" JLM_ROOT_DIR="$(realpath "${SCRIPT_DIR}/..")" CIRCT_BUILD=${JLM_ROOT_DIR}/build-circt CIRCT_INSTALL=${JLM_ROOT_DIR}/usr -LLVM_LIT_PATH=`which lit` +LLVM_LIT_PATH=`command -v lit || true` LLVM_VERSION=18 LLVM_CONFIG_BIN=llvm-config-${LLVM_VERSION} @@ -60,13 +61,18 @@ while [[ "$#" -ge 1 ]] ; do commit >&1 exit 0 ;; - --help) + --help|*) usage >&2 exit 1 ;; esac done +if [ -z "$LLVM_LIT_PATH" ]; then + echo "error: --llvm-lit-path could not be found automatically" >&2 + exit 1 +fi + LLVM_BINDIR=$(${LLVM_CONFIG_BIN} --bindir) LLVM_CMAKEDIR=$(${LLVM_CONFIG_BIN} --cmakedir) @@ -75,7 +81,7 @@ CIRCT_BUILD_DIR=${CIRCT_BUILD}/build if [ ! 
-d "$CIRCT_GIT_DIR" ] ; then - git clone https://github.com/EECS-NTNU/circt.git ${CIRCT_GIT_DIR} + git clone ${GIT_REPOSITORY} ${CIRCT_GIT_DIR} fi git -C ${CIRCT_GIT_DIR} checkout ${GIT_COMMIT} cmake -G Ninja \ diff --git a/scripts/build-mlir.sh b/scripts/build-mlir.sh index 274fc033a..ae6f318b1 100755 --- a/scripts/build-mlir.sh +++ b/scripts/build-mlir.sh @@ -52,7 +52,7 @@ while [[ "$#" -ge 1 ]] ; do commit >&1 exit 0 ;; - --help) + --help|*) usage >&2 exit 1 ;; diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index 6ec1ba514..6d4587c0d 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,11 +3,11 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=99d309be2a9aa8d565c2ece493dc33a447ad166d +GIT_COMMIT=c81fc559afa3cca66efc908b0a932d81f9c90d49 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" -JLM_ROOT_DIR=${SCRIPT_DIR}/.. 
+JLM_ROOT_DIR="$(realpath "${SCRIPT_DIR}/..")" JLM_BIN_DIR=${JLM_ROOT_DIR}/build # Set default path for where the benchmark will be cloned and make target for running it @@ -50,7 +50,7 @@ while [[ "$#" -ge 1 ]] ; do commit >&2 exit 1 ;; - --help) + --help|*) usage >&2 exit 1 ;; diff --git a/scripts/run-llvm-test-suite.sh b/scripts/run-llvm-test-suite.sh index 082277e02..8922cd9e4 100755 --- a/scripts/run-llvm-test-suite.sh +++ b/scripts/run-llvm-test-suite.sh @@ -47,7 +47,7 @@ while [[ "$#" -ge 1 ]] ; do commit >&2 exit 1 ;; - --help) + --help|*) usage >&2 exit 1 ;; diff --git a/scripts/run-polybench.sh b/scripts/run-polybench.sh index 709770d09..1ef73f849 100755 --- a/scripts/run-polybench.sh +++ b/scripts/run-polybench.sh @@ -40,7 +40,7 @@ while [[ "$#" -ge 1 ]] ; do commit >&2 exit 1 ;; - --help) + --help|*) usage >&2 exit 1 ;; From 5c82eb176c6892c9a0528cb0593decd1e8e8be5d Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Sun, 15 Dec 2024 15:35:52 +0100 Subject: [PATCH 131/170] Fetch hls-test repo if already checked out (#684) If the git repository already existed, then updates of the run-hls-test.sh script could cause the git hash to not exist as the repo never got updated. --- scripts/run-hls-test.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index 6d4587c0d..ef821a447 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -68,6 +68,8 @@ fi if [ ! -d "$BENCHMARK_DIR" ] ; then git clone ${GIT_REPOSITORY} ${BENCHMARK_DIR} +else + git -C ${BENCHMARK_DIR} fetch origin fi export PATH=${JLM_BIN_DIR}:${PATH} From 5f0abe6d0d2b8cc0d93eb07ffd7a2e0d0c610761 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 16 Dec 2024 10:31:29 +0100 Subject: [PATCH 132/170] Add new normalization interface and convert load reductions to it (#683) This PR is the first in a series of PRs to remove the old node normalization interface that is intertwined in the RVSDG library. This PR does the following: 1. 
Adds a new and simple node normalization interface for optimizing the operands of a node. This enables to create individual instances of normalizations and therefore pass them around, and/or configure optimization by simply providing these instances instead of utilizing the flags of the old interface. 2. Converts the load reductions to this new interface 3. Utilizes these new reductions in load tests --- jlm/llvm/ir/operators/Load.cpp | 66 ++++++++++ jlm/llvm/ir/operators/Load.hpp | 130 ++++++++++++++++++++ jlm/rvsdg/Makefile.sub | 1 + jlm/rvsdg/NodeNormalization.hpp | 63 ++++++++++ tests/jlm/llvm/ir/operators/LoadTests.cpp | 139 +++++++++++++++------- 5 files changed, 355 insertions(+), 44 deletions(-) create mode 100644 jlm/rvsdg/NodeNormalization.hpp diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 43f7eb556..aba6b444a 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -670,6 +670,72 @@ load_normal_form::normalized_create( return simple_normal_form::normalized_create(region, op, operands); } +std::optional> +NormalizeLoadMux( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_load_mux_reducible(operands)) + return perform_load_mux_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeLoadStore( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_load_store_reducible(operation, operands)) + return perform_load_store_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeLoadAlloca( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_load_alloca_reducible(operands)) + return perform_load_alloca_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeLoadStoreState( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + if 
(is_load_store_state_reducible(operation, operands)) + return perform_load_store_state_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeLoadDuplicateState( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_multiple_origin_reducible(operands)) + return perform_multiple_origin_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeLoadLoadState( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_load_load_state_reducible(operands)) + return perform_load_load_state_reduction(operation, operands); + + return std::nullopt; +} + } namespace diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index c83600b29..129e4fde1 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -12,6 +12,8 @@ #include #include +#include + namespace jlm::llvm { @@ -575,6 +577,134 @@ class LoadNonVolatileNode final : public LoadNode } }; +/** + * \brief Swaps a memory state merge operation and a load operation. + * + * sx1 = MemStateMerge si1 ... siM + * v sl1 = load_op a sx1 + * => + * v sl1 ... slM = load_op a si1 ... siM + * sx1 = MemStateMerge sl1 ... slM + * + * FIXME: The reduction can be generalized: A load node can have multiple operands from different + * merge nodes. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeLoadMux( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief If the producer of a load's address is an alloca operation, then we can remove all + * state edges originating from other alloca operations. + * + * a1 s1 = alloca_op ... + * a2 s2 = alloca_op ... + * s3 = mux_op s1 + * v sl1 sl2 sl3 = load_op a1 s1 s2 s3 + * => + * ... 
+ * v sl1 sl3 = load_op a1 s1 s3 + * + * @param operation The load operation on which the transformation is performed. + * @param operands The operands of the load node. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeLoadAlloca( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief Forwards the value from a store operation. + * + * s2 = store_op a v1 s1 + * v2 s3 = load_op a s2 + * ... = any_op v2 + * => + * s2 = store_op a v1 s1 + * ... = any_op v1 + * + * @param operation The load operation on which the transformation is performed. + * @param operands The operands of the load node. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeLoadStore( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief If the producer of a load's address is an alloca operation, then we can remove all + * state edges originating from other alloca operations coming through store operations. + * + * a1 sa1 = alloca_op ... + * a2 sa2 = alloca_op ... + * ss1 = store_op a1 ... sa1 + * ss2 = store_op a2 ... sa2 + * ... = load_op a1 ss1 ss2 + * => + * ... + * ... = load_op a1 ss1 + * + * @param operation The load operation on which the transformation is performed. + * @param operands The operands of the load node. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. 
+ */ +std::optional> +NormalizeLoadStoreState( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief Remove duplicated state operands + * + * v so1 so2 so3 = load_op a si1 si1 si1 + * => + * v so1 = load_op a si1 + * + * @param operation The load operation on which the transformation is performed. + * @param operands The operands of the load node. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeLoadDuplicateState( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief Avoid sequentialization of load operations. + * + * _ so1 = load_op _ si1 + * _ so2 = load_op _ so1 + * _ so3 = load_op _ so2 + * => + * _ so1 = load_op _ si1 + * _ so2 = load_op _ si1 + * _ so3 = load_op _ si1 + * + * @param operation The load operation on which the transformation is performed. + * @param operands The operands of the load node. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeLoadLoadState( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + } #endif diff --git a/jlm/rvsdg/Makefile.sub b/jlm/rvsdg/Makefile.sub index 5d3fae3a1..697211747 100644 --- a/jlm/rvsdg/Makefile.sub +++ b/jlm/rvsdg/Makefile.sub @@ -61,6 +61,7 @@ librvsdg_HEADERS = \ jlm/rvsdg/bitstring.hpp \ jlm/rvsdg/node.hpp \ jlm/rvsdg/node-normal-form.hpp \ + jlm/rvsdg/NodeNormalization.hpp \ jlm/rvsdg/nullary.hpp \ jlm/rvsdg/structural-node.hpp \ jlm/rvsdg/control.hpp \ diff --git a/jlm/rvsdg/NodeNormalization.hpp b/jlm/rvsdg/NodeNormalization.hpp new file mode 100644 index 000000000..f4ce375e5 --- /dev/null +++ b/jlm/rvsdg/NodeNormalization.hpp @@ -0,0 +1,63 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. 
+ */ + +#ifndef JLM_RVSDG_NODENORMALIZATION_HPP +#define JLM_RVSDG_NODENORMALIZATION_HPP + +#include +#include +#include + +#include +#include +#include + +namespace jlm::rvsdg +{ + +class output; + +template +using NodeNormalization = std::function< + std::optional>(const TOperation &, const std::vector &)>; + +template +std::optional> +NormalizeSequence( + const std::vector> & nodeNormalizations, + const TOperation & operation, + const std::vector & operands) +{ + for (auto & nodeNormalization : nodeNormalizations) + { + if (auto results = nodeNormalization(operation, operands)) + { + return results; + } + } + + return std::nullopt; +} + +template +bool +ReduceNode(const NodeNormalization & nodeNormalization, Node & node) +{ + auto operation = util::AssertedCast(&node.GetOperation()); + auto operands = rvsdg::operands(&node); + + if (auto results = nodeNormalization(*operation, operands)) + { + divert_users(&node, *results); + remove(&node); + return true; + } + + return false; +} + +} + +#endif diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index 51c277ba8..d932d32f9 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -7,6 +7,7 @@ #include #include +#include #include #include @@ -99,20 +100,18 @@ TestLoadAllocaReduction() auto alloca1 = alloca_op::create(bt, size, 4); auto alloca2 = alloca_op::create(bt, size, 4); auto mux = jlm::rvsdg::create_state_mux(mt, { alloca1[1] }, 1); - auto value = - LoadNonVolatileNode::Create(alloca1[0], { alloca1[1], alloca2[1], mux[0] }, bt, 4)[0]; + auto & loadNode = + LoadNonVolatileNode::CreateNode(*alloca1[0], { alloca1[1], alloca2[1], mux[0] }, bt, 4); - auto & ex = GraphExport::Create(*value, "l"); + auto & ex = GraphExport::Create(*loadNode.output(0), "l"); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // Act - nf->set_mutable(true); - nf->set_load_alloca_reducible(true); - 
graph.normalize(); + jlm::rvsdg::ReduceNode(NormalizeLoadAlloca, loadNode); graph.prune(); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // Assert auto node = jlm::rvsdg::output::GetNode(*ex.origin()); @@ -129,7 +128,57 @@ JLM_UNIT_TEST_REGISTER( TestLoadAllocaReduction) static int -TestMultipleOriginReduction() +TestLoadMuxReduction() +{ + using namespace jlm::llvm; + + // Arrange + auto memoryStateType = MemoryStateType::Create(); + auto pointerType = PointerType::Create(); + auto bitstringType = jlm::rvsdg::bittype::Create(32); + + jlm::rvsdg::Graph graph; + auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_load_mux_reducible(false); + + auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); + auto s1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state2"); + auto s3 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state3"); + + auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); + auto & loadNode = LoadNonVolatileNode::CreateNode(*address, { mux }, bitstringType, 4); + + auto & ex = GraphExport::Create(*loadNode.output(0), "l"); + + jlm::rvsdg::view(graph.root(), stdout); + + // Act + auto success = jlm::rvsdg::ReduceNode(NormalizeLoadMux, loadNode); + graph.prune(); + + jlm::rvsdg::view(graph.root(), stdout); + + // Assert + assert(success); + auto node = jlm::rvsdg::output::GetNode(*ex.origin()); + assert(is(node)); + assert(node->ninputs() == 4); + assert(node->input(0)->origin() == address); + assert(node->input(1)->origin() == s1); + assert(node->input(2)->origin() == s2); + assert(node->input(3)->origin() == s3); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadMuxReduction", + TestLoadMuxReduction) + +static int +TestDuplicateStateReduction() { using namespace jlm::llvm; @@ -148,25 +197,25 
@@ TestMultipleOriginReduction() auto s2 = &jlm::tests::GraphImport::Create(graph, memoryType, "s2"); auto s3 = &jlm::tests::GraphImport::Create(graph, memoryType, "s3"); - const auto loadResults = LoadNonVolatileNode::Create(a, { s1, s2, s1, s2, s3 }, valueType, 4); + auto & loadNode = LoadNonVolatileNode::CreateNode(*a, { s1, s2, s1, s2, s3 }, valueType, 4); - auto & exA = GraphExport::Create(*loadResults[0], "exA"); - auto & exS1 = GraphExport::Create(*loadResults[1], "exS1"); - auto & exS2 = GraphExport::Create(*loadResults[2], "exS2"); - auto & exS3 = GraphExport::Create(*loadResults[3], "exS3"); - auto & exS4 = GraphExport::Create(*loadResults[4], "exS4"); - auto & exS5 = GraphExport::Create(*loadResults[5], "exS5"); + auto & exA = GraphExport::Create(*loadNode.output(0), "exA"); + auto & exS1 = GraphExport::Create(*loadNode.output(1), "exS1"); + auto & exS2 = GraphExport::Create(*loadNode.output(2), "exS2"); + auto & exS3 = GraphExport::Create(*loadNode.output(3), "exS3"); + auto & exS4 = GraphExport::Create(*loadNode.output(4), "exS4"); + auto & exS5 = GraphExport::Create(*loadNode.output(5), "exS5"); view(graph.root(), stdout); // Act - nf->set_mutable(true); - nf->set_multiple_origin_reducible(true); - graph.normalize(); + auto success = + jlm::rvsdg::ReduceNode(NormalizeLoadDuplicateState, loadNode); view(graph.root(), stdout); // Assert + assert(success); const auto node = jlm::rvsdg::output::GetNode(*exA.origin()); assert(is(node)); assert(node->ninputs() == 4); // 1 address + 3 states @@ -183,8 +232,8 @@ TestMultipleOriginReduction() } JLM_UNIT_TEST_REGISTER( - "jlm/llvm/ir/operators/LoadNonVolatileTests-MultipleOriginReduction", - TestMultipleOriginReduction) + "jlm/llvm/ir/operators/LoadNonVolatileTests-DuplicateStateReduction", + TestDuplicateStateReduction) static int TestLoadStoreStateReduction() @@ -206,27 +255,30 @@ TestLoadStoreStateReduction() auto store1 = StoreNonVolatileNode::Create(alloca1[0], size, { alloca1[1] }, 4); auto store2 = 
StoreNonVolatileNode::Create(alloca2[0], size, { alloca2[1] }, 4); - auto value1 = LoadNonVolatileNode::Create(alloca1[0], { store1[0], store2[0] }, bt, 4)[0]; - auto value2 = LoadNonVolatileNode::Create(alloca1[0], { store1[0] }, bt, 8)[0]; + auto & loadNode1 = LoadNonVolatileNode::CreateNode(*alloca1[0], { store1[0], store2[0] }, bt, 4); + auto & loadNode2 = LoadNonVolatileNode::CreateNode(*alloca1[0], { store1[0] }, bt, 8); - auto & ex1 = GraphExport::Create(*value1, "l1"); - auto & ex2 = GraphExport::Create(*value2, "l2"); + auto & ex1 = GraphExport::Create(*loadNode1.output(0), "l1"); + auto & ex2 = GraphExport::Create(*loadNode2.output(0), "l2"); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // Act - nf->set_mutable(true); - nf->set_load_store_state_reducible(true); - graph.normalize(); + auto success1 = + jlm::rvsdg::ReduceNode(NormalizeLoadStoreState, loadNode1); + auto success2 = + jlm::rvsdg::ReduceNode(NormalizeLoadStoreState, loadNode2); graph.prune(); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // Assert + assert(success1); auto node = jlm::rvsdg::output::GetNode(*ex1.origin()); assert(is(node)); assert(node->ninputs() == 2); + assert(success2 == false); node = jlm::rvsdg::output::GetNode(*ex2.origin()); assert(is(node)); assert(node->ninputs() == 2); @@ -258,21 +310,21 @@ TestLoadStoreReduction() auto s = &jlm::tests::GraphImport::Create(graph, mt, "state"); auto s1 = StoreNonVolatileNode::Create(a, v, { s }, 4)[0]; - auto load = LoadNonVolatileNode::Create(a, { s1 }, vt, 4); + auto & loadNode = LoadNonVolatileNode::CreateNode(*a, { s1 }, vt, 4); - auto & x1 = GraphExport::Create(*load[0], "value"); - auto & x2 = GraphExport::Create(*load[1], "state"); + auto & x1 = GraphExport::Create(*loadNode.output(0), "value"); + auto & x2 = GraphExport::Create(*loadNode.output(1), "state"); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // 
Act - nf->set_mutable(true); - nf->set_load_store_reducible(true); + auto success = jlm::rvsdg::ReduceNode(NormalizeLoadStore, loadNode); graph.normalize(); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // Assert + assert(success); assert(graph.root()->nnodes() == 1); assert(x1.origin() == v); assert(x2.origin() == s1); @@ -310,23 +362,22 @@ TestLoadLoadReduction() auto ld1 = LoadNonVolatileNode::Create(a2, { s1 }, vt, 4); auto ld2 = LoadNonVolatileNode::Create(a3, { s2 }, vt, 4); - auto ld3 = LoadNonVolatileNode::Create(a4, { st1[0], ld1[1], ld2[1] }, vt, 4); + auto & loadNode = LoadNonVolatileNode::CreateNode(*a4, { st1[0], ld1[1], ld2[1] }, vt, 4); - auto & x1 = GraphExport::Create(*ld3[1], "s"); - auto & x2 = GraphExport::Create(*ld3[2], "s"); - auto & x3 = GraphExport::Create(*ld3[3], "s"); + auto & x1 = GraphExport::Create(*loadNode.output(1), "s"); + auto & x2 = GraphExport::Create(*loadNode.output(2), "s"); + auto & x3 = GraphExport::Create(*loadNode.output(3), "s"); jlm::rvsdg::view(graph.root(), stdout); // Act - nf->set_mutable(true); - nf->set_load_load_state_reducible(true); - graph.normalize(); + auto success = jlm::rvsdg::ReduceNode(NormalizeLoadLoadState, loadNode); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); // Assert + assert(success); assert(graph.root()->nnodes() == 6); auto ld = jlm::rvsdg::output::GetNode(*x1.origin()); From e18cdd62854db24d1d346c0d23ab7b2e91380c7c Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Mon, 16 Dec 2024 17:54:33 +0100 Subject: [PATCH 133/170] Adds loop invariant optimization (#681) Inserts a hls_loop_const_buff for arguments that are constant. 
--- .../backend/rvsdg2rhls/ThetaConversion.cpp | 36 ++++++++++++++----- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 36 +++++++++++-------- 2 files changed, 49 insertions(+), 23 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index 18aa7e259..8786a08ca 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -18,16 +18,31 @@ ConvertThetaNode(rvsdg::ThetaNode & theta) auto loop = hls::loop_node::create(theta.region()); std::vector branches; - // add loopvars and populate the smap + // Add loop variables, insert loop constant buffers for invariant variables, and populate the + // smap. for (size_t i = 0; i < theta.ninputs(); i++) { - jlm::rvsdg::output * buffer; - loop->add_loopvar(theta.input(i)->origin(), &buffer); - smap.insert(theta.input(i)->argument(), buffer); - // buffer out is only used by branch - branches.push_back(*buffer->begin()); - // divert theta outputs - theta.output(i)->divert_users(loop->output(i)); + // Check if the input is a loop invariant such that a loop constant buffer should be created. + // Memory state inputs are not loop variables containting a value, so we ignor these. + if (is_invariant(theta.input(i)) + && !jlm::rvsdg::is(theta.input(i)->Type())) + { + smap.insert(theta.input(i)->argument(), loop->add_loopconst(theta.input(i)->origin())); + branches.push_back(nullptr); + // The HLS loop has no output for this input. The users of the theta output is + // therefore redirected to the input origin, as the value is loop invariant. 
+ theta.output(i)->divert_users(theta.input(i)->origin()); + } + else + { + jlm::rvsdg::output * buffer; + loop->add_loopvar(theta.input(i)->origin(), &buffer); + smap.insert(theta.input(i)->argument(), buffer); + // buffer out is only used by branch + branches.push_back(*buffer->begin()); + // divert theta outputs + theta.output(i)->divert_users(loop->output(loop->noutputs() - 1)); + } } // copy contents of theta @@ -37,7 +52,10 @@ ConvertThetaNode(rvsdg::ThetaNode & theta) loop->set_predicate(smap.lookup(theta.predicate()->origin())); for (size_t i = 0; i < theta.ninputs(); i++) { - branches[i]->divert_to(smap.lookup(theta.input(i)->result()->origin())); + if (branches[i]) + { + branches[i]->divert_to(smap.lookup(theta.input(i)->result()->origin())); + } } remove(&theta); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index 2f1432e8f..32699ff96 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -10,10 +10,11 @@ #include #include -static inline void +static int TestUnknownBoundaries() { using namespace jlm::llvm; + using namespace jlm::hls; // Arrange auto b32 = jlm::rvsdg::bittype::Create(32); @@ -48,27 +49,34 @@ TestUnknownBoundaries() theta->set_predicate(match); auto f = lambda->finalize({ theta->output(0), theta->output(1), theta->output(2) }); - jlm::llvm::GraphExport::Create(*f, ""); + GraphExport::Create(*f, ""); jlm::rvsdg::view(rm.Rvsdg(), stdout); // Act - jlm::hls::ConvertThetaNodes(rm); + ConvertThetaNodes(rm); jlm::rvsdg::view(rm.Rvsdg(), stdout); // Assert - assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); - assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); - assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); - assert(jlm::rvsdg::Region::Contains(*lambda->subregion(), true)); -} - -static int -Test() -{ - TestUnknownBoundaries(); + auto lambdaRegion = lambda->subregion(); + 
assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + // Check that two constant buffers are created for the loop invariant variables + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(lambdaRegion->argument(0)->nusers() == 1); + auto loopInput = + jlm::util::AssertedCast(*lambdaRegion->argument(0)->begin()); + auto loopNode = jlm::util::AssertedCast(loopInput->node()); + auto loopConstInput = jlm::util::AssertedCast( + *loopNode->subregion()->argument(3)->begin()); + jlm::util::AssertedCast(&loopConstInput->node()->GetOperation()); + loopConstInput = jlm::util::AssertedCast( + *loopNode->subregion()->argument(4)->begin()); + jlm::util::AssertedCast(&loopConstInput->node()->GetOperation()); return 0; } -JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/TestTheta", Test) +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/TestTheta", TestUnknownBoundaries) From e1f5ec0148716775cfef1d9475ffde5432eeedea Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 18 Dec 2024 09:08:45 +0100 Subject: [PATCH 134/170] Add clang-tidy support to CI (#685) This PR adds support for clang-tidy to the CI. It does not enforce any checks yet, but only adds the necessary infrastructure and fixes the problems in the code such that clang-tidy finishes without errors. Relevant checks will be added in future PRs. 
Close #313 --- .clang-tidy | 8 +++++ .github/actions/InstallPackages/action.yml | 13 +++++++- .github/workflows/ClangTidy.yml | 32 +++++++++++++++++++ Makefile.rules | 6 ++++ jlm/llvm/backend/jlm2llvm/instruction.hpp | 3 ++ .../frontend/LlvmInstructionConversion.hpp | 2 ++ jlm/llvm/opt/InvariantValueRedirection.hpp | 4 +++ .../opt/alias-analyses/MemoryStateEncoder.hpp | 12 +++++++ jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 30 ++++++++++++++++- jlm/llvm/opt/unroll.hpp | 3 +- jlm/util/AnnotationMap.hpp | 1 + jlm/util/intrusive-hash.hpp | 1 + 12 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 .clang-tidy create mode 100644 .github/workflows/ClangTidy.yml diff --git a/.clang-tidy b/.clang-tidy new file mode 100644 index 000000000..ea3b01b35 --- /dev/null +++ b/.clang-tidy @@ -0,0 +1,8 @@ +--- +Checks: '-*, + modernize-deprecated-headers, + ' + +WarningsAsErrors: ' +# modernize-deprecated-headers, + ' \ No newline at end of file diff --git a/.github/actions/InstallPackages/action.yml b/.github/actions/InstallPackages/action.yml index abc62af62..f8dfd4d41 100644 --- a/.github/actions/InstallPackages/action.yml +++ b/.github/actions/InstallPackages/action.yml @@ -27,6 +27,11 @@ inputs: default: "false" required: false + install-clang-tidy: + description: "Install clang-tidy package. Default is 'false'." + default: "false" + required: false + install-ninja: description: "Install ninja package. Default is 'false'." 
default: "false" @@ -54,7 +59,8 @@ runs: if: ${{inputs.install-llvm == 'true' || inputs.install-clang == 'true' || inputs.install-mlir == 'true' - || inputs.install-clang-format == 'true'}} + || inputs.install-clang-format == 'true' + || inputs.install-clang-tidy == 'true'}} run: | export HAS_LLVM_REPOSITORY=$(find /etc/apt/ -name *.list | xargs cat | grep llvm-toolchain-jammy-${{inputs.llvm-version}}) if [[ -z $HAS_LLVM_REPOSITORY ]]; then @@ -95,6 +101,11 @@ runs: run: sudo apt-get install clang-format-${{inputs.llvm-version}} shell: bash + - name: "Install clang-tidy package" + if: ${{inputs.install-clang-tidy == 'true'}} + run: sudo apt-get install clang-tidy-${{inputs.llvm-version}} + shell: bash + - name: "Install ninja package" if: ${{inputs.install-ninja == 'true'}} run: sudo apt-get install ninja-build diff --git a/.github/workflows/ClangTidy.yml b/.github/workflows/ClangTidy.yml new file mode 100644 index 000000000..7cc9c73b9 --- /dev/null +++ b/.github/workflows/ClangTidy.yml @@ -0,0 +1,32 @@ +name: ClangTidy + +on: + pull_request: + branches: [ master ] + +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true + +jobs: + CheckTidy: + runs-on: ubuntu-22.04 + steps: + - uses: actions/checkout@v4 + + - name: "Install clang tidy" + uses: ./.github/actions/InstallPackages + with: + install-clang-tidy: true + + - name: "Install HLS dialect dependencies" + uses: ./.github/actions/BuildCirct + + - name: "Install MLIR dialect dependencies" + uses: ./.github/actions/BuildMlirDialect + + - name: "Configure jlm with HLS and MLIR enabled" + run: ./configure.sh --enable-mlir=${{ github.workspace }}/lib/mlir-rvsdg --enable-hls=${{ github.workspace }}/build-circt/circt + + - name: "Run clang tidy" + run: make tidy diff --git a/Makefile.rules b/Makefile.rules index 04c7572a7..0e8a8edff 100644 --- a/Makefile.rules +++ b/Makefile.rules @@ -183,3 +183,9 @@ format: format-dry-run: 
clang-format-$(LLVM_VERSION) --dry-run --Werror --style="file:.clang-format" --verbose -i $(SOURCES) $(HEADERS) + +################################################################################# +# Clang tidy rules + +tidy: $(COMMANDPATHSFILE) + clang-tidy-$(LLVM_VERSION) --config-file=.clang-tidy $(HEADERS) $(SOURCES) -- $(CXXFLAGS) $(CPPFLAGS) -I$(BUILD_OUT_PREFIX) diff --git a/jlm/llvm/backend/jlm2llvm/instruction.hpp b/jlm/llvm/backend/jlm2llvm/instruction.hpp index f95f6d366..746b2abfe 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.hpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.hpp @@ -6,6 +6,8 @@ #ifndef JLM_LLVM_BACKEND_JLM2LLVM_INSTRUCTION_HPP #define JLM_LLVM_BACKEND_JLM2LLVM_INSTRUCTION_HPP +#include + namespace llvm { @@ -16,6 +18,7 @@ class Constant; namespace jlm::llvm { +class cfg_node; class tac; namespace jlm2llvm diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.hpp b/jlm/llvm/frontend/LlvmInstructionConversion.hpp index 992d23acf..7bf79c8cb 100644 --- a/jlm/llvm/frontend/LlvmInstructionConversion.hpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.hpp @@ -6,6 +6,8 @@ #ifndef JLM_LLVM_FRONTEND_LLVMINSTRUCTIONCONVERSION_HPP #define JLM_LLVM_FRONTEND_LLVMINSTRUCTIONCONVERSION_HPP +#include + namespace llvm { class Constant; diff --git a/jlm/llvm/opt/InvariantValueRedirection.hpp b/jlm/llvm/opt/InvariantValueRedirection.hpp index 79f0b3dc5..3942d69ac 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.hpp +++ b/jlm/llvm/opt/InvariantValueRedirection.hpp @@ -11,12 +11,16 @@ namespace jlm::rvsdg { class GammaNode; +class Graph; +class Region; +class StructuralNode; class ThetaNode; } namespace jlm::llvm { +class CallNode; class RvsdgModule; /** \brief Invariant Value Redirection Optimization diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index f3f2bff37..6d08583e0 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ 
b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -7,6 +7,18 @@ #define JLM_LLVM_OPT_ALIAS_ANALYSES_MEMORYSTATEENCODER_HPP #include +#include + +namespace rvsdg +{ +class GammaNode; +class output; +class Region; +class simple_node; +class StructuralNode; +class ThetaNode; +class ThetaOutput; +} namespace jlm::util { diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index fcf197730..e0e4b5952 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -7,17 +7,44 @@ #define JLM_LLVM_OPT_ALIAS_ANALYSES_STEENSGAARD_HPP #include +#include #include namespace jlm::rvsdg { class GammaNode; +class Graph; +class output; +class Region; +class simple_node; +class StructuralNode; class ThetaNode; } -namespace jlm::llvm::aa +namespace jlm::llvm { +namespace delta +{ +class node; +} + +namespace lambda +{ +class node; +} + +namespace phi +{ +class node; +} + +class CallNode; +class LoadNode; +class StoreNode; + +namespace aa +{ class Location; class RegisterLocation; @@ -244,6 +271,7 @@ class Steensgaard final : public AliasAnalysis std::unique_ptr Context_; }; +} } #endif diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index bcb64e4ec..5550743ba 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -78,8 +78,7 @@ class unrollinfo final theta() const noexcept { auto node = idv()->region()->node(); - JLM_ASSERT(is(node)); - return static_cast(node); + return util::AssertedCast(node); } inline bool diff --git a/jlm/util/AnnotationMap.hpp b/jlm/util/AnnotationMap.hpp index f38fd7a74..b6eb9a9b4 100644 --- a/jlm/util/AnnotationMap.hpp +++ b/jlm/util/AnnotationMap.hpp @@ -13,6 +13,7 @@ #include #include #include +#include namespace jlm::util { diff --git a/jlm/util/intrusive-hash.hpp b/jlm/util/intrusive-hash.hpp index 1558c30c4..54d979fc9 100644 --- a/jlm/util/intrusive-hash.hpp +++ b/jlm/util/intrusive-hash.hpp @@ -9,6 +9,7 @@ #include #include 
+#include #include #include From 6b410ac9f999b1f24148f4616e81663376b19dbe Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 18 Dec 2024 14:59:37 +0100 Subject: [PATCH 135/170] Fix duplicate state normalization for store nodes (#686) This PR does the following: 1. Splits up all store tests into their own unit tests 2. Fixes the duplicate state normalization for store nodes --- jlm/llvm/ir/operators/Store.cpp | 68 ++++++++------- tests/jlm/llvm/ir/operators/StoreTests.cpp | 97 ++++++++++++++-------- 2 files changed, 98 insertions(+), 67 deletions(-) diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 781ef4257..d1cecd9ba 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -6,6 +6,7 @@ #include #include #include +#include namespace jlm::llvm { @@ -241,10 +242,8 @@ is_store_alloca_reducible(const std::vector & operands) static bool is_multiple_origin_reducible(const std::vector & operands) { - std::unordered_set states( - std::next(std::next(operands.begin())), - operands.end()); - return states.size() != operands.size() - 2; + const util::HashSet states(std::next(operands.begin(), 2), operands.end()); + return states.Size() != operands.size() - 2; } static std::vector @@ -296,17 +295,40 @@ perform_store_alloca_reduction( static std::vector perform_multiple_origin_reduction( - const StoreNonVolatileOperation & op, + const StoreNonVolatileOperation & operation, const std::vector & operands) { - std::unordered_set states( - std::next(std::next(operands.begin())), - operands.end()); - return StoreNonVolatileNode::Create( - operands[0], - operands[1], - { states.begin(), states.end() }, - op.GetAlignment()); + // FIXME: Unify with the duplicate state removal reduction of the LoadNonVolatile operation + + JLM_ASSERT(operands.size() > 2); + const auto address = operands[0]; + const auto value = operands[1]; + + std::vector newInputStates; + std::unordered_map stateIndexMap; + for (size_t n = 2; n < 
operands.size(); n++) + { + auto state = operands[n]; + if (stateIndexMap.find(state) == stateIndexMap.end()) + { + const size_t resultIndex = newInputStates.size(); + newInputStates.push_back(state); + stateIndexMap[state] = resultIndex; + } + } + + const auto storeResults = + StoreNonVolatileNode::Create(address, value, newInputStates, operation.GetAlignment()); + + std::vector results(operation.nresults(), nullptr); + for (size_t n = 2; n < operands.size(); n++) + { + auto state = operands[n]; + JLM_ASSERT(stateIndexMap.find(state) != stateIndexMap.end()); + results[n - 2] = storeResults[stateIndexMap[state]]; + } + + return results; } store_normal_form::~store_normal_form() @@ -363,24 +385,8 @@ store_normal_form::normalize_node(rvsdg::Node * node) const if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) { - auto outputs = perform_multiple_origin_reduction(*op, operands); - auto new_node = jlm::rvsdg::output::GetNode(*outputs[0]); - - std::unordered_map origin2output; - for (size_t n = 0; n < outputs.size(); n++) - { - auto origin = new_node->input(n + 2)->origin(); - JLM_ASSERT(origin2output.find(origin) == origin2output.end()); - origin2output[origin] = outputs[n]; - } - - for (size_t n = 2; n < node->ninputs(); n++) - { - auto origin = node->input(n)->origin(); - JLM_ASSERT(origin2output.find(origin) != origin2output.end()); - node->output(n - 2)->divert_users(origin2output[origin]); - } - remove(node); + divert_users(node, perform_multiple_origin_reduction(*op, operands)); + node->region()->remove_node(node); return false; } diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index aac44a41c..a09307c27 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -173,7 +173,7 @@ JLM_UNIT_TEST_REGISTER( "jlm/llvm/ir/operators/StoreTests-StoreVolatileNodeCopy", StoreVolatileNodeCopy) -static void +static int TestCopy() { using namespace 
jlm::llvm; @@ -202,9 +202,13 @@ TestCopy() auto copiedStoreNode = dynamic_cast(copiedNode); assert(copiedNode != nullptr); assert(storeNode->GetOperation() == copiedStoreNode->GetOperation()); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/operators/StoreTests-TestCopy", TestCopy) + +static int TestStoreMuxReduction() { using namespace jlm::llvm; @@ -251,48 +255,72 @@ TestStoreMuxReduction() assert(jlm::rvsdg::is(n0->GetOperation())); assert(jlm::rvsdg::is(n1->GetOperation())); assert(jlm::rvsdg::is(n2->GetOperation())); + + return 0; } -static void -TestMultipleOriginReduction() +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/StoreTests-TestStoreMuxReduction", + TestStoreMuxReduction) + +static int +TestDuplicateStateReduction() { using namespace jlm::llvm; // Arrange - auto vt = jlm::tests::valuetype::Create(); - auto pt = PointerType::Create(); - auto mt = MemoryStateType::Create(); + auto valueType = jlm::tests::valuetype::Create(); + auto pointerType = PointerType::Create(); + auto memoryStateType = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(StoreNonVolatileOperation)); - auto snf = static_cast(nf); - snf->set_mutable(false); - snf->set_multiple_origin_reducible(false); + const auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_multiple_origin_reducible(false); - auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); - auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); - auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); + auto a = &jlm::tests::GraphImport::Create(graph, pointerType, "a"); + auto v = &jlm::tests::GraphImport::Create(graph, valueType, "v"); + auto s1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "s1"); + auto s2 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "s2"); + auto s3 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "s3"); - auto states = 
StoreNonVolatileNode::Create(a, v, { s, s, s, s }, 4); + auto states = StoreNonVolatileNode::Create(a, v, { s1, s2, s1, s2, s3 }, 4); - auto & ex = GraphExport::Create(*states[0], "s"); + auto & exS1 = GraphExport::Create(*states[0], "exS1"); + auto & exS2 = GraphExport::Create(*states[1], "exS2"); + auto & exS3 = GraphExport::Create(*states[2], "exS3"); + auto & exS4 = GraphExport::Create(*states[3], "exS4"); + auto & exS5 = GraphExport::Create(*states[4], "exS5"); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); // Act - snf->set_mutable(true); - snf->set_multiple_origin_reducible(true); + nf->set_mutable(true); + nf->set_multiple_origin_reducible(true); graph.normalize(); graph.prune(); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); // Assert - auto node = jlm::rvsdg::output::GetNode(*ex.origin()); - assert(jlm::rvsdg::is(node->GetOperation()) && node->ninputs() == 3); + auto node = jlm::rvsdg::output::GetNode(*exS1.origin()); + assert(is(node)); + assert(node->ninputs() == 5); + assert(node->noutputs() == 3); + assert(exS1.origin() == node->output(0)); + assert(exS2.origin() == node->output(1)); + assert(exS3.origin() == node->output(0)); + assert(exS4.origin() == node->output(1)); + assert(exS5.origin() == node->output(2)); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/StoreTests-TestDuplicateStateReduction", + TestDuplicateStateReduction) + +static int TestStoreAllocaReduction() { using namespace jlm::llvm; @@ -339,9 +367,15 @@ TestStoreAllocaReduction() has_add_import = true; } assert(has_add_import); + + return 0; } -static void +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/StoreTests-TestStoreAllocaReduction", + TestStoreAllocaReduction) + +static int TestStoreStoreReduction() { using namespace jlm::llvm; @@ -375,19 +409,10 @@ TestStoreStoreReduction() // Assert assert(graph.root()->nnodes() == 1); assert(jlm::rvsdg::output::GetNode(*ex.origin())->input(1)->origin() 
== v2); -} - -static int -TestStore() -{ - TestCopy(); - - TestStoreMuxReduction(); - TestStoreAllocaReduction(); - TestMultipleOriginReduction(); - TestStoreStoreReduction(); return 0; } -JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/operators/StoreTests", TestStore) +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/StoreTests-TestStoreStoreReduction", + TestStoreStoreReduction) From 0c351864119706a3f7efa71274bbf8f0e0f24936 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 18 Dec 2024 22:02:53 +0100 Subject: [PATCH 136/170] Add modernize-deprecated-headers check to clang-tidy (#687) --- .clang-tidy | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 -- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 7 ------- jlm/rvsdg/bitstring/concat.cpp | 2 -- jlm/rvsdg/bitstring/constant.hpp | 5 ++--- jlm/rvsdg/control.hpp | 2 -- jlm/rvsdg/graph.hpp | 8 ++------ jlm/rvsdg/node-normal-form.hpp | 6 ++---- jlm/rvsdg/node.hpp | 11 ++++------- jlm/rvsdg/region.hpp | 3 --- jlm/rvsdg/tracker.hpp | 5 +---- jlm/rvsdg/traverser.hpp | 3 --- jlm/util/common.hpp | 3 +-- tests/jlm/llvm/frontend/llvm/test-restructuring.cpp | 2 -- tests/jlm/llvm/ir/test-cfg-orderings.cpp | 2 -- tests/jlm/llvm/ir/test-cfg-structure.cpp | 2 -- tests/jlm/rvsdg/bitstring/bitstring.cpp | 4 ---- tests/jlm/rvsdg/test-graph.cpp | 3 --- tests/jlm/rvsdg/test-statemux.cpp | 2 -- tests/jlm/rvsdg/test-typemismatch.cpp | 2 -- tests/jlm/util/test-disjointset.cpp | 3 --- tests/jlm/util/test-intrusive-hash.cpp | 4 ++-- tests/jlm/util/test-intrusive-list.cpp | 4 ++-- tests/test-runner.cpp | 2 -- 24 files changed, 17 insertions(+), 72 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index ea3b01b35..2bcbaae63 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -4,5 +4,5 @@ Checks: '-*, ' WarningsAsErrors: ' -# modernize-deprecated-headers, + modernize-deprecated-headers, ' \ No newline at end of file diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 6ac11e3ff..e4cb17207 100644 --- 
a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -4,10 +4,8 @@ */ #include -#include #include -#include namespace jlm::hls { diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 122a2df7c..8d4a66fc8 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -3,18 +3,11 @@ * See COPYING for terms of redistribution. */ -#include - #include #include #include -#include #include -#include -#include -#include #include -#include #include namespace jlm::hls diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 428f0335f..f32885497 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -9,8 +9,6 @@ #include #include -#include - namespace jlm::rvsdg { diff --git a/jlm/rvsdg/bitstring/constant.hpp b/jlm/rvsdg/bitstring/constant.hpp index 912ee93b2..849986be7 100644 --- a/jlm/rvsdg/bitstring/constant.hpp +++ b/jlm/rvsdg/bitstring/constant.hpp @@ -7,15 +7,14 @@ #ifndef JLM_RVSDG_BITSTRING_CONSTANT_HPP #define JLM_RVSDG_BITSTRING_CONSTANT_HPP -#include -#include - #include #include #include #include #include +#include + namespace jlm::rvsdg { diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index 5a5feb3a4..b43f8e429 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -15,8 +15,6 @@ #include -#include - namespace jlm::rvsdg { diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index 939151112..0cd8378d8 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -7,18 +7,14 @@ #ifndef JLM_RVSDG_GRAPH_HPP #define JLM_RVSDG_GRAPH_HPP -#include -#include - -#include - #include #include #include #include - #include +#include + namespace jlm::rvsdg { diff --git a/jlm/rvsdg/node-normal-form.hpp b/jlm/rvsdg/node-normal-form.hpp index f23a72272..4beae2780 100644 --- a/jlm/rvsdg/node-normal-form.hpp +++ b/jlm/rvsdg/node-normal-form.hpp @@ -7,16 
+7,14 @@ #ifndef JLM_RVSDG_NODE_NORMAL_FORM_HPP #define JLM_RVSDG_NODE_NORMAL_FORM_HPP -#include +#include +#include #include #include #include #include -#include -#include - /* normal forms */ namespace jlm::rvsdg diff --git a/jlm/rvsdg/node.hpp b/jlm/rvsdg/node.hpp index 5baf927bc..5618ab218 100644 --- a/jlm/rvsdg/node.hpp +++ b/jlm/rvsdg/node.hpp @@ -7,18 +7,15 @@ #ifndef JLM_RVSDG_NODE_HPP #define JLM_RVSDG_NODE_HPP -#include -#include -#include -#include -#include -#include - #include #include #include #include +#include +#include +#include + namespace jlm::rvsdg { namespace base diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index f77a21ddc..843408d7c 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -7,9 +7,6 @@ #ifndef JLM_RVSDG_REGION_HPP #define JLM_RVSDG_REGION_HPP -#include -#include - #include #include #include diff --git a/jlm/rvsdg/tracker.hpp b/jlm/rvsdg/tracker.hpp index 9c24794cb..6aa1112a8 100644 --- a/jlm/rvsdg/tracker.hpp +++ b/jlm/rvsdg/tracker.hpp @@ -7,13 +7,10 @@ #ifndef JLM_RVSDG_TRACKER_HPP #define JLM_RVSDG_TRACKER_HPP -#include -#include +#include #include -#include - namespace jlm::rvsdg { diff --git a/jlm/rvsdg/traverser.hpp b/jlm/rvsdg/traverser.hpp index a201469f1..18a9d3523 100644 --- a/jlm/rvsdg/traverser.hpp +++ b/jlm/rvsdg/traverser.hpp @@ -9,9 +9,6 @@ #include -#include -#include - namespace jlm::rvsdg { diff --git a/jlm/util/common.hpp b/jlm/util/common.hpp index 0618da4b5..a36bc33c7 100644 --- a/jlm/util/common.hpp +++ b/jlm/util/common.hpp @@ -6,8 +6,7 @@ #ifndef JLM_UTIL_COMMON_HPP #define JLM_UTIL_COMMON_HPP -#include - +#include #include #include diff --git a/tests/jlm/llvm/frontend/llvm/test-restructuring.cpp b/tests/jlm/llvm/frontend/llvm/test-restructuring.cpp index d7a6e0890..241859cd4 100644 --- a/tests/jlm/llvm/frontend/llvm/test-restructuring.cpp +++ b/tests/jlm/llvm/frontend/llvm/test-restructuring.cpp @@ -10,8 +10,6 @@ #include #include -#include - static inline void 
test_acyclic_structured() { diff --git a/tests/jlm/llvm/ir/test-cfg-orderings.cpp b/tests/jlm/llvm/ir/test-cfg-orderings.cpp index 31b4d725f..1260f47b9 100644 --- a/tests/jlm/llvm/ir/test-cfg-orderings.cpp +++ b/tests/jlm/llvm/ir/test-cfg-orderings.cpp @@ -8,8 +8,6 @@ #include #include -#include - static int test() { diff --git a/tests/jlm/llvm/ir/test-cfg-structure.cpp b/tests/jlm/llvm/ir/test-cfg-structure.cpp index c2be94a5e..d57d2b6b9 100644 --- a/tests/jlm/llvm/ir/test-cfg-structure.cpp +++ b/tests/jlm/llvm/ir/test-cfg-structure.cpp @@ -12,8 +12,6 @@ #include #include -#include - static void test_straightening() { diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 5a7582dd9..93fda72cb 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -7,11 +7,7 @@ #include "test-registry.hpp" #include -#include -#include - #include -#include #include static int diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index f34cc9e08..6f8c206e7 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -8,9 +8,6 @@ #include "test-registry.hpp" #include "test-types.hpp" -#include -#include - #include static bool diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index 648b91c59..82ee7da42 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ b/tests/jlm/rvsdg/test-statemux.cpp @@ -8,8 +8,6 @@ #include "test-registry.hpp" #include "test-types.hpp" -#include - #include #include diff --git a/tests/jlm/rvsdg/test-typemismatch.cpp b/tests/jlm/rvsdg/test-typemismatch.cpp index 8098e2bbb..ab55cb267 100644 --- a/tests/jlm/rvsdg/test-typemismatch.cpp +++ b/tests/jlm/rvsdg/test-typemismatch.cpp @@ -8,8 +8,6 @@ #include "test-registry.hpp" #include "test-types.hpp" -#include - static int test_main(void) { diff --git a/tests/jlm/util/test-disjointset.cpp b/tests/jlm/util/test-disjointset.cpp index 6e717663b..8bf1f32f7 
100644 --- a/tests/jlm/util/test-disjointset.cpp +++ b/tests/jlm/util/test-disjointset.cpp @@ -7,9 +7,6 @@ #include -#include -#include - static void print(const jlm::util::disjointset::set & set) { diff --git a/tests/jlm/util/test-intrusive-hash.cpp b/tests/jlm/util/test-intrusive-hash.cpp index 3071ad290..d1f90a9b5 100644 --- a/tests/jlm/util/test-intrusive-hash.cpp +++ b/tests/jlm/util/test-intrusive-hash.cpp @@ -5,10 +5,10 @@ #include "test-registry.hpp" -#include - #include +#include + struct my_item { my_item(int k, int v) diff --git a/tests/jlm/util/test-intrusive-list.cpp b/tests/jlm/util/test-intrusive-list.cpp index cc04de36e..cb8ebb9ff 100644 --- a/tests/jlm/util/test-intrusive-list.cpp +++ b/tests/jlm/util/test-intrusive-list.cpp @@ -5,10 +5,10 @@ #include "test-registry.hpp" -#include - #include +#include + namespace { diff --git a/tests/test-runner.cpp b/tests/test-runner.cpp index eb8c0df22..e14e6dec2 100644 --- a/tests/test-runner.cpp +++ b/tests/test-runner.cpp @@ -5,8 +5,6 @@ #include "test-registry.hpp" -#include - int main(int argc, char ** argv) { From 24cfa78019c6d1f3e1299909c13f3ab5d2f50bca Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 19 Dec 2024 09:41:15 +0100 Subject: [PATCH 137/170] Convert store normalizations to new normalization interface (#688) --- jlm/llvm/ir/operators/Store.cpp | 44 +++++++++ jlm/llvm/ir/operators/Store.hpp | 85 +++++++++++++++++ tests/jlm/llvm/ir/operators/StoreTests.cpp | 104 ++++++++++----------- 3 files changed, 181 insertions(+), 52 deletions(-) diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index d1cecd9ba..62265e253 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -470,6 +470,50 @@ store_normal_form::set_multiple_origin_reducible(bool enable) graph()->mark_denormalized(); } +std::optional> +NormalizeStoreMux( + const StoreNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_store_mux_reducible(operands)) + 
return perform_store_mux_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeStoreStore( + const StoreNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_store_store_reducible(operation, operands)) + return perform_store_store_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeStoreAlloca( + const StoreNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_store_alloca_reducible(operands)) + return perform_store_alloca_reduction(operation, operands); + + return std::nullopt; +} + +std::optional> +NormalizeStoreDuplicateState( + const StoreNonVolatileOperation & operation, + const std::vector & operands) +{ + if (is_multiple_origin_reducible(operands)) + return perform_multiple_origin_reduction(operation, operands); + + return std::nullopt; +} + } namespace diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 891f2a582..1160b825d 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -12,6 +12,8 @@ #include #include +#include + namespace jlm::llvm { @@ -569,6 +571,89 @@ class StoreVolatileNode final : public StoreNode } }; +/** + * \brief Swaps a memory state merge operation and a store operation. + * + * sx1 = MemStateMerge si1 ... siM + * sl1 = StoreNonVolatile a v sx1 + * => + * sl1 ... slM = StoreNonVolatile a v si1 ... siM + * sx1 = MemStateMerge sl1 ... slM + * + * FIXME: The reduction can be generalized: A store node can have multiple operands from different + * merge nodes. + * + * @param operation The operation of the StoreNonVolatile node. + * @param operands The operands of the StoreNonVolatile node. + * + * @return If the normalization could be applied, then the results of the store operation after + * the transformation. Otherwise, std::nullopt. 
+ */ +std::optional> +NormalizeStoreMux( + const StoreNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief Removes a duplicated store to the same address. + * + * so1 so2 = StoreNonVolatile a v1 si1 si2 + * sx1 sx2 = StoreNonVolatile a v2 so1 so2 + * => + * sx1 sx2 = StoreNonVolatile a v2 si1 si2 + * + * @param operation The operation of the StoreNonVolatile node. + * @param operands The operands of the StoreNonVolatile node. + * + * @return If the normalization could be applied, then the results of the store operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeStoreStore( + const StoreNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief Removes unnecessary state from a store node when its address originates directly from an + * alloca node. + * + * a s = Alloca b + * so1 so2 = StoreNonVolatile a v s si1 si2 + * ... = AnyOp so1 so2 + * => + * a s = Alloca b + * so1 = StoreNonVolatile a v s + * ... = AnyOp so1 so1 + * + * @param operation The operation of the StoreNonVolatile node. + * @param operands The operands of the StoreNonVolatile node. + * + * @return If the normalization could be applied, then the results of the store operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +NormalizeStoreAlloca( + const StoreNonVolatileOperation & operation, + const std::vector & operands); + +/** + * \brief Remove duplicated state operands + * + * so1 so2 so3 = StoreNonVolatile a v si1 si1 si1 + * => + * so1 = StoreNonVolatile a v si1 + * + * @param operation The load operation on which the transformation is performed. + * @param operands The operands of the load node. + * + * @return If the normalization could be applied, then the results of the load operation after + * the transformation. Otherwise, std::nullopt. 
+ */ +std::optional> +NormalizeStoreDuplicateState( + const StoreNonVolatileOperation & operation, + const std::vector & operands); + } #endif diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index a09307c27..ed75cd70a 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -7,13 +7,13 @@ #include #include -#include -#include - #include #include #include #include +#include +#include +#include static int StoreNonVolatileOperationEquality() @@ -209,7 +209,7 @@ TestCopy() JLM_UNIT_TEST_REGISTER("jlm/llvm/ir/operators/StoreTests-TestCopy", TestCopy) static int -TestStoreMuxReduction() +TestStoreMuxNormalization() { using namespace jlm::llvm; @@ -219,10 +219,9 @@ TestStoreMuxReduction() auto mt = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(StoreNonVolatileOperation)); - auto snf = static_cast(nf); - snf->set_mutable(false); - snf->set_store_mux_reducible(false); + auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_store_mux_reducible(false); auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); @@ -231,27 +230,26 @@ TestStoreMuxReduction() auto s3 = &jlm::tests::GraphImport::Create(graph, mt, "s3"); auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); - auto state = StoreNonVolatileNode::Create(a, v, { mux }, 4); + auto & storeNode = StoreNonVolatileNode::CreateNode(*a, *v, { mux }, 4); - auto & ex = GraphExport::Create(*state[0], "s"); + auto & ex = GraphExport::Create(*storeNode.output(0), "s"); - // jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(graph.root(), stdout); // Act - snf->set_mutable(true); - snf->set_store_mux_reducible(true); - graph.normalize(); + auto success = jlm::rvsdg::ReduceNode(NormalizeStoreMux, storeNode); graph.prune(); - // jlm::rvsdg::view(graph.root(), stdout); + 
jlm::rvsdg::view(graph.root(), stdout); // Assert - auto muxnode = jlm::rvsdg::output::GetNode(*ex.origin()); - assert(is(muxnode)); - assert(muxnode->ninputs() == 3); - auto n0 = jlm::rvsdg::output::GetNode(*muxnode->input(0)->origin()); - auto n1 = jlm::rvsdg::output::GetNode(*muxnode->input(1)->origin()); - auto n2 = jlm::rvsdg::output::GetNode(*muxnode->input(2)->origin()); + assert(success); + auto muxNode = jlm::rvsdg::output::GetNode(*ex.origin()); + assert(is(muxNode)); + assert(muxNode->ninputs() == 3); + auto n0 = jlm::rvsdg::output::GetNode(*muxNode->input(0)->origin()); + auto n1 = jlm::rvsdg::output::GetNode(*muxNode->input(1)->origin()); + auto n2 = jlm::rvsdg::output::GetNode(*muxNode->input(2)->origin()); assert(jlm::rvsdg::is(n0->GetOperation())); assert(jlm::rvsdg::is(n1->GetOperation())); assert(jlm::rvsdg::is(n2->GetOperation())); @@ -260,8 +258,8 @@ TestStoreMuxReduction() } JLM_UNIT_TEST_REGISTER( - "jlm/llvm/ir/operators/StoreTests-TestStoreMuxReduction", - TestStoreMuxReduction) + "jlm/llvm/ir/operators/StoreTests-TestStoreMuxNormalization", + TestStoreMuxNormalization) static int TestDuplicateStateReduction() @@ -284,25 +282,25 @@ TestDuplicateStateReduction() auto s2 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "s2"); auto s3 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "s3"); - auto states = StoreNonVolatileNode::Create(a, v, { s1, s2, s1, s2, s3 }, 4); + auto & storeNode = StoreNonVolatileNode::CreateNode(*a, *v, { s1, s2, s1, s2, s3 }, 4); - auto & exS1 = GraphExport::Create(*states[0], "exS1"); - auto & exS2 = GraphExport::Create(*states[1], "exS2"); - auto & exS3 = GraphExport::Create(*states[2], "exS3"); - auto & exS4 = GraphExport::Create(*states[3], "exS4"); - auto & exS5 = GraphExport::Create(*states[4], "exS5"); + auto & exS1 = GraphExport::Create(*storeNode.output(0), "exS1"); + auto & exS2 = GraphExport::Create(*storeNode.output(1), "exS2"); + auto & exS3 = GraphExport::Create(*storeNode.output(2), 
"exS3"); + auto & exS4 = GraphExport::Create(*storeNode.output(3), "exS4"); + auto & exS5 = GraphExport::Create(*storeNode.output(4), "exS5"); view(graph.root(), stdout); // Act - nf->set_mutable(true); - nf->set_multiple_origin_reducible(true); - graph.normalize(); + auto success = + jlm::rvsdg::ReduceNode(NormalizeStoreDuplicateState, storeNode); graph.prune(); view(graph.root(), stdout); // Assert + assert(success); auto node = jlm::rvsdg::output::GetNode(*exS1.origin()); assert(is(node)); assert(node->ninputs() == 5); @@ -331,10 +329,9 @@ TestStoreAllocaReduction() auto bt = jlm::rvsdg::bittype::Create(32); jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(StoreNonVolatileOperation)); - auto snf = static_cast(nf); - snf->set_mutable(false); - snf->set_store_alloca_reducible(false); + auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_store_alloca_reducible(false); auto size = &jlm::tests::GraphImport::Create(graph, bt, "size"); auto value = &jlm::tests::GraphImport::Create(graph, vt, "value"); @@ -342,24 +339,28 @@ TestStoreAllocaReduction() auto alloca1 = alloca_op::create(vt, size, 4); auto alloca2 = alloca_op::create(vt, size, 4); - auto states1 = StoreNonVolatileNode::Create(alloca1[0], value, { alloca1[1], alloca2[1], s }, 4); - auto states2 = StoreNonVolatileNode::Create(alloca2[0], value, states1, 4); + auto & storeNode1 = + StoreNonVolatileNode::CreateNode(*alloca1[0], *value, { alloca1[1], alloca2[1], s }, 4); + auto & storeNode2 = + StoreNonVolatileNode::CreateNode(*alloca2[0], *value, outputs(&storeNode1), 4); - GraphExport::Create(*states2[0], "s1"); - GraphExport::Create(*states2[1], "s2"); - GraphExport::Create(*states2[2], "s3"); + GraphExport::Create(*storeNode2.output(0), "s1"); + GraphExport::Create(*storeNode2.output(1), "s2"); + GraphExport::Create(*storeNode2.output(2), "s3"); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); // Act - 
snf->set_mutable(true); - snf->set_store_alloca_reducible(true); - graph.normalize(); + auto success1 = + jlm::rvsdg::ReduceNode(NormalizeStoreAlloca, storeNode1); + auto success2 = + jlm::rvsdg::ReduceNode(NormalizeStoreAlloca, storeNode2); graph.prune(); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); // Assert + assert(success1 && success2); bool has_add_import = false; for (size_t n = 0; n < graph.root()->nresults(); n++) { @@ -391,22 +392,21 @@ TestStoreStoreReduction() auto v2 = &jlm::tests::GraphImport::Create(graph, vt, "value"); auto s = &jlm::tests::GraphImport::Create(graph, mt, "state"); - auto s1 = StoreNonVolatileNode::Create(a, v1, { s }, 4)[0]; - auto s2 = StoreNonVolatileNode::Create(a, v2, { s1 }, 4)[0]; + auto & storeNode1 = StoreNonVolatileNode::CreateNode(*a, *v1, { s }, 4); + auto & storeNode2 = StoreNonVolatileNode::CreateNode(*a, *v2, outputs(&storeNode1), 4); - auto & ex = GraphExport::Create(*s2, "state"); + auto & ex = GraphExport::Create(*storeNode2.output(0), "state"); jlm::rvsdg::view(graph.root(), stdout); // Act - auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); - nf->set_store_store_reducible(true); - graph.normalize(); + auto success = jlm::rvsdg::ReduceNode(NormalizeStoreStore, storeNode2); graph.prune(); jlm::rvsdg::view(graph.root(), stdout); // Assert + assert(success); assert(graph.root()->nnodes() == 1); assert(jlm::rvsdg::output::GetNode(*ex.origin())->input(1)->origin() == v2); From dd6f2bad98dbd505c415e4bd72e7517a376e994f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 19 Dec 2024 13:10:22 +0100 Subject: [PATCH 138/170] Rename simple_node class to SimpleNode (#689) --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 62 +++++++-------- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 50 ++++++------ jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 8 +- jlm/hls/backend/rvsdg2rhls/add-buffers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 4 
+- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 4 +- .../rvsdg2rhls/distribute-constants.cpp | 8 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 78 +++++++++---------- jlm/hls/backend/rvsdg2rhls/mem-conv.hpp | 20 ++--- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 10 +-- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 12 +-- jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- .../rvsdg2rhls/remove-redundant-buf.cpp | 2 +- .../rvsdg2rhls/remove-unused-state.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 +- jlm/hls/ir/hls.hpp | 44 +++++------ jlm/hls/opt/cne.cpp | 6 +- jlm/hls/util/view.cpp | 4 +- .../InterProceduralGraphConversion.cpp | 4 +- jlm/llvm/ir/operators/GetElementPtr.hpp | 2 +- jlm/llvm/ir/operators/Load.cpp | 4 +- jlm/llvm/ir/operators/Load.hpp | 4 +- jlm/llvm/ir/operators/MemCpy.hpp | 6 +- .../ir/operators/MemoryStateOperations.hpp | 12 +-- jlm/llvm/ir/operators/Store.cpp | 2 +- jlm/llvm/ir/operators/Store.hpp | 4 +- jlm/llvm/ir/operators/alloca.hpp | 2 +- jlm/llvm/ir/operators/call.hpp | 14 ++-- jlm/llvm/ir/operators/lambda.cpp | 8 +- jlm/llvm/ir/operators/lambda.hpp | 4 +- jlm/llvm/ir/operators/operators.hpp | 31 ++++---- jlm/llvm/ir/operators/sext.cpp | 7 +- jlm/llvm/ir/operators/sext.hpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 4 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 32 ++++---- jlm/llvm/opt/alias-analyses/Andersen.hpp | 30 +++---- .../opt/alias-analyses/MemoryStateEncoder.cpp | 16 ++-- .../opt/alias-analyses/MemoryStateEncoder.hpp | 20 ++--- .../RegionAwareMemoryNodeProvider.cpp | 22 +++--- .../RegionAwareMemoryNodeProvider.hpp | 12 +-- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 36 ++++----- jlm/llvm/opt/alias-analyses/Steensgaard.hpp | 32 ++++---- .../TopDownMemoryNodeEliminator.cpp | 6 +- .../TopDownMemoryNodeEliminator.hpp | 8 +- 
jlm/llvm/opt/cne.cpp | 6 +- jlm/llvm/opt/inlining.cpp | 4 +- jlm/llvm/opt/inlining.hpp | 2 +- jlm/llvm/opt/unroll.cpp | 10 +-- jlm/mlir/backend/JlmToMlirConverter.cpp | 4 +- jlm/mlir/backend/JlmToMlirConverter.hpp | 6 +- jlm/rvsdg/binary.cpp | 6 +- jlm/rvsdg/bitstring/arithmetic.hpp | 4 +- jlm/rvsdg/bitstring/comparison.hpp | 2 +- jlm/rvsdg/bitstring/concat.cpp | 4 +- jlm/rvsdg/bitstring/constant.hpp | 2 +- jlm/rvsdg/bitstring/slice.cpp | 2 +- jlm/rvsdg/control.cpp | 4 +- jlm/rvsdg/control.hpp | 2 +- jlm/rvsdg/nullary.hpp | 2 +- jlm/rvsdg/region.hpp | 2 +- jlm/rvsdg/simple-node.cpp | 30 ++++--- jlm/rvsdg/simple-node.hpp | 28 ++++--- jlm/rvsdg/simple-normal-form.cpp | 2 +- jlm/rvsdg/statemux.hpp | 2 +- jlm/rvsdg/view.cpp | 4 +- tests/TestRvsdgs.cpp | 22 ++---- tests/TestRvsdgs.hpp | 32 ++++---- .../rvsdg2rhls/MemoryConverterTests.cpp | 4 +- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 8 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 5 +- tests/jlm/llvm/opt/test-unroll.cpp | 5 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 6 +- tests/jlm/rvsdg/test-binary.cpp | 12 +-- tests/test-operation.hpp | 22 +++--- 79 files changed, 440 insertions(+), 462 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index 153962534..daa0f3624 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -15,7 +15,7 @@ namespace jlm::hls // Handles nodes with 2 inputs and 1 output circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::SimpleNode * node) { // Only handles nodes with a single output if (node->noutputs() != 1) @@ -385,7 +385,7 @@ RhlsToFirrtlConverter::MlirGenSimpleNode(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenSink(const jlm::rvsdg::simple_node * node) 
+RhlsToFirrtlConverter::MlirGenSink(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -410,7 +410,7 @@ RhlsToFirrtlConverter::MlirGenSink(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenLoopConstBuffer(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenLoopConstBuffer(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -465,7 +465,7 @@ RhlsToFirrtlConverter::MlirGenLoopConstBuffer(const jlm::rvsdg::simple_node * no } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenFork(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenFork(const jlm::rvsdg::SimpleNode * node) { auto op = dynamic_cast(&node->GetOperation()); bool isConstant = op->IsConstant(); @@ -567,7 +567,7 @@ RhlsToFirrtlConverter::MlirGenFork(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenStateGate(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenStateGate(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -659,7 +659,7 @@ RhlsToFirrtlConverter::MlirGenStateGate(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenHlsMemResp(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenHlsMemResp(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node, false); @@ -740,7 +740,7 @@ RhlsToFirrtlConverter::MlirGenHlsMemResp(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node, false); 
@@ -902,7 +902,7 @@ RhlsToFirrtlConverter::MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenHlsLoad(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenHlsLoad(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node, false); @@ -1077,7 +1077,7 @@ RhlsToFirrtlConverter::MlirGenHlsLoad(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenHlsDLoad(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenHlsDLoad(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node, false); @@ -1124,7 +1124,7 @@ RhlsToFirrtlConverter::MlirGenHlsDLoad(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::SimpleNode * node) { auto lmem_op = dynamic_cast(&(node->GetOperation())); JLM_ASSERT(lmem_op); @@ -1333,7 +1333,7 @@ RhlsToFirrtlConverter::MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenHlsStore(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenHlsStore(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node, false); @@ -1477,7 +1477,7 @@ RhlsToFirrtlConverter::MlirGenHlsStore(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenMem(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenMem(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node, true); @@ -1694,7 +1694,7 @@ RhlsToFirrtlConverter::MlirGenMem(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp 
-RhlsToFirrtlConverter::MlirGenTrigger(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenTrigger(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -1728,7 +1728,7 @@ RhlsToFirrtlConverter::MlirGenTrigger(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenPrint(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenPrint(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -1763,7 +1763,7 @@ RhlsToFirrtlConverter::MlirGenPrint(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenPredicationBuffer(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenPredicationBuffer(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -1828,7 +1828,7 @@ RhlsToFirrtlConverter::MlirGenPredicationBuffer(const jlm::rvsdg::simple_node * } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenBuffer(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenBuffer(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -1968,7 +1968,7 @@ RhlsToFirrtlConverter::MlirGenBuffer(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenAddrQueue(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenAddrQueue(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -2136,7 +2136,7 @@ RhlsToFirrtlConverter::MlirGenAddrQueue(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenDMux(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenDMux(const jlm::rvsdg::SimpleNode * node) { // Create the 
module and its input/output ports auto module = nodeToModule(node); @@ -2275,7 +2275,7 @@ RhlsToFirrtlConverter::MlirGenDMux(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenNDMux(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenNDMux(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -2322,7 +2322,7 @@ RhlsToFirrtlConverter::MlirGenNDMux(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGenBranch(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGenBranch(const jlm::rvsdg::SimpleNode * node) { // Create the module and its input/output ports auto module = nodeToModule(node); @@ -2369,7 +2369,7 @@ RhlsToFirrtlConverter::MlirGenBranch(const jlm::rvsdg::simple_node * node) } circt::firrtl::FModuleOp -RhlsToFirrtlConverter::MlirGen(const jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::MlirGen(const jlm::rvsdg::SimpleNode * node) { if (dynamic_cast(&(node->GetOperation()))) { @@ -2468,7 +2468,7 @@ RhlsToFirrtlConverter::MlirGen(const jlm::rvsdg::simple_node * node) return MlirGenSimpleNode(node); } -std::unordered_map +std::unordered_map RhlsToFirrtlConverter::MlirGen( hls::loop_node * loopNode, mlir::Block * body, @@ -2571,7 +2571,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB auto body = module.getBodyBlock(); // First we create and instantiate all the modules and keep them in a dictionary - std::unordered_map instances = + std::unordered_map instances = createInstances(subRegion, circuitBody, body); // Wire up the instances for (const auto & instance : instances) @@ -2620,7 +2620,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB { // Connect directly to mem auto mem_out = dynamic_cast(source->input(0)->origin()); - auto sourceNode = instances[dynamic_cast(mem_out->node())]; + auto 
sourceNode = instances[dynamic_cast(mem_out->node())]; auto sourcePort = GetInstancePort(sourceNode, "o" + std::to_string(o->index())); auto sinkPort = sinkNode->getResult(i + 2); Connect(body, sinkPort, sourcePort); @@ -2712,7 +2712,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB throw std::logic_error("Unsupported output"); } // Get the node of the output - jlm::rvsdg::simple_node * source = output->node(); + jlm::rvsdg::SimpleNode * source = output->node(); // Get the corresponding InstanceOp auto sourceNode = instances[source]; // Calculate the result port of the instance: @@ -2736,7 +2736,7 @@ RhlsToFirrtlConverter::MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitB return module; } -std::unordered_map +std::unordered_map RhlsToFirrtlConverter::createInstances( rvsdg::Region * subRegion, mlir::Block * circuitBody, @@ -2745,10 +2745,10 @@ RhlsToFirrtlConverter::createInstances( // create and instantiate all the modules and keep them in a dictionary auto clock = body->getArgument(0); auto reset = body->getArgument(1); - std::unordered_map instances; + std::unordered_map instances; for (const auto node : jlm::rvsdg::topdown_traverser(subRegion)) { - if (auto sn = dynamic_cast(node)) + if (auto sn = dynamic_cast(node)) { if (dynamic_cast(&(node->GetOperation())) || dynamic_cast(&(node->GetOperation()))) @@ -3728,7 +3728,7 @@ RhlsToFirrtlConverter::check_module(circt::firrtl::FModuleOp & module) } circt::firrtl::InstanceOp -RhlsToFirrtlConverter::AddInstanceOp(mlir::Block * body, jlm::rvsdg::simple_node * node) +RhlsToFirrtlConverter::AddInstanceOp(mlir::Block * body, jlm::rvsdg::SimpleNode * node) { auto name = GetModuleName(node); // Check if the module has already been instantiated else we need to generate it @@ -3855,11 +3855,11 @@ RhlsToFirrtlConverter::InitializeMemReq(circt::firrtl::FModuleOp module) Connect(body, memWidth, invalid3); } -// Takes a jlm::rvsdg::simple_node and creates a firrtl module with an input 
+// Takes a jlm::rvsdg::SimpleNode and creates a firrtl module with an input // bundle for each node input and output bundle for each node output // Returns a circt::firrtl::FModuleOp with an empty body circt::firrtl::FModuleOp -RhlsToFirrtlConverter::nodeToModule(const jlm::rvsdg::simple_node * node, bool mem) +RhlsToFirrtlConverter::nodeToModule(const jlm::rvsdg::SimpleNode * node, bool mem) { // Generate a vector with all inputs and outputs of the module ::llvm::SmallVector ports; diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 922973327..373edff25 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -104,23 +104,23 @@ class RhlsToFirrtlConverter : public BaseHLS std::unordered_map modules; // FIRRTL generating functions - std::unordered_map + std::unordered_map MlirGen(hls::loop_node * loopNode, mlir::Block * body, mlir::Block * circuitBody); circt::firrtl::FModuleOp MlirGen(rvsdg::Region * subRegion, mlir::Block * circuitBody); circt::firrtl::FModuleOp - MlirGen(const jlm::rvsdg::simple_node * node); + MlirGen(const jlm::rvsdg::SimpleNode * node); // Operations circt::firrtl::FModuleOp - MlirGenSink(const jlm::rvsdg::simple_node * node); + MlirGenSink(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenLoopConstBuffer(const jlm::rvsdg::simple_node * node); + MlirGenLoopConstBuffer(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenFork(const jlm::rvsdg::simple_node * node); + MlirGenFork(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenStateGate(const jlm::rvsdg::simple_node * node); + MlirGenStateGate(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenMem(const jlm::rvsdg::simple_node * node); + MlirGenMem(const jlm::rvsdg::SimpleNode * node); /** * Generate a FIRRTL module for a HLS memory response node that implements 
the functionality for * retreiving memory responses. @@ -128,7 +128,7 @@ class RhlsToFirrtlConverter : public BaseHLS * @return The generated FIRRTL module. */ circt::firrtl::FModuleOp - MlirGenHlsMemResp(const jlm::rvsdg::simple_node * node); + MlirGenHlsMemResp(const jlm::rvsdg::SimpleNode * node); /** * Generate a FIRRTL module for a HLS memory request node that implements the functionality for * performing memory requests. @@ -136,33 +136,33 @@ class RhlsToFirrtlConverter : public BaseHLS * @return The generated FIRRTL module. */ circt::firrtl::FModuleOp - MlirGenHlsMemReq(const jlm::rvsdg::simple_node * node); + MlirGenHlsMemReq(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenHlsLoad(const jlm::rvsdg::simple_node * node); + MlirGenHlsLoad(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenHlsDLoad(const jlm::rvsdg::simple_node * node); + MlirGenHlsDLoad(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenHlsLocalMem(const jlm::rvsdg::simple_node * node); + MlirGenHlsLocalMem(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenHlsStore(const jlm::rvsdg::simple_node * node); + MlirGenHlsStore(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenTrigger(const jlm::rvsdg::simple_node * node); + MlirGenTrigger(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenPrint(const jlm::rvsdg::simple_node * node); + MlirGenPrint(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenAddrQueue(const jlm::rvsdg::simple_node * node); + MlirGenAddrQueue(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenPredicationBuffer(const jlm::rvsdg::simple_node * node); + MlirGenPredicationBuffer(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenBuffer(const jlm::rvsdg::simple_node * node); + MlirGenBuffer(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenDMux(const 
jlm::rvsdg::simple_node * node); + MlirGenDMux(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenNDMux(const jlm::rvsdg::simple_node * node); + MlirGenNDMux(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenBranch(const jlm::rvsdg::simple_node * node); + MlirGenBranch(const jlm::rvsdg::SimpleNode * node); circt::firrtl::FModuleOp - MlirGenSimpleNode(const jlm::rvsdg::simple_node * node); + MlirGenSimpleNode(const jlm::rvsdg::SimpleNode * node); // Helper functions void @@ -247,7 +247,7 @@ class RhlsToFirrtlConverter : public BaseHLS circt::firrtl::WhenOp AddWhenOp(mlir::Block * body, mlir::Value condition, bool elseStatment); circt::firrtl::InstanceOp - AddInstanceOp(mlir::Block * body, jlm::rvsdg::simple_node * node); + AddInstanceOp(mlir::Block * body, jlm::rvsdg::SimpleNode * node); circt::firrtl::ConstantOp GetConstant(mlir::Block * body, int size, int value); circt::firrtl::InvalidValueOp @@ -274,7 +274,7 @@ class RhlsToFirrtlConverter : public BaseHLS mlir::BlockArgument GetResetSignal(circt::firrtl::FModuleOp module); circt::firrtl::FModuleOp - nodeToModule(const jlm::rvsdg::simple_node * node, bool mem = false); + nodeToModule(const jlm::rvsdg::SimpleNode * node, bool mem = false); circt::firrtl::IntType GetIntType(int size); circt::firrtl::IntType @@ -286,7 +286,7 @@ class RhlsToFirrtlConverter : public BaseHLS bool IsIdentityMapping(const jlm::rvsdg::match_op & op); - std::unordered_map + std::unordered_map createInstances(rvsdg::Region * subRegion, mlir::Block * circuitBody, mlir::Block * body); void check_module(circt::firrtl::FModuleOp & module); diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index e4cb17207..84fac591a 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -140,7 +140,7 @@ BaseHLS::create_node_names(rvsdg::Region * r) { for (auto & node : r->Nodes()) { - if (dynamic_cast(&node)) + if 
(dynamic_cast(&node)) { get_node_name(&node); } diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index 52782c33e..8477f7818 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -216,7 +216,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) for (auto node : jlm::rvsdg::topdown_traverser(sr)) { - if (dynamic_cast(node)) + if (dynamic_cast(node)) { auto node_dot = node_to_dot(node); if (top_nodes.count(node)) @@ -272,7 +272,7 @@ DotHLS::loop_to_dot(hls::loop_node * ln) // do edges outside in order not to pull other nodes into the cluster for (auto node : jlm::rvsdg::topdown_traverser(sr)) { - if (dynamic_cast(node)) + if (dynamic_cast(node)) { auto mx = dynamic_cast(&node->GetOperation()); auto node_name = get_node_name(node); @@ -307,7 +307,7 @@ DotHLS::prepare_loop_out_port(hls::loop_node * ln) // just translate outputs for (auto node : jlm::rvsdg::topdown_traverser(sr)) { - if (dynamic_cast(node)) + if (dynamic_cast(node)) { auto node_name = get_node_name(node); for (size_t i = 0; i < node->noutputs(); ++i) @@ -388,7 +388,7 @@ DotHLS::subregion_to_dot(rvsdg::Region * sr) // process nodes for (auto node : jlm::rvsdg::topdown_traverser(sr)) { - if (dynamic_cast(node)) + if (dynamic_cast(node)) { auto node_dot = node_to_dot(node); dot << node_dot; diff --git a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp index b0ac75705..806c30d1c 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp @@ -23,7 +23,7 @@ add_buffers(rvsdg::Region * region, bool pass_through) add_buffers(structnode->subregion(n), pass_through); } } - else if (dynamic_cast(node)) + else if (dynamic_cast(node)) { if (jlm::rvsdg::is(node) || jlm::rvsdg::is(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 297310b1e..29f98c916 100644 --- 
a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -31,7 +31,7 @@ add_prints(rvsdg::Region * region) // auto po = hls::print_op::create(*node->input(1)->origin())[0]; // node->input(1)->divert_to(po); // } - if (dynamic_cast(node) && node->noutputs() == 1 + if (dynamic_cast(node) && node->noutputs() == 1 && jlm::rvsdg::is(node->output(0)->type()) && !jlm::rvsdg::is(node)) { @@ -123,7 +123,7 @@ convert_prints( auto bt = dynamic_cast(&val->type()); JLM_ASSERT(bt); auto op = llvm::zext_op(bt->nbits(), 64); - val = jlm::rvsdg::simple_node::create_normalized(region, op, { val })[0]; + val = jlm::rvsdg::SimpleNode::create_normalized(region, op, { val })[0]; } llvm::CallNode::Create(printf_local, functionType, { bc, val }); node->output(0)->divert_users(node->input(0)->origin()); diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 4dbb0eac8..93e04e0af 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -124,7 +124,7 @@ add_triggers(rvsdg::Region * region) throw jlm::util::error("Unexpected node type: " + node->GetOperation().debug_string()); } } - else if (auto sn = dynamic_cast(node)) + else if (auto sn = dynamic_cast(node)) { JLM_ASSERT(trigger != nullptr); if (is_constant(node)) diff --git a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp index 45d08eba9..fbc02d960 100644 --- a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp @@ -23,8 +23,8 @@ namespace jlm::hls class TraceAllocaUses { public: - std::vector load_nodes; - std::vector store_nodes; + std::vector load_nodes; + std::vector store_nodes; TraceAllocaUses(jlm::rvsdg::output * op) { diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index ae7c50069..02cf0b147 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ 
b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -219,7 +219,7 @@ dump_xml(const rvsdg::Region * region, const std::string & file_name) void decouple_load( loop_node * loopNode, - jlm::rvsdg::simple_node * loadNode, + jlm::rvsdg::SimpleNode * loadNode, std::unordered_set & loop_slice) { // loadNode is always a part of loop_slice due to state edges @@ -377,7 +377,7 @@ process_loopnode(loop_node * loopNode) return true; } } - else if (auto simplenode = dynamic_cast(node)) + else if (auto simplenode = dynamic_cast(node)) { if (dynamic_cast(&simplenode->GetOperation())) { diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 0bb6d26bc..438be104a 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -33,10 +33,10 @@ distribute_constant(const rvsdg::SimpleOperation & op, rvsdg::simple_output * ou { // pass-through auto arg_replacement = dynamic_cast( - rvsdg::simple_node::create_normalized(ti->node()->subregion(), op, {})[0]); + rvsdg::SimpleNode::create_normalized(ti->node()->subregion(), op, {})[0]); ti->argument()->divert_users(arg_replacement); ti->output()->divert_users( - rvsdg::simple_node::create_normalized(out->region(), op, {})[0]); + rvsdg::SimpleNode::create_normalized(out->region(), op, {})[0]); distribute_constant(op, arg_replacement); arg->region()->RemoveResult(res->index()); arg->region()->RemoveArgument(arg->index()); @@ -57,7 +57,7 @@ distribute_constant(const rvsdg::SimpleOperation & op, rvsdg::simple_output * ou if (argument->nusers()) { auto arg_replacement = dynamic_cast( - rvsdg::simple_node::create_normalized(argument->region(), op, {})[0]); + rvsdg::SimpleNode::create_normalized(argument->region(), op, {})[0]); argument->divert_users(arg_replacement); distribute_constant(op, arg_replacement); } @@ -100,7 +100,7 @@ hls::distribute_constants(rvsdg::Region * region) throw util::error("Unexpected node type: " + 
node->GetOperation().debug_string()); } } - else if (auto sn = dynamic_cast(node)) + else if (auto sn = dynamic_cast(node)) { if (is_constant(node)) { diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 8d4a66fc8..5393959eb 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -245,7 +245,7 @@ instrument_ref( if (*dbt != *jlm::rvsdg::bittype::Create(64)) { jlm::llvm::zext_op op(dbt->nbits(), 64); - data = jlm::rvsdg::simple_node::create_normalized(data->region(), op, { data })[0]; + data = jlm::rvsdg::SimpleNode::create_normalized(data->region(), op, { data })[0]; } auto memstate = node->input(2)->origin(); auto callOp = jlm::llvm::CallNode::Create( diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index acaf0ba73..cb664d3ac 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -53,8 +53,8 @@ jlm::hls::route_request(rvsdg::Region * target, jlm::rvsdg::output * request) } } -jlm::rvsdg::simple_node * -replace_load(jlm::rvsdg::simple_node * orig, jlm::rvsdg::output * resp) +jlm::rvsdg::SimpleNode * +replace_load(jlm::rvsdg::SimpleNode * orig, jlm::rvsdg::output * resp) { auto addr = orig->input(0)->origin(); std::vector states; @@ -78,7 +78,7 @@ replace_load(jlm::rvsdg::simple_node * orig, jlm::rvsdg::output * resp) orig->output(i)->divert_users(nn->output(i)); } remove(orig); - return dynamic_cast(nn); + return dynamic_cast(nn); } const jlm::rvsdg::bitconstant_op * @@ -178,7 +178,7 @@ get_impport_function_name(jlm::rvsdg::input * input) void trace_function_calls( jlm::rvsdg::output * output, - std::vector & calls, + std::vector & calls, std::unordered_set & visited) { if (visited.count(output)) @@ -230,7 +230,7 @@ trace_function_calls( } } -jlm::rvsdg::simple_node * +jlm::rvsdg::SimpleNode * find_decouple_response( const jlm::llvm::lambda::node * lambda, 
const jlm::rvsdg::bitconstant_op * request_constant) @@ -246,7 +246,7 @@ find_decouple_response( } } JLM_ASSERT(response_function == nullptr); - std::vector reponse_calls; + std::vector reponse_calls; std::unordered_set visited; trace_function_calls(response_function, reponse_calls, visited); JLM_ASSERT(!reponse_calls.empty()); @@ -261,10 +261,10 @@ find_decouple_response( JLM_UNREACHABLE("No response found"); } -jlm::rvsdg::simple_node * +jlm::rvsdg::SimpleNode * replace_decouple( const jlm::llvm::lambda::node * lambda, - jlm::rvsdg::simple_node * decouple_request, + jlm::rvsdg::SimpleNode * decouple_request, jlm::rvsdg::output * resp) { JLM_ASSERT(dynamic_cast(&decouple_request->GetOperation())); @@ -299,11 +299,11 @@ replace_decouple( remove(decouple_response); auto nn = dynamic_cast(dload_out[0])->node(); - return dynamic_cast(nn); + return dynamic_cast(nn); } -jlm::rvsdg::simple_node * -replace_store(jlm::rvsdg::simple_node * orig) +jlm::rvsdg::SimpleNode * +replace_store(jlm::rvsdg::SimpleNode * orig) { auto addr = orig->input(0)->origin(); auto data = orig->input(1)->origin(); @@ -319,16 +319,16 @@ replace_store(jlm::rvsdg::simple_node * orig) orig->output(i)->divert_users(nn->output(i)); } remove(orig); - return dynamic_cast(nn); + return dynamic_cast(nn); } void gather_mem_nodes( jlm::rvsdg::Region * region, - std::vector & loadNodes, - std::vector & storeNodes, - std::vector & decoupleNodes, - std::unordered_set exclude) + std::vector & loadNodes, + std::vector & storeNodes, + std::vector & decoupleNodes, + std::unordered_set exclude) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -337,7 +337,7 @@ gather_mem_nodes( for (size_t n = 0; n < structnode->nsubregions(); n++) gather_mem_nodes(structnode->subregion(n), loadNodes, storeNodes, decoupleNodes, exclude); } - else if (auto simplenode = dynamic_cast(node)) + else if (auto simplenode = dynamic_cast(node)) { if (exclude.find(simplenode) != exclude.end()) { @@ -373,9 +373,9 @@ 
gather_mem_nodes( void TracePointer( jlm::rvsdg::output * output, - std::vector & loadNodes, - std::vector & storeNodes, - std::vector & decoupleNodes, + std::vector & loadNodes, + std::vector & storeNodes, + std::vector & decoupleNodes, std::unordered_set & visited) { if (!dynamic_cast(&output->type())) @@ -596,7 +596,7 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) auto requestTypePtr = get_mem_req_type(jlm::rvsdg::bittype::Create(64), false); auto requestTypePtrWrite = get_mem_req_type(jlm::rvsdg::bittype::Create(64), true); - std::unordered_set accountedNodes; + std::unordered_set accountedNodes; for (auto & portNode : portNodes) { newArgumentTypes.push_back(responseTypePtr); @@ -612,9 +612,9 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) accountedNodes.insert(std::get<1>(portNode).begin(), std::get<1>(portNode).end()); accountedNodes.insert(std::get<2>(portNode).begin(), std::get<2>(portNode).end()); } - std::vector unknownLoadNodes; - std::vector unknownStoreNodes; - std::vector unknownDecoupledNodes; + std::vector unknownLoadNodes; + std::vector unknownStoreNodes; + std::vector unknownDecoupledNodes; gather_mem_nodes( root, unknownLoadNodes, @@ -756,15 +756,15 @@ jlm::hls::ConnectRequestResponseMemPorts( const jlm::llvm::lambda::node * lambda, size_t argumentIndex, rvsdg::SubstitutionMap & smap, - const std::vector & originalLoadNodes, - const std::vector & originalStoreNodes, - const std::vector & originalDecoupledNodes) + const std::vector & originalLoadNodes, + const std::vector & originalStoreNodes, + const std::vector & originalDecoupledNodes) { // // We have the memory operations from the original lambda and need to lookup the corresponding // nodes in the new lambda // - std::vector loadNodes; + std::vector loadNodes; std::vector> loadTypes; for (auto loadNode : originalLoadNodes) { @@ -775,14 +775,14 @@ jlm::hls::ConnectRequestResponseMemPorts( &loadOutput->node()->GetOperation()); loadTypes.push_back(loadOp->GetLoadedType()); } 
- std::vector storeNodes; + std::vector storeNodes; for (auto storeNode : originalStoreNodes) { JLM_ASSERT(smap.contains(*storeNode->output(0))); auto storeOutput = dynamic_cast(smap.lookup(storeNode->output(0))); storeNodes.push_back(storeOutput->node()); } - std::vector decoupledNodes; + std::vector decoupledNodes; for (auto decoupledNode : originalDecoupledNodes) { JLM_ASSERT(smap.contains(*decoupledNode->output(0))); @@ -855,10 +855,10 @@ jlm::hls::ConnectRequestResponseMemPorts( return mem_req_op::create(loadAddresses, loadTypes, storeOperands, lambdaRegion)[0]; } -jlm::rvsdg::simple_node * +jlm::rvsdg::SimpleNode * jlm::hls::ReplaceLoad( rvsdg::SubstitutionMap & smap, - const jlm::rvsdg::simple_node * originalLoad, + const jlm::rvsdg::SimpleNode * originalLoad, jlm::rvsdg::output * response) { // We have the load from the original lambda since it is needed to update the smap @@ -891,11 +891,11 @@ jlm::hls::ReplaceLoad( replacedLoad->output(i)->divert_users(newLoad->output(i)); } remove(replacedLoad); - return dynamic_cast(newLoad); + return dynamic_cast(newLoad); } -jlm::rvsdg::simple_node * -jlm::hls::ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_node * originalStore) +jlm::rvsdg::SimpleNode * +jlm::hls::ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::SimpleNode * originalStore) { // We have the store from the original lambda since it is needed to update the smap // We need the store in the new lambda such that we can replace it with a store node with explicit @@ -918,14 +918,14 @@ jlm::hls::ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_n replacedStore->output(i)->divert_users(newStore->output(i)); } remove(replacedStore); - return dynamic_cast(newStore); + return dynamic_cast(newStore); } -jlm::rvsdg::simple_node * +jlm::rvsdg::SimpleNode * ReplaceDecouple( jlm::rvsdg::SubstitutionMap & smap, const jlm::llvm::lambda::node * lambda, - jlm::rvsdg::simple_node * originalDecoupleRequest, + 
jlm::rvsdg::SimpleNode * originalDecoupleRequest, jlm::rvsdg::output * response) { // We have the load from the original lambda since it is needed to update the smap @@ -966,5 +966,5 @@ ReplaceDecouple( remove(decoupledResponse); auto nodeOutput = dynamic_cast(decoupledLoadOutput[0])->node(); - return dynamic_cast(nodeOutput); + return dynamic_cast(nodeOutput); } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp index a8798afee..7cad9fc67 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.hpp @@ -13,9 +13,9 @@ namespace jlm::hls { typedef std::vector, - std::vector, - std::vector>> + std::vector, + std::vector, + std::vector>> port_load_store_decouple; /** @@ -45,18 +45,18 @@ ConnectRequestResponseMemPorts( const llvm::lambda::node * lambda, size_t argumentIndex, rvsdg::SubstitutionMap & smap, - const std::vector & originalLoadNodes, - const std::vector & originalStoreNodes, - const std::vector & originalDecoupledNodes); + const std::vector & originalLoadNodes, + const std::vector & originalStoreNodes, + const std::vector & originalDecoupledNodes); -jlm::rvsdg::simple_node * +jlm::rvsdg::SimpleNode * ReplaceLoad( rvsdg::SubstitutionMap & smap, - const jlm::rvsdg::simple_node * originalLoad, + const jlm::rvsdg::SimpleNode * originalLoad, jlm::rvsdg::output * response); -jlm::rvsdg::simple_node * -ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::simple_node * originalStore); +jlm::rvsdg::SimpleNode * +ReplaceStore(rvsdg::SubstitutionMap & smap, const jlm::rvsdg::SimpleNode * originalStore); jlm::rvsdg::output * route_response(rvsdg::Region * target, jlm::rvsdg::output * response); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index 7f2b49917..c23c7b7b2 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -38,8 +38,8 @@ dump_xml(const jlm::rvsdg::Region * region, 
const std::string & file_name) void find_load_store( jlm::rvsdg::output * op, - std::vector & load_nodes, - std::vector & store_nodes, + std::vector & load_nodes, + std::vector & store_nodes, std::unordered_set & visited) { if (!dynamic_cast(&op->type())) @@ -181,7 +181,7 @@ jlm::rvsdg::output * separate_load_edge( jlm::rvsdg::output * mem_edge, jlm::rvsdg::output * addr_edge, - jlm::rvsdg::simple_node ** load, + jlm::rvsdg::SimpleNode ** load, jlm::rvsdg::output ** new_mem_edge, std::vector & store_addresses, std::vector & store_dequeues, @@ -467,8 +467,8 @@ process_loops(jlm::rvsdg::output * state_edge) JLM_ASSERT(mem_edge_after_loop->nusers() == 1); auto common_user = *mem_edge_after_loop->begin(); - std::vector load_nodes; - std::vector store_nodes; + std::vector load_nodes; + std::vector store_nodes; std::unordered_set visited; // this is a hack to keep search within the loop visited.insert(mem_edge_after_loop); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index c920be727..2d6ab9fa1 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -67,7 +67,7 @@ GetMemoryStateResult(const llvm::lambda::node & lambda) } void -gather_mem_nodes(rvsdg::Region * region, std::vector & mem_nodes) +gather_mem_nodes(rvsdg::Region * region, std::vector & mem_nodes) { for (auto & node : jlm::rvsdg::topdown_traverser(region)) { @@ -76,7 +76,7 @@ gather_mem_nodes(rvsdg::Region * region, std::vector for (size_t n = 0; n < structnode->nsubregions(); n++) gather_mem_nodes(structnode->subregion(n), mem_nodes); } - else if (auto simplenode = dynamic_cast(node)) + else if (auto simplenode = dynamic_cast(node)) { if (dynamic_cast(&simplenode->GetOperation())) { @@ -137,7 +137,7 @@ mem_sep_independent(rvsdg::Region * region) return; } auto state_user = *state_arg->begin(); - std::vector mem_nodes; + std::vector mem_nodes; gather_mem_nodes(lambda_region, mem_nodes); auto entry_states = 
jlm::llvm::LambdaEntryMemoryStateSplitOperation::Create(*state_arg, 1 + mem_nodes.size()); @@ -169,9 +169,9 @@ rvsdg::RegionResult * trace_edge( jlm::rvsdg::output * common_edge, jlm::rvsdg::output * new_edge, - std::vector & load_nodes, - const std::vector & store_nodes, - std::vector & decouple_nodes) + std::vector & load_nodes, + const std::vector & store_nodes, + std::vector & decouple_nodes) { // follows along common edge and routes new edge through the same regions // redirects the supplied loads, stores and decouples to the new edge diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp index d3d0a7991..675b3ffbf 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp @@ -34,7 +34,7 @@ memstate_conv(rvsdg::Region * region) for (size_t n = 0; n < structnode->nsubregions(); n++) memstate_conv(structnode->subregion(n)); } - else if (auto simplenode = dynamic_cast(node)) + else if (auto simplenode = dynamic_cast(node)) { if (dynamic_cast( &simplenode->GetOperation()) diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index c9a63478f..dbe680b55 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -114,7 +114,7 @@ fix_match_inversion(rvsdg::GammaNode * old_gamma) { { 0, match->alternative(1) }, { 1, match->alternative(0) } }, default_alternative, match->nalternatives()); - auto new_match = rvsdg::simple_node::create_normalized( + auto new_match = rvsdg::SimpleNode::create_normalized( no->region(), op, { no->node()->input(0)->origin() })[0]; diff --git a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index c7f69d296..6291c5804 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -44,7 +44,7 @@ remove_redundant_buf(rvsdg::Region * 
region) remove_redundant_buf(structnode->subregion(n)); } } - else if (dynamic_cast(node)) + else if (dynamic_cast(node)) { if (auto buf = dynamic_cast(&node->GetOperation())) { diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index 80c30b844..a0fe61ce3 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -44,7 +44,7 @@ remove_unused_state(rvsdg::Region * region, bool can_remove_arguments) // exit will come before entry for (auto & node : jlm::rvsdg::bottomup_traverser(region)) { - if (auto simplenode = dynamic_cast(node)) + if (auto simplenode = dynamic_cast(node)) { if (dynamic_cast(&node->GetOperation())) { diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 20e9534d2..16373bf09 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -164,7 +164,7 @@ inline_calls(rvsdg::Region * region) JLM_ASSERT(rvsdg::is(so->node())); auto ln = dynamic_cast(traced)->node(); llvm::inlineCall( - dynamic_cast(node), + dynamic_cast(node), dynamic_cast(ln)); // restart for this region inline_calls(region); @@ -209,7 +209,7 @@ convert_alloca(rvsdg::Region * region) else { llvm::ConstantAggregateZero cop(po->ValueType()); - cout = jlm::rvsdg::simple_node::create_normalized(db->subregion(), cop, {})[0]; + cout = jlm::rvsdg::SimpleNode::create_normalized(db->subregion(), cop, {})[0]; } auto delta = db->finalize(cout); jlm::llvm::GraphExport::Create(*delta, delta_name); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 8306c70f5..56ce261b1 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -64,7 +64,7 @@ class branch_op final : public rvsdg::SimpleOperation auto region = predicate.region(); branch_op op(ctl->nalternatives(), value.Type(), loop); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &predicate, &value 
}); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &predicate, &value }); } bool loop; // only used for dot output @@ -153,7 +153,7 @@ class fork_op final : public rvsdg::SimpleOperation auto region = value.region(); fork_op op(nalternatives, value.Type(), isConstant); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &value }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &value }); } /** @@ -209,7 +209,7 @@ class merge_op final : public rvsdg::SimpleOperation auto region = alternatives.front()->region(); merge_op op(alternatives.size(), alternatives.front()->Type()); - return jlm::rvsdg::simple_node::create_normalized(region, op, alternatives); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, alternatives); } }; @@ -270,7 +270,7 @@ class mux_op final : public rvsdg::SimpleOperation operands.push_back(&predicate); operands.insert(operands.end(), alternatives.begin(), alternatives.end()); mux_op op(alternatives.size(), alternatives.front()->Type(), discarding, loop); - return jlm::rvsdg::simple_node::create_normalized(region, op, operands); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, operands); } bool discarding; @@ -320,7 +320,7 @@ class sink_op final : public rvsdg::SimpleOperation { auto region = value.region(); sink_op op(value.Type()); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &value }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &value }); } }; @@ -361,7 +361,7 @@ class predicate_buffer_op final : public rvsdg::SimpleOperation if (!ctl) throw util::error("Predicate needs to be a control type."); predicate_buffer_op op(ctl); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &predicate }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &predicate }); } }; @@ -404,7 +404,7 @@ class loop_constant_buffer_op final : public rvsdg::SimpleOperation if (!ctl) throw util::error("Predicate needs to be a 
control type."); loop_constant_buffer_op op(ctl, value.Type()); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &predicate, &value }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &predicate, &value }); } }; @@ -448,7 +448,7 @@ class buffer_op final : public rvsdg::SimpleOperation { auto region = value.region(); buffer_op op(value.Type(), capacity, pass_through); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &value }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &value }); } size_t capacity; @@ -525,7 +525,7 @@ class trigger_op final : public rvsdg::SimpleOperation auto region = value.region(); trigger_op op(value.Type()); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &tg, &value }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &tg, &value }); } }; @@ -580,7 +580,7 @@ class print_op final : public rvsdg::SimpleOperation auto region = value.region(); print_op op(value.Type()); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &value }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &value }); } }; @@ -945,7 +945,7 @@ class load_op final : public rvsdg::SimpleOperation inputs.push_back(&addr); inputs.insert(inputs.end(), states.begin(), states.end()); inputs.push_back(&load_result); - return jlm::rvsdg::simple_node::create_normalized(region, op, inputs); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, inputs); } [[nodiscard]] const llvm::PointerType & @@ -1027,7 +1027,7 @@ class addr_queue_op final : public rvsdg::SimpleOperation auto region = check.region(); auto pointerType = std::dynamic_pointer_cast(check.Type()); addr_queue_op op(pointerType, capacity, combinatorial); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &check, &enq, &deq })[0]; + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &check, &enq, &deq })[0]; } bool combinatorial; @@ -1083,7 +1083,7 @@ class 
state_gate_op final : public rvsdg::SimpleOperation std::vector inputs; inputs.push_back(&addr); inputs.insert(inputs.end(), states.begin(), states.end()); - return jlm::rvsdg::simple_node::create_normalized(region, op, inputs); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, inputs); } }; @@ -1140,7 +1140,7 @@ class decoupled_load_op final : public rvsdg::SimpleOperation std::vector inputs; inputs.push_back(&addr); inputs.push_back(&load_result); - return jlm::rvsdg::simple_node::create_normalized(load_result.region(), op, inputs); + return jlm::rvsdg::SimpleNode::create_normalized(load_result.region(), op, inputs); } [[nodiscard]] const llvm::PointerType & @@ -1224,7 +1224,7 @@ class mem_resp_op final : public rvsdg::SimpleOperation // auto result_type = dynamic_cast(&result.type()); // JLM_ASSERT(result_type && result_type->nbits()==64); mem_resp_op op(output_types); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &result }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &result }); } }; @@ -1335,7 +1335,7 @@ class mem_req_op final : public rvsdg::SimpleOperation mem_req_op op(loadTypes, storeTypes); std::vector operands(load_operands); operands.insert(operands.end(), store_operands.begin(), store_operands.end()); - return jlm::rvsdg::simple_node::create_normalized(region, op, operands); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, operands); } size_t @@ -1428,7 +1428,7 @@ class store_op final : public rvsdg::SimpleOperation inputs.push_back(&addr); inputs.push_back(&value); inputs.insert(inputs.end(), states.begin(), states.end()); - return rvsdg::simple_node::create_normalized(value.region(), op, inputs); + return rvsdg::SimpleNode::create_normalized(value.region(), op, inputs); } [[nodiscard]] const llvm::PointerType & @@ -1486,7 +1486,7 @@ class local_mem_op final : public rvsdg::SimpleOperation create(std::shared_ptr at, rvsdg::Region * region) { local_mem_op op(std::move(at)); - return 
jlm::rvsdg::simple_node::create_normalized(region, op, {}); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, {}); } }; @@ -1534,7 +1534,7 @@ class local_mem_resp_op final : public rvsdg::SimpleOperation auto region = mem.region(); auto at = std::dynamic_pointer_cast(mem.Type()); local_mem_resp_op op(at, resp_count); - return jlm::rvsdg::simple_node::create_normalized(region, op, { &mem }); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, { &mem }); } }; @@ -1606,7 +1606,7 @@ class local_load_op final : public rvsdg::SimpleOperation inputs.push_back(&index); inputs.insert(inputs.end(), states.begin(), states.end()); inputs.push_back(&load_result); - return jlm::rvsdg::simple_node::create_normalized(region, op, inputs); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, inputs); } [[nodiscard]] std::shared_ptr @@ -1683,7 +1683,7 @@ class local_store_op final : public rvsdg::SimpleOperation inputs.push_back(&index); inputs.push_back(&value); inputs.insert(inputs.end(), states.begin(), states.end()); - return jlm::rvsdg::simple_node::create_normalized(region, op, inputs); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, inputs); } [[nodiscard]] const jlm::rvsdg::ValueType & @@ -1761,7 +1761,7 @@ class local_mem_req_op final : public rvsdg::SimpleOperation std::vector operands(1, &mem); operands.insert(operands.end(), load_operands.begin(), load_operands.end()); operands.insert(operands.end(), store_operands.begin(), store_operands.end()); - return jlm::rvsdg::simple_node::create_normalized(region, op, operands); + return jlm::rvsdg::SimpleNode::create_normalized(region, op, operands); } }; diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index d8a1f7a09..a4b7e53f6 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -427,7 +427,7 @@ mark(const rvsdg::StructuralNode * node, cnectx & ctx) } static void -mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) +mark(const jlm::rvsdg::SimpleNode * node, 
cnectx & ctx) { if (node->ninputs() == 0) { @@ -470,7 +470,7 @@ mark(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto simple = dynamic_cast(node)) + if (auto simple = dynamic_cast(node)) mark(simple, ctx); else mark(static_cast(node), ctx); @@ -593,7 +593,7 @@ divert(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto simple = dynamic_cast(node)) + if (auto simple = dynamic_cast(node)) divert_outputs(simple, ctx); else divert(static_cast(node), ctx); diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index e0a5d3f87..6c9f276c1 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -236,7 +236,7 @@ structural_node_to_dot(rvsdg::StructuralNode * structuralNode) } std::string -simple_node_to_dot(jlm::rvsdg::simple_node * simpleNode) +simple_node_to_dot(jlm::rvsdg::SimpleNode * simpleNode) { auto SPACER = " \n"; auto name = get_dot_name(simpleNode); @@ -341,7 +341,7 @@ region_to_dot(rvsdg::Region * region) // nodes for (auto node : jlm::rvsdg::topdown_traverser(region)) { - if (auto simpleNode = dynamic_cast(node)) + if (auto simpleNode = dynamic_cast(node)) { auto node_dot = simple_node_to_dot(simpleNode); dot << node_dot; diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 752d2a41e..10f6b4789 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -482,7 +482,7 @@ ConvertSelect( auto op = rvsdg::match_op(1, { { 1, 1 } }, 0, 2); auto p = variableMap.lookup(threeAddressCode.operand(0)); - auto predicate = rvsdg::simple_node::create_normalized(®ion, op, { p })[0]; + auto predicate = rvsdg::SimpleNode::create_normalized(®ion, op, { p })[0]; auto gamma = rvsdg::GammaNode::create(predicate, 2); auto ev1 = gamma->AddEntryVar(variableMap.lookup(threeAddressCode.operand(2))); @@ 
-571,7 +571,7 @@ ConvertThreeAddressCode( auto & simpleOperation = static_cast(threeAddressCode.operation()); - auto results = rvsdg::simple_node::create_normalized(®ion, simpleOperation, operands); + auto results = rvsdg::SimpleNode::create_normalized(®ion, simpleOperation, operands); JLM_ASSERT(results.size() == threeAddressCode.nresults()); for (size_t n = 0; n < threeAddressCode.nresults(); n++) diff --git a/jlm/llvm/ir/operators/GetElementPtr.hpp b/jlm/llvm/ir/operators/GetElementPtr.hpp index bfda9dd3c..8fc19d0cd 100644 --- a/jlm/llvm/ir/operators/GetElementPtr.hpp +++ b/jlm/llvm/ir/operators/GetElementPtr.hpp @@ -111,7 +111,7 @@ class GetElementPtrOperation final : public rvsdg::SimpleOperation std::vector operands(1, baseAddress); operands.insert(operands.end(), offsets.begin(), offsets.end()); - return rvsdg::simple_node::create_normalized(baseAddress->region(), operation, operands)[0]; + return rvsdg::SimpleNode::create_normalized(baseAddress->region(), operation, operands)[0]; } private: diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index aba6b444a..ca8b64ca1 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -15,7 +15,7 @@ namespace jlm::llvm const LoadOperation & LoadNode::GetOperation() const noexcept { - return *util::AssertedCast(&simple_node::GetOperation()); + return *util::AssertedCast(&SimpleNode::GetOperation()); } LoadNonVolatileOperation::~LoadNonVolatileOperation() noexcept = default; @@ -51,7 +51,7 @@ LoadNonVolatileOperation::NumMemoryStates() const noexcept const LoadNonVolatileOperation & LoadNonVolatileNode::GetOperation() const noexcept { - return *util::AssertedCast(&simple_node::GetOperation()); + return *util::AssertedCast(&SimpleNode::GetOperation()); } [[nodiscard]] LoadNode::MemoryStateInputRange diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index 129e4fde1..1d5f9d197 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ 
b/jlm/llvm/ir/operators/Load.hpp @@ -257,14 +257,14 @@ class LoadVolatileOperation final : public LoadOperation * @see LoadVolatileNode * @see LoadNonVolatileNode */ -class LoadNode : public rvsdg::simple_node +class LoadNode : public rvsdg::SimpleNode { protected: LoadNode( rvsdg::Region & region, const LoadOperation & operation, const std::vector & operands) - : simple_node(®ion, operation, operands) + : SimpleNode(®ion, operation, operands) {} public: diff --git a/jlm/llvm/ir/operators/MemCpy.hpp b/jlm/llvm/ir/operators/MemCpy.hpp index d7b431193..96c63bb3e 100644 --- a/jlm/llvm/ir/operators/MemCpy.hpp +++ b/jlm/llvm/ir/operators/MemCpy.hpp @@ -115,7 +115,7 @@ class MemCpyNonVolatileOperation final : public MemCpyOperation operands.insert(operands.end(), memoryStates.begin(), memoryStates.end()); MemCpyNonVolatileOperation operation(length->Type(), memoryStates.size()); - return rvsdg::simple_node::create_normalized(destination->region(), operation, operands); + return rvsdg::SimpleNode::create_normalized(destination->region(), operation, operands); } private: @@ -184,7 +184,7 @@ class MemCpyVolatileOperation final : public MemCpyOperation return tac::create(operation, operands); } - static rvsdg::simple_node & + static rvsdg::SimpleNode & CreateNode( rvsdg::output & destination, rvsdg::output & source, @@ -196,7 +196,7 @@ class MemCpyVolatileOperation final : public MemCpyOperation operands.insert(operands.end(), memoryStates.begin(), memoryStates.end()); MemCpyVolatileOperation operation(length.Type(), memoryStates.size()); - return *rvsdg::simple_node::create(destination.region(), operation, operands); + return *rvsdg::SimpleNode::create(destination.region(), operation, operands); } private: diff --git a/jlm/llvm/ir/operators/MemoryStateOperations.hpp b/jlm/llvm/ir/operators/MemoryStateOperations.hpp index 597b6ed03..a6bcd8d63 100644 --- a/jlm/llvm/ir/operators/MemoryStateOperations.hpp +++ b/jlm/llvm/ir/operators/MemoryStateOperations.hpp @@ -58,7 +58,7 @@ 
class MemoryStateMergeOperation final : public MemoryStateOperation MemoryStateMergeOperation operation(operands.size()); auto region = operands.front()->region(); - return rvsdg::simple_node::create_normalized(region, operation, operands)[0]; + return rvsdg::SimpleNode::create_normalized(region, operation, operands)[0]; } static std::unique_ptr @@ -103,7 +103,7 @@ class MemoryStateSplitOperation final : public MemoryStateOperation throw util::error("Insufficient number of results."); MemoryStateSplitOperation operation(numResults); - return rvsdg::simple_node::create_normalized(operand.region(), operation, { &operand }); + return rvsdg::SimpleNode::create_normalized(operand.region(), operation, { &operand }); } }; @@ -140,7 +140,7 @@ class LambdaEntryMemoryStateSplitOperation final : public MemoryStateOperation { auto region = output.region(); LambdaEntryMemoryStateSplitOperation operation(numResults); - return rvsdg::simple_node::create_normalized(region, operation, { &output }); + return rvsdg::SimpleNode::create_normalized(region, operation, { &output }); } }; @@ -176,7 +176,7 @@ class LambdaExitMemoryStateMergeOperation final : public MemoryStateOperation Create(rvsdg::Region & region, const std::vector & operands) { LambdaExitMemoryStateMergeOperation operation(operands.size()); - return *rvsdg::simple_node::create_normalized(®ion, operation, operands)[0]; + return *rvsdg::SimpleNode::create_normalized(®ion, operation, operands)[0]; } }; @@ -212,7 +212,7 @@ class CallEntryMemoryStateMergeOperation final : public MemoryStateOperation Create(rvsdg::Region & region, const std::vector & operands) { CallEntryMemoryStateMergeOperation operation(operands.size()); - return *rvsdg::simple_node::create_normalized(®ion, operation, operands)[0]; + return *rvsdg::SimpleNode::create_normalized(®ion, operation, operands)[0]; } }; @@ -249,7 +249,7 @@ class CallExitMemoryStateSplitOperation final : public MemoryStateOperation { auto region = output.region(); 
CallExitMemoryStateSplitOperation operation(numResults); - return rvsdg::simple_node::create_normalized(region, operation, { &output }); + return rvsdg::SimpleNode::create_normalized(region, operation, { &output }); } }; diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 62265e253..53cea7de9 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -14,7 +14,7 @@ namespace jlm::llvm const StoreOperation & StoreNode::GetOperation() const noexcept { - return *util::AssertedCast(&simple_node::GetOperation()); + return *util::AssertedCast(&SimpleNode::GetOperation()); } StoreNonVolatileOperation::~StoreNonVolatileOperation() noexcept = default; diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index 1160b825d..fef0851c7 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -214,14 +214,14 @@ class StoreNonVolatileOperation final : public StoreOperation * @see StoreVolatileNode * @see StoreNonVolatileNode */ -class StoreNode : public rvsdg::simple_node +class StoreNode : public rvsdg::SimpleNode { protected: StoreNode( rvsdg::Region & region, const StoreOperation & operation, const std::vector & operands) - : simple_node(®ion, operation, operands) + : SimpleNode(®ion, operation, operands) {} public: diff --git a/jlm/llvm/ir/operators/alloca.hpp b/jlm/llvm/ir/operators/alloca.hpp index 8236ddc43..590fb5e8e 100644 --- a/jlm/llvm/ir/operators/alloca.hpp +++ b/jlm/llvm/ir/operators/alloca.hpp @@ -94,7 +94,7 @@ class alloca_op final : public rvsdg::SimpleOperation throw jlm::util::error("expected bits type."); alloca_op op(std::move(allocatedType), std::move(bt), alignment); - return rvsdg::simple_node::create_normalized(size->region(), op, { size }); + return rvsdg::SimpleNode::create_normalized(size->region(), op, { size }); } private: diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index 2bef541d4..f3214509b 100644 --- 
a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -285,21 +285,21 @@ class CallTypeClassifier final /** \brief Call node * */ -class CallNode final : public jlm::rvsdg::simple_node +class CallNode final : public jlm::rvsdg::SimpleNode { private: CallNode( rvsdg::Region & region, const CallOperation & operation, const std::vector & operands) - : simple_node(®ion, operation, operands) + : SimpleNode(®ion, operation, operands) {} public: [[nodiscard]] const CallOperation & GetOperation() const noexcept override { - return *jlm::util::AssertedCast(&simple_node::GetOperation()); + return *jlm::util::AssertedCast(&SimpleNode::GetOperation()); } /** @@ -418,11 +418,11 @@ class CallNode final : public jlm::rvsdg::simple_node * @see GetMemoryStateInput() * @see GetMemoryStateExitSplit() */ - [[nodiscard]] static rvsdg::simple_node * + [[nodiscard]] static rvsdg::SimpleNode * GetMemoryStateEntryMerge(const CallNode & callNode) noexcept { auto node = rvsdg::output::GetNode(*callNode.GetMemoryStateInput()->origin()); - return is(node) ? dynamic_cast(node) + return is(node) ? dynamic_cast(node) : nullptr; } @@ -435,7 +435,7 @@ class CallNode final : public jlm::rvsdg::simple_node * @see GetMemoryStateOutput() * @see GetMemoryStateEntryMerge() */ - [[nodiscard]] static rvsdg::simple_node * + [[nodiscard]] static rvsdg::SimpleNode * GetMemoryStateExitSplit(const CallNode & callNode) noexcept { // If a memory state exit split node is present, then we would expect the node to be the only @@ -444,7 +444,7 @@ class CallNode final : public jlm::rvsdg::simple_node return nullptr; auto node = rvsdg::node_input::GetNode(**callNode.GetMemoryStateOutput()->begin()); - return is(node) ? dynamic_cast(node) + return is(node) ? 
dynamic_cast(node) : nullptr; } diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index f0471ab59..f0dbabc58 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -134,17 +134,17 @@ node::GetMemoryStateRegionResult() const noexcept return *result; } -rvsdg::simple_node * +rvsdg::SimpleNode * node::GetMemoryStateExitMerge(const lambda::node & lambdaNode) noexcept { auto & result = lambdaNode.GetMemoryStateRegionResult(); auto node = rvsdg::output::GetNode(*result.origin()); - return is(node) ? dynamic_cast(node) + return is(node) ? dynamic_cast(node) : nullptr; } -rvsdg::simple_node * +rvsdg::SimpleNode * node::GetMemoryStateEntrySplit(const lambda::node & lambdaNode) noexcept { auto & argument = lambdaNode.GetMemoryStateRegionArgument(); @@ -155,7 +155,7 @@ node::GetMemoryStateEntrySplit(const lambda::node & lambdaNode) noexcept return nullptr; auto node = rvsdg::node_input::GetNode(**argument.begin()); - return is(node) ? dynamic_cast(node) + return is(node) ? 
dynamic_cast(node) : nullptr; } diff --git a/jlm/llvm/ir/operators/lambda.hpp b/jlm/llvm/ir/operators/lambda.hpp index b9a3a579b..67b939942 100644 --- a/jlm/llvm/ir/operators/lambda.hpp +++ b/jlm/llvm/ir/operators/lambda.hpp @@ -342,7 +342,7 @@ class node final : public rvsdg::StructuralNode * * @see GetMemoryStateExitMerge() */ - static rvsdg::simple_node * + static rvsdg::SimpleNode * GetMemoryStateEntrySplit(const lambda::node & lambdaNode) noexcept; /** @@ -354,7 +354,7 @@ class node final : public rvsdg::StructuralNode * * @see GetMemoryStateEntrySplit() */ - [[nodiscard]] static rvsdg::simple_node * + [[nodiscard]] static rvsdg::SimpleNode * GetMemoryStateExitMerge(const lambda::node & lambdaNode) noexcept; /** diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index fee9c06b7..a5ba66b58 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -473,7 +473,7 @@ class ConstantPointerNullOperation final : public rvsdg::SimpleOperation Create(rvsdg::Region * region, std::shared_ptr type) { ConstantPointerNullOperation operation(CheckAndExtractType(type)); - return jlm::rvsdg::simple_node::create_normalized(region, operation, {})[0]; + return jlm::rvsdg::SimpleNode::create_normalized(region, operation, {})[0]; } private: @@ -563,7 +563,7 @@ class bits2ptr_op final : public jlm::rvsdg::unary_op throw jlm::util::error("expected pointer type."); bits2ptr_op op(ot, pt); - return jlm::rvsdg::simple_node::create_normalized(operand->region(), op, { operand })[0]; + return jlm::rvsdg::SimpleNode::create_normalized(operand->region(), op, { operand })[0]; } }; @@ -694,10 +694,7 @@ class ConstantDataArray final : public rvsdg::SimpleOperation } ConstantDataArray operation(std::move(valueType), elements.size()); - return jlm::rvsdg::simple_node::create_normalized( - elements[0]->region(), - operation, - elements)[0]; + return jlm::rvsdg::SimpleNode::create_normalized(elements[0]->region(), operation, 
elements)[0]; } }; @@ -848,7 +845,7 @@ class zext_op final : public jlm::rvsdg::unary_op auto resultBitType = CheckAndExtractBitType(resultType); zext_op operation(std::move(operandBitType), std::move(resultBitType)); - return *rvsdg::simple_node::create_normalized(operand.region(), operation, { &operand })[0]; + return *rvsdg::SimpleNode::create_normalized(operand.region(), operation, { &operand })[0]; } private: @@ -1042,7 +1039,7 @@ class UndefValueOperation final : public rvsdg::SimpleOperation Create(rvsdg::Region & region, std::shared_ptr type) { UndefValueOperation operation(std::move(type)); - return jlm::rvsdg::simple_node::create_normalized(®ion, operation, {})[0]; + return jlm::rvsdg::SimpleNode::create_normalized(®ion, operation, {})[0]; } static std::unique_ptr @@ -1125,7 +1122,7 @@ class PoisonValueOperation final : public rvsdg::SimpleOperation auto valueType = CheckAndConvertType(type); PoisonValueOperation operation(std::move(valueType)); - return jlm::rvsdg::simple_node::create_normalized(region, operation, {})[0]; + return jlm::rvsdg::SimpleNode::create_normalized(region, operation, {})[0]; } private: @@ -1478,7 +1475,7 @@ class valist_op final : public rvsdg::SimpleOperation operandTypes.emplace_back(operand->Type()); valist_op operation(std::move(operandTypes)); - return jlm::rvsdg::simple_node::create_normalized(®ion, operation, operands)[0]; + return jlm::rvsdg::SimpleNode::create_normalized(®ion, operation, operands)[0]; } }; @@ -1544,7 +1541,7 @@ class bitcast_op final : public jlm::rvsdg::unary_op auto pair = check_types(operand->Type(), rtype); bitcast_op op(pair.first, pair.second); - return jlm::rvsdg::simple_node::create_normalized(operand->region(), op, { operand })[0]; + return jlm::rvsdg::SimpleNode::create_normalized(operand->region(), op, { operand })[0]; } private: @@ -1613,7 +1610,7 @@ class ConstantStruct final : public rvsdg::SimpleOperation auto structType = CheckAndExtractStructType(std::move(resultType)); ConstantStruct 
operation(std::move(structType)); - return *rvsdg::simple_node::create_normalized(®ion, operation, operands)[0]; + return *rvsdg::SimpleNode::create_normalized(®ion, operation, operands)[0]; } private: @@ -1723,7 +1720,7 @@ class trunc_op final : public jlm::rvsdg::unary_op throw jlm::util::error("expected bits type."); trunc_op op(std::move(ot), jlm::rvsdg::bittype::Create(ndstbits)); - return jlm::rvsdg::simple_node::create_normalized(operand->region(), op, { operand })[0]; + return jlm::rvsdg::SimpleNode::create_normalized(operand->region(), op, { operand })[0]; } }; @@ -1907,7 +1904,7 @@ class ConstantArray final : public rvsdg::SimpleOperation } ConstantArray operation(valueType, operands.size()); - return rvsdg::simple_node::create_normalized(operands[0]->region(), operation, operands)[0]; + return rvsdg::SimpleNode::create_normalized(operands[0]->region(), operation, operands)[0]; } }; @@ -1948,7 +1945,7 @@ class ConstantAggregateZero final : public rvsdg::SimpleOperation Create(rvsdg::Region & region, std::shared_ptr type) { ConstantAggregateZero operation(std::move(type)); - return jlm::rvsdg::simple_node::create_normalized(®ion, operation, {})[0]; + return jlm::rvsdg::SimpleNode::create_normalized(®ion, operation, {})[0]; } }; @@ -2508,7 +2505,7 @@ class malloc_op final : public rvsdg::SimpleOperation throw jlm::util::error("expected bits type."); malloc_op op(std::move(bt)); - return jlm::rvsdg::simple_node::create_normalized(size->region(), op, { size }); + return jlm::rvsdg::SimpleNode::create_normalized(size->region(), op, { size }); } }; @@ -2562,7 +2559,7 @@ class FreeOperation final : public rvsdg::SimpleOperation operands.push_back(iOState); FreeOperation operation(memoryStates.size()); - return jlm::rvsdg::simple_node::create_normalized(pointer->region(), operation, operands); + return jlm::rvsdg::SimpleNode::create_normalized(pointer->region(), operation, operands); } private: diff --git a/jlm/llvm/ir/operators/sext.cpp 
b/jlm/llvm/ir/operators/sext.cpp index d19651345..1ea23141a 100644 --- a/jlm/llvm/ir/operators/sext.cpp +++ b/jlm/llvm/ir/operators/sext.cpp @@ -44,7 +44,7 @@ perform_bitunary_reduction(const sext_op & op, rvsdg::output * operand) auto uop = static_cast(&unary->GetOperation()); auto output = sext_op::create(op.ndstbits(), unary->input(0)->origin()); - return rvsdg::simple_node::create_normalized(region, *uop->create(op.ndstbits()), { output })[0]; + return rvsdg::SimpleNode::create_normalized(region, *uop->create(op.ndstbits()), { output })[0]; } static rvsdg::output * @@ -59,10 +59,7 @@ perform_bitbinary_reduction(const sext_op & op, rvsdg::output * operand) auto op1 = sext_op::create(op.ndstbits(), binary->input(0)->origin()); auto op2 = sext_op::create(op.ndstbits(), binary->input(1)->origin()); - return rvsdg::simple_node::create_normalized( - region, - *bop->create(op.ndstbits()), - { op1, op2 })[0]; + return rvsdg::SimpleNode::create_normalized(region, *bop->create(op.ndstbits()), { op1, op2 })[0]; } static rvsdg::output * diff --git a/jlm/llvm/ir/operators/sext.hpp b/jlm/llvm/ir/operators/sext.hpp index 3bf1af6e5..f8a851fdf 100644 --- a/jlm/llvm/ir/operators/sext.hpp +++ b/jlm/llvm/ir/operators/sext.hpp @@ -96,7 +96,7 @@ class sext_op final : public rvsdg::unary_op throw jlm::util::error("expected bits type."); sext_op op(std::move(ot), rvsdg::bittype::Create(ndstbits)); - return rvsdg::simple_node::create_normalized(operand->region(), op, { operand })[0]; + return rvsdg::SimpleNode::create_normalized(operand->region(), op, { operand })[0]; } }; diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 8ef58b653..660667c11 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -60,7 +60,7 @@ class DeadNodeElimination::Context final bool IsAlive(const rvsdg::Node & node) const noexcept { - if (auto simpleNode = dynamic_cast(&node)) + if (auto simpleNode = dynamic_cast(&node)) { 
return SimpleNodes_.Contains(simpleNode); } @@ -83,7 +83,7 @@ class DeadNodeElimination::Context final } private: - util::HashSet SimpleNodes_; + util::HashSet SimpleNodes_; util::HashSet Outputs_; }; diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index 0873b6609..bea16c108 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -608,7 +608,7 @@ class Andersen::Statistics final : public util::Statistics }; void -Andersen::AnalyzeSimpleNode(const rvsdg::simple_node & node) +Andersen::AnalyzeSimpleNode(const rvsdg::SimpleNode & node) { const auto & op = node.GetOperation(); @@ -661,7 +661,7 @@ Andersen::AnalyzeSimpleNode(const rvsdg::simple_node & node) } void -Andersen::AnalyzeAlloca(const rvsdg::simple_node & node) +Andersen::AnalyzeAlloca(const rvsdg::SimpleNode & node) { const auto allocaOp = util::AssertedCast(&node.GetOperation()); @@ -674,7 +674,7 @@ Andersen::AnalyzeAlloca(const rvsdg::simple_node & node) } void -Andersen::AnalyzeMalloc(const rvsdg::simple_node & node) +Andersen::AnalyzeMalloc(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -747,7 +747,7 @@ Andersen::AnalyzeCall(const CallNode & callNode) } void -Andersen::AnalyzeGep(const rvsdg::simple_node & node) +Andersen::AnalyzeGep(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -762,7 +762,7 @@ Andersen::AnalyzeGep(const rvsdg::simple_node & node) } void -Andersen::AnalyzeBitcast(const rvsdg::simple_node & node) +Andersen::AnalyzeBitcast(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -781,7 +781,7 @@ Andersen::AnalyzeBitcast(const rvsdg::simple_node & node) } void -Andersen::AnalyzeBits2ptr(const rvsdg::simple_node & node) +Andersen::AnalyzeBits2ptr(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); const auto & output = *node.output(0); @@ -795,7 +795,7 @@ Andersen::AnalyzeBits2ptr(const rvsdg::simple_node & node) } void -Andersen::AnalyzePtr2bits(const 
rvsdg::simple_node & node) +Andersen::AnalyzePtr2bits(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); const auto & inputRegister = *node.input(0)->origin(); @@ -807,7 +807,7 @@ Andersen::AnalyzePtr2bits(const rvsdg::simple_node & node) } void -Andersen::AnalyzeConstantPointerNull(const rvsdg::simple_node & node) +Andersen::AnalyzeConstantPointerNull(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); const auto & output = *node.output(0); @@ -819,7 +819,7 @@ Andersen::AnalyzeConstantPointerNull(const rvsdg::simple_node & node) } void -Andersen::AnalyzeUndef(const rvsdg::simple_node & node) +Andersen::AnalyzeUndef(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); const auto & output = *node.output(0); @@ -833,7 +833,7 @@ Andersen::AnalyzeUndef(const rvsdg::simple_node & node) } void -Andersen::AnalyzeMemcpy(const rvsdg::simple_node & node) +Andersen::AnalyzeMemcpy(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -855,7 +855,7 @@ Andersen::AnalyzeMemcpy(const rvsdg::simple_node & node) } void -Andersen::AnalyzeConstantArray(const rvsdg::simple_node & node) +Andersen::AnalyzeConstantArray(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -877,7 +877,7 @@ Andersen::AnalyzeConstantArray(const rvsdg::simple_node & node) } void -Andersen::AnalyzeConstantStruct(const rvsdg::simple_node & node) +Andersen::AnalyzeConstantStruct(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -900,7 +900,7 @@ Andersen::AnalyzeConstantStruct(const rvsdg::simple_node & node) } void -Andersen::AnalyzeConstantAggregateZero(const rvsdg::simple_node & node) +Andersen::AnalyzeConstantAggregateZero(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); auto & output = *node.output(0); @@ -914,7 +914,7 @@ Andersen::AnalyzeConstantAggregateZero(const rvsdg::simple_node & node) } void -Andersen::AnalyzeExtractValue(const rvsdg::simple_node & node) +Andersen::AnalyzeExtractValue(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -929,7 
+929,7 @@ Andersen::AnalyzeExtractValue(const rvsdg::simple_node & node) } void -Andersen::AnalyzeValist(const rvsdg::simple_node & node) +Andersen::AnalyzeValist(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1182,7 +1182,7 @@ Andersen::AnalyzeRegion(rvsdg::Region & region) // PointerObjects for any of the node's outputs of pointer type for (const auto node : traverser) { - if (auto simpleNode = dynamic_cast(node)) + if (auto simpleNode = dynamic_cast(node)) AnalyzeSimpleNode(*simpleNode); else if (auto structuralNode = dynamic_cast(node)) AnalyzeStructuralNode(*structuralNode); diff --git a/jlm/llvm/opt/alias-analyses/Andersen.hpp b/jlm/llvm/opt/alias-analyses/Andersen.hpp index f35a5b078..4978a3ed6 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.hpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.hpp @@ -348,13 +348,13 @@ class Andersen final : public AliasAnalysis AnalyzeRegion(rvsdg::Region & region); void - AnalyzeSimpleNode(const rvsdg::simple_node & node); + AnalyzeSimpleNode(const rvsdg::SimpleNode & node); void - AnalyzeAlloca(const rvsdg::simple_node & node); + AnalyzeAlloca(const rvsdg::SimpleNode & node); void - AnalyzeMalloc(const rvsdg::simple_node & node); + AnalyzeMalloc(const rvsdg::SimpleNode & node); void AnalyzeLoad(const LoadNode & loadNode); @@ -366,40 +366,40 @@ class Andersen final : public AliasAnalysis AnalyzeCall(const CallNode & callNode); void - AnalyzeGep(const rvsdg::simple_node & node); + AnalyzeGep(const rvsdg::SimpleNode & node); void - AnalyzeBitcast(const rvsdg::simple_node & node); + AnalyzeBitcast(const rvsdg::SimpleNode & node); void - AnalyzeBits2ptr(const rvsdg::simple_node & node); + AnalyzeBits2ptr(const rvsdg::SimpleNode & node); void - AnalyzePtr2bits(const rvsdg::simple_node & node); + AnalyzePtr2bits(const rvsdg::SimpleNode & node); void - AnalyzeConstantPointerNull(const rvsdg::simple_node & node); + AnalyzeConstantPointerNull(const rvsdg::SimpleNode & node); void - AnalyzeUndef(const rvsdg::simple_node & 
node); + AnalyzeUndef(const rvsdg::SimpleNode & node); void - AnalyzeMemcpy(const rvsdg::simple_node & node); + AnalyzeMemcpy(const rvsdg::SimpleNode & node); void - AnalyzeConstantArray(const rvsdg::simple_node & node); + AnalyzeConstantArray(const rvsdg::SimpleNode & node); void - AnalyzeConstantStruct(const rvsdg::simple_node & node); + AnalyzeConstantStruct(const rvsdg::SimpleNode & node); void - AnalyzeConstantAggregateZero(const rvsdg::simple_node & node); + AnalyzeConstantAggregateZero(const rvsdg::SimpleNode & node); void - AnalyzeExtractValue(const rvsdg::simple_node & node); + AnalyzeExtractValue(const rvsdg::SimpleNode & node); void - AnalyzeValist(const rvsdg::simple_node & node); + AnalyzeValist(const rvsdg::SimpleNode & node); void AnalyzeStructuralNode(const rvsdg::StructuralNode & node); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index b702221f3..1902e8c57 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -492,7 +492,7 @@ MemoryStateEncoder::EncodeRegion(rvsdg::Region & region) topdown_traverser traverser(®ion); for (auto & node : traverser) { - if (auto simpleNode = dynamic_cast(node)) + if (auto simpleNode = dynamic_cast(node)) { EncodeSimpleNode(*simpleNode); } @@ -537,7 +537,7 @@ MemoryStateEncoder::EncodeStructuralNode(rvsdg::StructuralNode & structuralNode) } void -MemoryStateEncoder::EncodeSimpleNode(const rvsdg::simple_node & simpleNode) +MemoryStateEncoder::EncodeSimpleNode(const rvsdg::SimpleNode & simpleNode) { if (is(&simpleNode)) { @@ -579,7 +579,7 @@ MemoryStateEncoder::EncodeSimpleNode(const rvsdg::simple_node & simpleNode) } void -MemoryStateEncoder::EncodeAlloca(const rvsdg::simple_node & allocaNode) +MemoryStateEncoder::EncodeAlloca(const rvsdg::SimpleNode & allocaNode) { JLM_ASSERT(is(&allocaNode)); @@ -602,7 +602,7 @@ MemoryStateEncoder::EncodeAlloca(const rvsdg::simple_node & 
allocaNode) } void -MemoryStateEncoder::EncodeMalloc(const rvsdg::simple_node & mallocNode) +MemoryStateEncoder::EncodeMalloc(const rvsdg::SimpleNode & mallocNode) { JLM_ASSERT(is(&mallocNode)); auto & stateMap = Context_->GetRegionalizedStateMap(); @@ -657,7 +657,7 @@ MemoryStateEncoder::EncodeStore(const StoreNode & storeNode) } void -MemoryStateEncoder::EncodeFree(const rvsdg::simple_node & freeNode) +MemoryStateEncoder::EncodeFree(const rvsdg::SimpleNode & freeNode) { JLM_ASSERT(is(&freeNode)); auto & stateMap = Context_->GetRegionalizedStateMap(); @@ -725,7 +725,7 @@ MemoryStateEncoder::EncodeCallExit(const CallNode & callNode) } void -MemoryStateEncoder::EncodeMemcpy(const rvsdg::simple_node & memcpyNode) +MemoryStateEncoder::EncodeMemcpy(const rvsdg::SimpleNode & memcpyNode) { JLM_ASSERT(is(&memcpyNode)); auto & stateMap = Context_->GetRegionalizedStateMap(); @@ -989,7 +989,7 @@ MemoryStateEncoder::ReplaceStoreNode( std::vector MemoryStateEncoder::ReplaceMemcpyNode( - const rvsdg::simple_node & memcpyNode, + const rvsdg::SimpleNode & memcpyNode, const std::vector & memoryStates) { JLM_ASSERT(is(&memcpyNode)); @@ -1022,7 +1022,7 @@ MemoryStateEncoder::ReplaceMemcpyNode( } bool -MemoryStateEncoder::ShouldHandle(const rvsdg::simple_node & simpleNode) noexcept +MemoryStateEncoder::ShouldHandle(const rvsdg::SimpleNode & simpleNode) noexcept { for (size_t n = 0; n < simpleNode.ninputs(); n++) { diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index 6d08583e0..9929f2bb2 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -14,7 +14,7 @@ namespace rvsdg class GammaNode; class output; class Region; -class simple_node; +class SimpleNode; class StructuralNode; class ThetaNode; class ThetaOutput; @@ -101,13 +101,13 @@ class MemoryStateEncoder final EncodeStructuralNode(rvsdg::StructuralNode & structuralNode); void - 
EncodeSimpleNode(const rvsdg::simple_node & simpleNode); + EncodeSimpleNode(const rvsdg::SimpleNode & simpleNode); void - EncodeAlloca(const rvsdg::simple_node & allocaNode); + EncodeAlloca(const rvsdg::SimpleNode & allocaNode); void - EncodeMalloc(const rvsdg::simple_node & mallocNode); + EncodeMalloc(const rvsdg::SimpleNode & mallocNode); void EncodeLoad(const LoadNode & loadNode); @@ -116,7 +116,7 @@ class MemoryStateEncoder final EncodeStore(const StoreNode & storeNode); void - EncodeFree(const rvsdg::simple_node & freeNode); + EncodeFree(const rvsdg::SimpleNode & freeNode); void EncodeCall(const CallNode & callNode); @@ -128,7 +128,7 @@ class MemoryStateEncoder final EncodeCallExit(const CallNode & callNode); void - EncodeMemcpy(const rvsdg::simple_node & memcpyNode); + EncodeMemcpy(const rvsdg::SimpleNode & memcpyNode); void EncodeLambda(const lambda::node & lambda); @@ -194,24 +194,24 @@ class MemoryStateEncoder final * the outputs of \p memcpyNode are redirected to the respective outputs of the newly created * copy. * - * @param memcpyNode A rvsdg::simple_node representing a MemCpyOperation. + * @param memcpyNode A rvsdg::SimpleNode representing a MemCpyOperation. * @param memoryStates The memory states the new memcpy node should consume. * * @return A vector with the memory states of the newly created copy. */ [[nodiscard]] static std::vector ReplaceMemcpyNode( - const rvsdg::simple_node & memcpyNode, + const rvsdg::SimpleNode & memcpyNode, const std::vector & memoryStates); /** * Determines whether \p simpleNode should be handled by the MemoryStateEncoder. * - * @param simpleNode A simple_node. + * @param simpleNode A SimpleNode. * @return True, if \p simpleNode should be handled, otherwise false. 
*/ [[nodiscard]] static bool - ShouldHandle(const rvsdg::simple_node & simpleNode) noexcept; + ShouldHandle(const rvsdg::SimpleNode & simpleNode) noexcept; std::unique_ptr Context_; }; diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index 50c510693..e76cdc54f 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -168,7 +168,7 @@ class RegionSummary final return MemoryNodes_; } - [[nodiscard]] const util::HashSet & + [[nodiscard]] const util::HashSet & GetUnknownMemoryNodeReferences() const noexcept { return UnknownMemoryNodeReferences_; @@ -199,7 +199,7 @@ class RegionSummary final } void - AddUnknownMemoryNodeReferences(const util::HashSet & nodes) + AddUnknownMemoryNodeReferences(const util::HashSet & nodes) { UnknownMemoryNodeReferences_.UnionWith(nodes); } @@ -250,7 +250,7 @@ class RegionSummary final private: const rvsdg::Region * Region_; util::HashSet MemoryNodes_; - util::HashSet UnknownMemoryNodeReferences_; + util::HashSet UnknownMemoryNodeReferences_; util::HashSet RecursiveCalls_; util::HashSet NonRecursiveCalls_; @@ -685,7 +685,7 @@ RegionAwareMemoryNodeProvider::AnnotateRegion(rvsdg::Region & region) { AnnotateStructuralNode(*structuralNode); } - else if (auto simpleNode = dynamic_cast(&node)) + else if (auto simpleNode = dynamic_cast(&node)) { AnnotateSimpleNode(*simpleNode); } @@ -697,7 +697,7 @@ RegionAwareMemoryNodeProvider::AnnotateRegion(rvsdg::Region & region) } void -RegionAwareMemoryNodeProvider::AnnotateSimpleNode(const rvsdg::simple_node & simpleNode) +RegionAwareMemoryNodeProvider::AnnotateSimpleNode(const rvsdg::SimpleNode & simpleNode) { if (auto loadNode = dynamic_cast(&simpleNode)) { @@ -746,7 +746,7 @@ RegionAwareMemoryNodeProvider::AnnotateStore(const StoreNode & storeNode) } void -RegionAwareMemoryNodeProvider::AnnotateAlloca(const rvsdg::simple_node & 
allocaNode) +RegionAwareMemoryNodeProvider::AnnotateAlloca(const rvsdg::SimpleNode & allocaNode) { JLM_ASSERT(is(allocaNode.GetOperation())); @@ -756,7 +756,7 @@ RegionAwareMemoryNodeProvider::AnnotateAlloca(const rvsdg::simple_node & allocaN } void -RegionAwareMemoryNodeProvider::AnnotateMalloc(const rvsdg::simple_node & mallocNode) +RegionAwareMemoryNodeProvider::AnnotateMalloc(const rvsdg::SimpleNode & mallocNode) { JLM_ASSERT(is(mallocNode.GetOperation())); @@ -766,7 +766,7 @@ RegionAwareMemoryNodeProvider::AnnotateMalloc(const rvsdg::simple_node & mallocN } void -RegionAwareMemoryNodeProvider::AnnotateFree(const rvsdg::simple_node & freeNode) +RegionAwareMemoryNodeProvider::AnnotateFree(const rvsdg::SimpleNode & freeNode) { JLM_ASSERT(is(freeNode.GetOperation())); @@ -820,7 +820,7 @@ RegionAwareMemoryNodeProvider::AnnotateCall(const CallNode & callNode) } void -RegionAwareMemoryNodeProvider::AnnotateMemcpy(const rvsdg::simple_node & memcpyNode) +RegionAwareMemoryNodeProvider::AnnotateMemcpy(const rvsdg::SimpleNode & memcpyNode) { JLM_ASSERT(is(memcpyNode.GetOperation())); @@ -896,7 +896,7 @@ RegionAwareMemoryNodeProvider::PropagatePhi(const phi::node & phiNode) PropagateRegion(phiNodeSubregion); util::HashSet memoryNodes; - util::HashSet unknownMemoryNodeReferences; + util::HashSet unknownMemoryNodeReferences; for (auto & lambdaNode : lambdaNodes) { auto & regionSummary = Provisioning_->GetRegionSummary(*lambdaNode->subregion()); @@ -911,7 +911,7 @@ void RegionAwareMemoryNodeProvider::AssignAndPropagateMemoryNodes( const rvsdg::Region & region, const util::HashSet & memoryNodes, - const util::HashSet & unknownMemoryNodeReferences) + const util::HashSet & unknownMemoryNodeReferences) { auto & regionSummary = Provisioning_->GetRegionSummary(region); for (auto structuralNode : regionSummary.GetStructuralNodes().Items()) diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp index 
762330f8c..3c00d949d 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.hpp @@ -107,7 +107,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider AnnotateRegion(rvsdg::Region & region); void - AnnotateSimpleNode(const rvsdg::simple_node & provider); + AnnotateSimpleNode(const rvsdg::SimpleNode & provider); void AnnotateStructuralNode(const rvsdg::StructuralNode & structuralNode); @@ -119,19 +119,19 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider AnnotateStore(const StoreNode & storeNode); void - AnnotateAlloca(const rvsdg::simple_node & allocaNode); + AnnotateAlloca(const rvsdg::SimpleNode & allocaNode); void - AnnotateMalloc(const rvsdg::simple_node & mallocNode); + AnnotateMalloc(const rvsdg::SimpleNode & mallocNode); void - AnnotateFree(const rvsdg::simple_node & freeNode); + AnnotateFree(const rvsdg::SimpleNode & freeNode); void AnnotateCall(const CallNode & callNode); void - AnnotateMemcpy(const rvsdg::simple_node & memcpyNode); + AnnotateMemcpy(const rvsdg::SimpleNode & memcpyNode); /** * Propagates the utilized memory locations and simple RVSDG nodes that reference unknown memory @@ -174,7 +174,7 @@ class RegionAwareMemoryNodeProvider final : public MemoryNodeProvider AssignAndPropagateMemoryNodes( const rvsdg::Region & region, const util::HashSet & memoryNodes, - const util::HashSet & unknownMemoryNodeReferences); + const util::HashSet & unknownMemoryNodeReferences); /** * Resolves all references to unknown memory locations. diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index 0508e7109..c629a6b6d 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -30,11 +30,11 @@ HasOrContainsPointerType(const rvsdg::output & output) /** * Determines whether \p node should be handled by the Steensgaard analysis. 
* - * @param node An rvsdg::simple_node. + * @param node An rvsdg::SimpleNode. * @return True if \p node should be handled, otherwise false. */ static bool -ShouldHandle(const rvsdg::simple_node & node) +ShouldHandle(const rvsdg::SimpleNode & node) { for (size_t n = 0; n < node.ninputs(); n++) { @@ -990,7 +990,7 @@ Steensgaard::~Steensgaard() = default; Steensgaard::Steensgaard() = default; void -Steensgaard::AnalyzeSimpleNode(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeSimpleNode(const jlm::rvsdg::SimpleNode & node) { if (is(&node)) { @@ -1072,7 +1072,7 @@ Steensgaard::AnalyzeSimpleNode(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeAlloca(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeAlloca(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1082,7 +1082,7 @@ Steensgaard::AnalyzeAlloca(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeMalloc(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeMalloc(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1267,7 +1267,7 @@ Steensgaard::AnalyzeIndirectCall(const CallNode & callNode) } void -Steensgaard::AnalyzeGep(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeGep(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1278,7 +1278,7 @@ Steensgaard::AnalyzeGep(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeBitcast(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeBitcast(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1295,7 +1295,7 @@ Steensgaard::AnalyzeBitcast(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeBits2ptr(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeBits2ptr(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1308,7 +1308,7 @@ Steensgaard::AnalyzeBits2ptr(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzePtr2Bits(const rvsdg::simple_node & node) +Steensgaard::AnalyzePtr2Bits(const 
rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1316,7 +1316,7 @@ Steensgaard::AnalyzePtr2Bits(const rvsdg::simple_node & node) } void -Steensgaard::AnalyzeExtractValue(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeExtractValue(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1333,7 +1333,7 @@ Steensgaard::AnalyzeExtractValue(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeConstantPointerNull(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeConstantPointerNull(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1343,7 +1343,7 @@ Steensgaard::AnalyzeConstantPointerNull(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeConstantAggregateZero(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeConstantAggregateZero(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); auto & output = *node.output(0); @@ -1357,7 +1357,7 @@ Steensgaard::AnalyzeConstantAggregateZero(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeUndef(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeUndef(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); auto & output = *node.output(0); @@ -1371,7 +1371,7 @@ Steensgaard::AnalyzeUndef(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeConstantArray(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeConstantArray(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1392,7 +1392,7 @@ Steensgaard::AnalyzeConstantArray(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeConstantStruct(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeConstantStruct(const jlm::rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1414,7 +1414,7 @@ Steensgaard::AnalyzeConstantStruct(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeMemcpy(const jlm::rvsdg::simple_node & node) +Steensgaard::AnalyzeMemcpy(const jlm::rvsdg::SimpleNode & node) { 
JLM_ASSERT(is(&node)); @@ -1454,7 +1454,7 @@ Steensgaard::AnalyzeMemcpy(const jlm::rvsdg::simple_node & node) } void -Steensgaard::AnalyzeVaList(const rvsdg::simple_node & node) +Steensgaard::AnalyzeVaList(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); @@ -1732,7 +1732,7 @@ Steensgaard::AnalyzeRegion(rvsdg::Region & region) topdown_traverser traverser(®ion); for (auto & node : traverser) { - if (auto simpleNode = dynamic_cast(node)) + if (auto simpleNode = dynamic_cast(node)) { AnalyzeSimpleNode(*simpleNode); } diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp index e0e4b5952..aa9a83072 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.hpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.hpp @@ -16,7 +16,7 @@ class GammaNode; class Graph; class output; class Region; -class simple_node; +class SimpleNode; class StructuralNode; class ThetaNode; } @@ -118,16 +118,16 @@ class Steensgaard final : public AliasAnalysis AnalyzeTheta(const rvsdg::ThetaNode & node); void - AnalyzeSimpleNode(const rvsdg::simple_node & node); + AnalyzeSimpleNode(const rvsdg::SimpleNode & node); void AnalyzeStructuralNode(const rvsdg::StructuralNode & node); void - AnalyzeAlloca(const rvsdg::simple_node & node); + AnalyzeAlloca(const rvsdg::SimpleNode & node); void - AnalyzeMalloc(const rvsdg::simple_node & node); + AnalyzeMalloc(const rvsdg::SimpleNode & node); void AnalyzeLoad(const LoadNode & loadNode); @@ -148,40 +148,40 @@ class Steensgaard final : public AliasAnalysis AnalyzeIndirectCall(const CallNode & callNode); void - AnalyzeGep(const rvsdg::simple_node & node); + AnalyzeGep(const rvsdg::SimpleNode & node); void - AnalyzeBitcast(const rvsdg::simple_node & node); + AnalyzeBitcast(const rvsdg::SimpleNode & node); void - AnalyzeBits2ptr(const rvsdg::simple_node & node); + AnalyzeBits2ptr(const rvsdg::SimpleNode & node); void - AnalyzePtr2Bits(const rvsdg::simple_node & node); + AnalyzePtr2Bits(const rvsdg::SimpleNode & node); 
void - AnalyzeConstantPointerNull(const rvsdg::simple_node & node); + AnalyzeConstantPointerNull(const rvsdg::SimpleNode & node); void - AnalyzeUndef(const rvsdg::simple_node & node); + AnalyzeUndef(const rvsdg::SimpleNode & node); void - AnalyzeMemcpy(const rvsdg::simple_node & node); + AnalyzeMemcpy(const rvsdg::SimpleNode & node); void - AnalyzeConstantArray(const rvsdg::simple_node & node); + AnalyzeConstantArray(const rvsdg::SimpleNode & node); void - AnalyzeConstantStruct(const rvsdg::simple_node & node); + AnalyzeConstantStruct(const rvsdg::SimpleNode & node); void - AnalyzeConstantAggregateZero(const rvsdg::simple_node & node); + AnalyzeConstantAggregateZero(const rvsdg::SimpleNode & node); void - AnalyzeExtractValue(const rvsdg::simple_node & node); + AnalyzeExtractValue(const rvsdg::SimpleNode & node); void - AnalyzeVaList(const rvsdg::simple_node & node); + AnalyzeVaList(const rvsdg::SimpleNode & node); /** * Marks register \p output as escaping the module. This indicates that the pointer in \p output diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 0c4c118ed..6e3fc2f25 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -513,7 +513,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownRegion(rvsdg::Region & region) rvsdg::topdown_traverser traverser(®ion); for (auto & node : traverser) { - if (auto simpleNode = dynamic_cast(node)) + if (auto simpleNode = dynamic_cast(node)) { EliminateTopDownSimpleNode(*simpleNode); } @@ -752,7 +752,7 @@ TopDownMemoryNodeEliminator::EliminateTopDownTheta(const rvsdg::ThetaNode & thet } void -TopDownMemoryNodeEliminator::EliminateTopDownSimpleNode(const rvsdg::simple_node & simpleNode) +TopDownMemoryNodeEliminator::EliminateTopDownSimpleNode(const rvsdg::SimpleNode & simpleNode) { if (is(&simpleNode)) { @@ -765,7 +765,7 @@ 
TopDownMemoryNodeEliminator::EliminateTopDownSimpleNode(const rvsdg::simple_node } void -TopDownMemoryNodeEliminator::EliminateTopDownAlloca(const rvsdg::simple_node & node) +TopDownMemoryNodeEliminator::EliminateTopDownAlloca(const rvsdg::SimpleNode & node) { JLM_ASSERT(is(&node)); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp index 65707a0f7..f2b50e776 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.hpp @@ -28,9 +28,9 @@ class node; namespace jlm::rvsdg { class GammaNode; -class node; +class Node; class Region; -class simple_node; +class SimpleNode; class StructuralNode; class ThetaNode; } @@ -155,10 +155,10 @@ class TopDownMemoryNodeEliminator final : public MemoryNodeEliminator EliminateTopDownTheta(const rvsdg::ThetaNode & thetaNode); void - EliminateTopDownSimpleNode(const rvsdg::simple_node & simpleNode); + EliminateTopDownSimpleNode(const rvsdg::SimpleNode & simpleNode); void - EliminateTopDownAlloca(const rvsdg::simple_node & node); + EliminateTopDownAlloca(const rvsdg::SimpleNode & node); void EliminateTopDownCall(const CallNode & callNode); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 159b493bb..94e9016d0 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -388,7 +388,7 @@ mark(const rvsdg::StructuralNode * node, cnectx & ctx) } static void -mark(const jlm::rvsdg::simple_node * node, cnectx & ctx) +mark(const jlm::rvsdg::SimpleNode * node, cnectx & ctx) { if (node->ninputs() == 0) { @@ -431,7 +431,7 @@ mark(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : jlm::rvsdg::topdown_traverser(region)) { - if (auto simple = dynamic_cast(node)) + if (auto simple = dynamic_cast(node)) mark(simple, ctx); else mark(static_cast(node), ctx); @@ -545,7 +545,7 @@ divert(rvsdg::Region * region, cnectx & ctx) { for (const auto & node : 
jlm::rvsdg::topdown_traverser(region)) { - if (auto simple = dynamic_cast(node)) + if (auto simple = dynamic_cast(node)) divert_outputs(simple, ctx); else divert(static_cast(node), ctx); diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 217ee8c9d..5f9bcb7cc 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -97,7 +97,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) } static std::vector -route_dependencies(const lambda::node * lambda, const jlm::rvsdg::simple_node * apply) +route_dependencies(const lambda::node * lambda, const jlm::rvsdg::SimpleNode * apply) { JLM_ASSERT(is(apply)); @@ -114,7 +114,7 @@ route_dependencies(const lambda::node * lambda, const jlm::rvsdg::simple_node * } void -inlineCall(jlm::rvsdg::simple_node * call, const lambda::node * lambda) +inlineCall(jlm::rvsdg::SimpleNode * call, const lambda::node * lambda) { JLM_ASSERT(is(call)); diff --git a/jlm/llvm/opt/inlining.hpp b/jlm/llvm/opt/inlining.hpp index 9c15b6b6e..a39b7c944 100644 --- a/jlm/llvm/opt/inlining.hpp +++ b/jlm/llvm/opt/inlining.hpp @@ -30,7 +30,7 @@ jlm::rvsdg::output * find_producer(jlm::rvsdg::input * input); void -inlineCall(jlm::rvsdg::simple_node * call, const lambda::node * lambda); +inlineCall(jlm::rvsdg::SimpleNode * call, const lambda::node * lambda); } diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index c9b7c4825..b81bfffef 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -338,9 +338,9 @@ create_unrolled_gamma_predicate(const unrollinfo & ui, size_t factor) auto uf = jlm::rvsdg::create_bitconstant(region, nbits, factor); auto mul = jlm::rvsdg::bitmul_op::create(nbits, step, uf); auto arm = - jlm::rvsdg::simple_node::create_normalized(region, ui.armoperation(), { ui.init(), mul })[0]; + jlm::rvsdg::SimpleNode::create_normalized(region, ui.armoperation(), { ui.init(), mul })[0]; /* FIXME: order of operands */ - auto cmp = 
jlm::rvsdg::simple_node::create_normalized(region, ui.cmpoperation(), { arm, end })[0]; + auto cmp = jlm::rvsdg::SimpleNode::create_normalized(region, ui.cmpoperation(), { arm, end })[0]; auto pred = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); return pred; @@ -368,9 +368,9 @@ create_unrolled_theta_predicate( auto uf = create_bitconstant(region, nbits, factor); auto mul = bitmul_op::create(nbits, step, uf); - auto arm = simple_node::create_normalized(region, ui.armoperation(), { idv->origin(), mul })[0]; + auto arm = SimpleNode::create_normalized(region, ui.armoperation(), { idv->origin(), mul })[0]; /* FIXME: order of operands */ - auto cmp = simple_node::create_normalized(region, ui.cmpoperation(), { arm, iend->origin() })[0]; + auto cmp = SimpleNode::create_normalized(region, ui.cmpoperation(), { arm, iend->origin() })[0]; auto pred = match(1, { { 1, 1 } }, 0, 2, cmp); return pred; @@ -384,7 +384,7 @@ create_residual_gamma_predicate(const rvsdg::SubstitutionMap & smap, const unrol auto end = ui.end()->input()->origin(); /* FIXME: order of operands */ - auto cmp = jlm::rvsdg::simple_node::create_normalized(region, ui.cmpoperation(), { idv, end })[0]; + auto cmp = jlm::rvsdg::SimpleNode::create_normalized(region, ui.cmpoperation(), { idv, end })[0]; auto pred = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); return pred; diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index d08fc86ae..c15163ed5 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -152,7 +152,7 @@ JlmToMlirConverter::ConvertNode( ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs) { - if (auto simpleNode = dynamic_cast(&node)) + if (auto simpleNode = dynamic_cast(&node)) { return ConvertSimpleNode(*simpleNode, block, inputs); } @@ -302,7 +302,7 @@ JlmToMlirConverter::BitCompareNode( ::mlir::Operation * JlmToMlirConverter::ConvertSimpleNode( - const rvsdg::simple_node & node, + 
const rvsdg::SimpleNode & node, ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs) { diff --git a/jlm/mlir/backend/JlmToMlirConverter.hpp b/jlm/mlir/backend/JlmToMlirConverter.hpp index 35b0dcf12..efc6c1c0e 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.hpp +++ b/jlm/mlir/backend/JlmToMlirConverter.hpp @@ -134,15 +134,15 @@ class JlmToMlirConverter final BitCompareNode(const rvsdg::SimpleOperation & bitOp, ::llvm::SmallVector<::mlir::Value> inputs); /** - * Converts an RVSDG simple_node to an MLIR RVSDG operation. + * Converts an RVSDG SimpleNode to an MLIR RVSDG operation. * \param node The RVSDG node to be converted * \param block The MLIR RVSDG block to insert the converted node. - * \param inputs The inputs to the simple_node. + * \param inputs The inputs to the SimpleNode. * \return The converted MLIR RVSDG operation. */ ::mlir::Operation * ConvertSimpleNode( - const rvsdg::simple_node & node, + const rvsdg::SimpleNode & node, ::mlir::Block & block, const ::llvm::SmallVector<::mlir::Value> & inputs); diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 2aa8d6a80..67407691e 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -143,7 +143,7 @@ binary_normal_form::normalize_node(Node * node, const binary_op & op) const JLM_ASSERT(new_args.size() >= 2); const auto & new_op = tmp_op ? 
*tmp_op : static_cast(op); - divert_users(node, simple_node::create_normalized(node->region(), new_op, new_args)); + divert_users(node, SimpleNode::create_normalized(node->region(), new_op, new_args)); remove(node); return false; } @@ -364,7 +364,7 @@ reduce_parallel(const binary_op & op, const std::vector & auto op2 = worklist.front(); worklist.pop_front(); - auto output = simple_node::create_normalized(region, op, { op1, op2 })[0]; + auto output = SimpleNode::create_normalized(region, op, { op1, op2 })[0]; worklist.push_back(output); } @@ -387,7 +387,7 @@ reduce_linear(const binary_op & op, const std::vector & op auto op2 = worklist.front(); worklist.pop_front(); - auto output = simple_node::create_normalized(region, op, { op1, op2 })[0]; + auto output = SimpleNode::create_normalized(region, op, { op1, op2 })[0]; worklist.push_front(output); } diff --git a/jlm/rvsdg/bitstring/arithmetic.hpp b/jlm/rvsdg/bitstring/arithmetic.hpp index d91340df3..fbdb75ae6 100644 --- a/jlm/rvsdg/bitstring/arithmetic.hpp +++ b/jlm/rvsdg/bitstring/arithmetic.hpp @@ -40,7 +40,7 @@ class MakeBitUnaryOperation final : public bitunary_op static output * create(size_t nbits, output * op) { - return simple_node::create_normalized(op->region(), MakeBitUnaryOperation(nbits), { op })[0]; + return SimpleNode::create_normalized(op->region(), MakeBitUnaryOperation(nbits), { op })[0]; } }; @@ -75,7 +75,7 @@ class MakeBitBinaryOperation final : public bitbinary_op static output * create(size_t nbits, output * op1, output * op2) { - return simple_node::create_normalized( + return SimpleNode::create_normalized( op1->region(), MakeBitBinaryOperation(nbits), { op1, op2 })[0]; diff --git a/jlm/rvsdg/bitstring/comparison.hpp b/jlm/rvsdg/bitstring/comparison.hpp index bd1799166..dfad6f280 100644 --- a/jlm/rvsdg/bitstring/comparison.hpp +++ b/jlm/rvsdg/bitstring/comparison.hpp @@ -44,7 +44,7 @@ class MakeBitComparisonOperation final : public bitcompare_op static output * create(size_t nbits, output * op1, 
output * op2) { - return simple_node::create_normalized( + return SimpleNode::create_normalized( op1->region(), MakeBitComparisonOperation(nbits), { op1, op2 })[0]; diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index f32885497..e188db419 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -21,7 +21,7 @@ bitconcat(const std::vector & operands) auto region = operands[0]->region(); jlm::rvsdg::bitconcat_op op(std::move(types)); - return jlm::rvsdg::simple_node::create_normalized( + return jlm::rvsdg::SimpleNode::create_normalized( region, op, { operands.begin(), operands.end() })[0]; @@ -133,7 +133,7 @@ class concat_normal_form final : public simple_normal_form if (args != new_args) { bitconcat_op op(types_from_arguments(new_args)); - divert_users(node, simple_node::create_normalized(node->region(), op, new_args)); + divert_users(node, SimpleNode::create_normalized(node->region(), op, new_args)); remove(node); return false; } diff --git a/jlm/rvsdg/bitstring/constant.hpp b/jlm/rvsdg/bitstring/constant.hpp index 849986be7..fd3655f03 100644 --- a/jlm/rvsdg/bitstring/constant.hpp +++ b/jlm/rvsdg/bitstring/constant.hpp @@ -59,7 +59,7 @@ extern template class domain_const_op(argument->Type()); jlm::rvsdg::bitslice_op op(type, low, high); - return jlm::rvsdg::simple_node::create_normalized(argument->region(), op, { argument })[0]; + return jlm::rvsdg::SimpleNode::create_normalized(argument->region(), op, { argument })[0]; } } diff --git a/jlm/rvsdg/control.cpp b/jlm/rvsdg/control.cpp index 6810f1a31..d9e312ca1 100644 --- a/jlm/rvsdg/control.cpp +++ b/jlm/rvsdg/control.cpp @@ -155,14 +155,14 @@ match( jlm::rvsdg::output * operand) { match_op op(nbits, mapping, default_alternative, nalternatives); - return simple_node::create_normalized(operand->region(), op, { operand })[0]; + return SimpleNode::create_normalized(operand->region(), op, { operand })[0]; } jlm::rvsdg::output * control_constant(rvsdg::Region * 
region, size_t nalternatives, size_t alternative) { jlm::rvsdg::ctlconstant_op op({ alternative, nalternatives }); - return jlm::rvsdg::simple_node::create_normalized(region, op, {})[0]; + return jlm::rvsdg::SimpleNode::create_normalized(region, op, {})[0]; } } diff --git a/jlm/rvsdg/control.hpp b/jlm/rvsdg/control.hpp index b43f8e429..0ba8e478c 100644 --- a/jlm/rvsdg/control.hpp +++ b/jlm/rvsdg/control.hpp @@ -216,7 +216,7 @@ class match_op final : public jlm::rvsdg::unary_op auto bitType = CheckAndExtractBitType(predicate.type()); match_op operation(bitType.nbits(), mapping, defaultAlternative, numAlternatives); - return rvsdg::simple_node::create_normalized(predicate.region(), operation, { &predicate })[0]; + return rvsdg::SimpleNode::create_normalized(predicate.region(), operation, { &predicate })[0]; } private: diff --git a/jlm/rvsdg/nullary.hpp b/jlm/rvsdg/nullary.hpp index 7e48bdf2f..6ae61c364 100644 --- a/jlm/rvsdg/nullary.hpp +++ b/jlm/rvsdg/nullary.hpp @@ -98,7 +98,7 @@ class domain_const_op final : public nullary_op create(rvsdg::Region * region, const value_repr & vr) { domain_const_op op(vr); - return simple_node::create_normalized(region, op, {})[0]; + return SimpleNode::create_normalized(region, op, {})[0]; } private: diff --git a/jlm/rvsdg/region.hpp b/jlm/rvsdg/region.hpp index 843408d7c..c5167d7a1 100644 --- a/jlm/rvsdg/region.hpp +++ b/jlm/rvsdg/region.hpp @@ -21,7 +21,7 @@ namespace jlm::rvsdg { class Node; -class simple_node; +class SimpleNode; class SimpleOperation; class StructuralInput; class StructuralNode; diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 5d4d5180c..633b655b6 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -19,7 +19,7 @@ simple_input::~simple_input() noexcept } simple_input::simple_input( - jlm::rvsdg::simple_node * node, + jlm::rvsdg::SimpleNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) : node_input(origin, node, std::move(type)) @@ -27,9 +27,7 @@ 
simple_input::simple_input( /* outputs */ -simple_output::simple_output( - jlm::rvsdg::simple_node * node, - std::shared_ptr type) +simple_output::simple_output(jlm::rvsdg::SimpleNode * node, std::shared_ptr type) : node_output(node, std::move(type)) {} @@ -38,47 +36,45 @@ simple_output::~simple_output() noexcept on_output_destroy(this); } -/* simple nodes */ - -simple_node::~simple_node() +SimpleNode::~SimpleNode() { on_node_destroy(this); } -simple_node::simple_node( +SimpleNode::SimpleNode( rvsdg::Region * region, const SimpleOperation & op, const std::vector & operands) : Node(op.copy(), region) { - if (simple_node::GetOperation().narguments() != operands.size()) + if (SimpleNode::GetOperation().narguments() != operands.size()) throw jlm::util::error(jlm::util::strfmt( "Argument error - expected ", - simple_node::GetOperation().narguments(), + SimpleNode::GetOperation().narguments(), ", received ", operands.size(), " arguments.")); - for (size_t n = 0; n < simple_node::GetOperation().narguments(); n++) + for (size_t n = 0; n < SimpleNode::GetOperation().narguments(); n++) { add_input( - std::make_unique(this, operands[n], simple_node::GetOperation().argument(n))); + std::make_unique(this, operands[n], SimpleNode::GetOperation().argument(n))); } - for (size_t n = 0; n < simple_node::GetOperation().nresults(); n++) - add_output(std::make_unique(this, simple_node::GetOperation().result(n))); + for (size_t n = 0; n < SimpleNode::GetOperation().nresults(); n++) + add_output(std::make_unique(this, SimpleNode::GetOperation().result(n))); on_node_create(this); } const SimpleOperation & -simple_node::GetOperation() const noexcept +SimpleNode::GetOperation() const noexcept { return *util::AssertedCast(&Node::GetOperation()); } Node * -simple_node::copy(rvsdg::Region * region, const std::vector & operands) const +SimpleNode::copy(rvsdg::Region * region, const std::vector & operands) const { auto node = create(region, GetOperation(), operands); 
graph()->mark_denormalized(); @@ -86,7 +82,7 @@ simple_node::copy(rvsdg::Region * region, const std::vector operands; for (size_t n = 0; n < ninputs(); n++) diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index 1a7a8c02c..eb20c0c6d 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -18,15 +18,13 @@ class SimpleOperation; class simple_input; class simple_output; -/* simple nodes */ - -class simple_node : public Node +class SimpleNode : public Node { public: - virtual ~simple_node(); + ~SimpleNode() override; protected: - simple_node( + SimpleNode( rvsdg::Region * region, const SimpleOperation & op, const std::vector & operands); @@ -47,13 +45,13 @@ class simple_node : public Node Node * copy(rvsdg::Region * region, SubstitutionMap & smap) const override; - static inline jlm::rvsdg::simple_node * + static inline jlm::rvsdg::SimpleNode * create( rvsdg::Region * region, const SimpleOperation & op, const std::vector & operands) { - return new simple_node(region, op, operands); + return new SimpleNode(region, op, operands); } static inline std::vector @@ -77,15 +75,15 @@ class simple_input final : public node_input virtual ~simple_input() noexcept; simple_input( - simple_node * node, + SimpleNode * node, jlm::rvsdg::output * origin, std::shared_ptr type); public: - simple_node * + SimpleNode * node() const noexcept { - return static_cast(node_input::node()); + return static_cast(node_input::node()); } }; @@ -98,26 +96,26 @@ class simple_output final : public node_output public: virtual ~simple_output() noexcept; - simple_output(jlm::rvsdg::simple_node * node, std::shared_ptr type); + simple_output(jlm::rvsdg::SimpleNode * node, std::shared_ptr type); public: - simple_node * + SimpleNode * node() const noexcept { - return static_cast(node_output::node()); + return static_cast(node_output::node()); } }; /* simple node method definitions */ inline jlm::rvsdg::simple_input * -simple_node::input(size_t index) const noexcept 
+SimpleNode::input(size_t index) const noexcept { return static_cast(Node::input(index)); } inline jlm::rvsdg::simple_output * -simple_node::output(size_t index) const noexcept +SimpleNode::output(size_t index) const noexcept { return static_cast(Node::output(index)); } diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index b305933ae..07ca14cdb 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -89,7 +89,7 @@ simple_normal_form::normalized_create( if (get_mutable() && get_cse()) node = node_cse(region, op, arguments); if (!node) - node = simple_node::create(region, op, arguments); + node = SimpleNode::create(region, op, arguments); return outputs(node); } diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp index 4b688f0c8..77ceceb41 100644 --- a/jlm/rvsdg/statemux.hpp +++ b/jlm/rvsdg/statemux.hpp @@ -106,7 +106,7 @@ create_state_mux( auto region = operands.front()->region(); jlm::rvsdg::mux_op op(std::move(st), operands.size(), nresults); - return simple_node::create_normalized(region, op, operands); + return SimpleNode::create_normalized(region, op, operands); } static inline jlm::rvsdg::output * diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 6dc3bf388..94bd1d0a1 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -275,7 +275,7 @@ static std::string convert_region(const jlm::rvsdg::Region * region); static inline std::string -convert_simple_node(const jlm::rvsdg::simple_node * node) +convert_simple_node(const jlm::rvsdg::SimpleNode * node) { std::string s; @@ -324,7 +324,7 @@ convert_structural_node(const rvsdg::StructuralNode * node) static inline std::string convert_node(const Node * node) { - if (auto n = dynamic_cast(node)) + if (auto n = dynamic_cast(node)) return convert_simple_node(n); if (auto n = dynamic_cast(node)) diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 0b4bc4ded..e04e547fa 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp 
@@ -1042,9 +1042,8 @@ IndirectCallTest2::SetupRvsdg() lambdaOutput, &callX, &callY, - jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pxAlloca[0])), - jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*pyAlloca[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pxAlloca[0])), + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pyAlloca[0]))); }; auto SetupTest2Function = [&](rvsdg::output & functionX) @@ -1077,8 +1076,7 @@ IndirectCallTest2::SetupRvsdg() return std::make_tuple( lambdaOutput, &callX, - jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*pzAlloca[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pzAlloca[0]))); }; auto deltaG1 = SetupG1(); @@ -2284,8 +2282,7 @@ PhiTest2::SetupRvsdg() lambdaOutput, &callB, &callD, - jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*paAlloca[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*paAlloca[0]))); }; auto SetupB = [&](jlm::rvsdg::Region & region, @@ -2329,8 +2326,7 @@ PhiTest2::SetupRvsdg() lambdaOutput, &callI, &callC, - jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*pbAlloca[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pbAlloca[0]))); }; auto SetupC = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) @@ -2368,8 +2364,7 @@ PhiTest2::SetupRvsdg() return std::make_tuple( lambdaOutput, &callA, - jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*pcAlloca[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pcAlloca[0]))); }; auto SetupD = [&](jlm::rvsdg::Region & region, phi::rvargument & functionA) @@ -2398,8 +2393,7 @@ PhiTest2::SetupRvsdg() return std::make_tuple( lambdaOutput, &callA, - jlm::util::AssertedCast( - jlm::rvsdg::output::GetNode(*pdAlloca[0]))); + jlm::util::AssertedCast(jlm::rvsdg::output::GetNode(*pdAlloca[0]))); }; auto SetupPhi = [&](rvsdg::output & lambdaEight, rvsdg::output & lambdaI) @@ -2480,7 +2474,7 @@ PhiTest2::SetupRvsdg() return std::make_tuple( lambdaOutput, &callA, - 
jlm::util::AssertedCast( + jlm::util::AssertedCast( jlm::rvsdg::output::GetNode(*pTestAlloca[0]))); }; diff --git a/tests/TestRvsdgs.hpp b/tests/TestRvsdgs.hpp index dd37d7ee6..5b2ade48f 100644 --- a/tests/TestRvsdgs.hpp +++ b/tests/TestRvsdgs.hpp @@ -760,19 +760,19 @@ class IndirectCallTest2 final : public RvsdgTest return *CallY_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetAllocaPx() const noexcept { return *AllocaPx_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetAllocaPy() const noexcept { return *AllocaPy_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetAllocaPz() const noexcept { return *AllocaPz_; @@ -800,9 +800,9 @@ class IndirectCallTest2 final : public RvsdgTest jlm::llvm::CallNode * Test2CallX_; jlm::llvm::CallNode * CallY_; - jlm::rvsdg::simple_node * AllocaPx_; - jlm::rvsdg::simple_node * AllocaPy_; - jlm::rvsdg::simple_node * AllocaPz_; + jlm::rvsdg::SimpleNode * AllocaPx_; + jlm::rvsdg::SimpleNode * AllocaPy_; + jlm::rvsdg::SimpleNode * AllocaPz_; }; /** @@ -1566,31 +1566,31 @@ class PhiTest2 final : public RvsdgTest return *IndirectCall_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetPTestAlloca() const noexcept { return *PTestAlloca_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetPaAlloca() const noexcept { return *PaAlloca_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetPbAlloca() const noexcept { return *PbAlloca_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetPcAlloca() const noexcept { return *PcAlloca_; } - [[nodiscard]] jlm::rvsdg::simple_node & + [[nodiscard]] jlm::rvsdg::SimpleNode & GetPdAlloca() const noexcept { return *PdAlloca_; @@ -1617,11 +1617,11 @@ class PhiTest2 final : public RvsdgTest jlm::llvm::CallNode * CallI_; jlm::llvm::CallNode * 
IndirectCall_; - jlm::rvsdg::simple_node * PTestAlloca_; - jlm::rvsdg::simple_node * PaAlloca_; - jlm::rvsdg::simple_node * PbAlloca_; - jlm::rvsdg::simple_node * PcAlloca_; - jlm::rvsdg::simple_node * PdAlloca_; + jlm::rvsdg::SimpleNode * PTestAlloca_; + jlm::rvsdg::SimpleNode * PaAlloca_; + jlm::rvsdg::SimpleNode * PbAlloca_; + jlm::rvsdg::SimpleNode * PcAlloca_; + jlm::rvsdg::SimpleNode * PdAlloca_; }; /** diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index 415bc23fe..37a4b3ad9 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -281,12 +281,12 @@ TestThetaLoad() jlm::rvsdg::bitsgt_op sgt(32); jlm::rvsdg::bitadd_op add(32); jlm::rvsdg::bitsub_op sub(32); - auto arm = jlm::rvsdg::simple_node::create_normalized( + auto arm = jlm::rvsdg::SimpleNode::create_normalized( thetaRegion, add, { idv->argument(), lvs->argument() })[0]; auto cmp = - jlm::rvsdg::simple_node::create_normalized(thetaRegion, ult, { arm, lve->argument() })[0]; + jlm::rvsdg::SimpleNode::create_normalized(thetaRegion, ult, { arm, lve->argument() })[0]; auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); idv->result()->divert_to(arm); theta->set_predicate(match); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index d739580fb..285647ca8 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -39,8 +39,8 @@ TestFork() rvsdg::output * lveBuffer; loop->add_loopvar(lambda->GetFunctionArguments()[2], &lveBuffer); - auto arm = rvsdg::simple_node::create_normalized(subregion, add, { idvBuffer, lvsBuffer })[0]; - auto cmp = rvsdg::simple_node::create_normalized(subregion, ult, { arm, lveBuffer })[0]; + auto arm = rvsdg::SimpleNode::create_normalized(subregion, add, { idvBuffer, lvsBuffer })[0]; + auto cmp = 
rvsdg::SimpleNode::create_normalized(subregion, ult, { arm, lveBuffer })[0]; auto match = rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); loop->set_predicate(match); @@ -105,8 +105,8 @@ TestConstantFork() loop->add_loopvar(lambda->GetFunctionArguments()[0], &idvBuffer); auto bitConstant1 = rvsdg::create_bitconstant(subregion, 32, 1); - auto arm = rvsdg::simple_node::create_normalized(subregion, add, { idvBuffer, bitConstant1 })[0]; - auto cmp = rvsdg::simple_node::create_normalized(subregion, ult, { arm, bitConstant1 })[0]; + auto arm = rvsdg::SimpleNode::create_normalized(subregion, add, { idvBuffer, bitConstant1 })[0]; + auto cmp = rvsdg::SimpleNode::create_normalized(subregion, ult, { arm, bitConstant1 })[0]; auto match = rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); loop->set_predicate(match); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index 32699ff96..18575ede2 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -37,12 +37,11 @@ TestUnknownBoundaries() auto lvs = theta->add_loopvar(lambda->GetFunctionArguments()[1]); auto lve = theta->add_loopvar(lambda->GetFunctionArguments()[2]); - auto arm = jlm::rvsdg::simple_node::create_normalized( + auto arm = jlm::rvsdg::SimpleNode::create_normalized( subregion, add, { idv->argument(), lvs->argument() })[0]; - auto cmp = - jlm::rvsdg::simple_node::create_normalized(subregion, ult, { arm, lve->argument() })[0]; + auto cmp = jlm::rvsdg::SimpleNode::create_normalized(subregion, ult, { arm, lve->argument() })[0]; auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); idv->result()->divert_to(arm); diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 2bbb9b6dc..034b06e80 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -51,9 +51,8 @@ create_theta( auto lvs = theta->add_loopvar(step); auto lve = theta->add_loopvar(end); - 
auto arm = - simple_node::create_normalized(subregion, aop, { idv->argument(), lvs->argument() })[0]; - auto cmp = simple_node::create_normalized(subregion, cop, { arm, lve->argument() })[0]; + auto arm = SimpleNode::create_normalized(subregion, aop, { idv->argument(), lvs->argument() })[0]; + auto cmp = SimpleNode::create_normalized(subregion, cop, { arm, lve->argument() })[0]; auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); idv->result()->divert_to(arm); diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 105bd8497..0460592a1 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -264,10 +264,8 @@ TestComZeroExt() // zero extension of constant1 auto zeroExtOp = jlm::llvm::zext_op(8, 16); - auto zeroExt = jlm::rvsdg::simple_node::create_normalized( - lambda->subregion(), - zeroExtOp, - { constant1 })[0]; + auto zeroExt = + jlm::rvsdg::SimpleNode::create_normalized(lambda->subregion(), zeroExtOp, { constant1 })[0]; auto mul = jlm::rvsdg::bitmul_op::create(16, zeroExt, zeroExt); diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 235f420c9..eee0cd3ff 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -25,9 +25,9 @@ test_flattened_binary_reduction() auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i3 = &jlm::tests::GraphImport::Create(graph, vt, ""); - auto o1 = simple_node::create_normalized(graph.root(), op, { i0, i1 })[0]; - auto o2 = simple_node::create_normalized(graph.root(), op, { o1, i2 })[0]; - auto o3 = simple_node::create_normalized(graph.root(), op, { o2, i3 })[0]; + auto o1 = SimpleNode::create_normalized(graph.root(), op, { i0, i1 })[0]; + auto o2 = SimpleNode::create_normalized(graph.root(), op, { o1, i2 })[0]; + auto o3 = SimpleNode::create_normalized(graph.root(), op, { o2, i3 })[0]; auto & ex = 
jlm::tests::GraphExport::Create(*o3, ""); graph.prune(); @@ -59,9 +59,9 @@ test_flattened_binary_reduction() auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i3 = &jlm::tests::GraphImport::Create(graph, vt, ""); - auto o1 = simple_node::create_normalized(graph.root(), op, { i0, i1 })[0]; - auto o2 = simple_node::create_normalized(graph.root(), op, { o1, i2 })[0]; - auto o3 = simple_node::create_normalized(graph.root(), op, { o2, i3 })[0]; + auto o1 = SimpleNode::create_normalized(graph.root(), op, { i0, i1 })[0]; + auto o2 = SimpleNode::create_normalized(graph.root(), op, { o1, i2 })[0]; + auto o3 = SimpleNode::create_normalized(graph.root(), op, { o2, i3 })[0]; auto & ex = jlm::tests::GraphExport::Create(*o3, ""); graph.prune(); diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index f694ae623..eee6315ab 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -101,7 +101,7 @@ class unary_op final : public rvsdg::unary_op rvsdg::output * operand, std::shared_ptr dsttype) { - return rvsdg::simple_node::create( + return rvsdg::SimpleNode::create( region, unary_op(std::move(srctype), std::move(dsttype)), { operand }); @@ -114,7 +114,7 @@ class unary_op final : public rvsdg::unary_op std::shared_ptr dsttype) { unary_op op(std::move(srctype), std::move(dsttype)); - return rvsdg::simple_node::create_normalized(operand->region(), op, { operand })[0]; + return rvsdg::SimpleNode::create_normalized(operand->region(), op, { operand })[0]; } }; @@ -173,7 +173,7 @@ class binary_op final : public rvsdg::binary_op rvsdg::output * op2) { binary_op op(srctype, std::move(dsttype), rvsdg::binary_op::flags::none); - return rvsdg::simple_node::create(op1->region(), op, { op1, op2 }); + return rvsdg::SimpleNode::create(op1->region(), op, { op1, op2 }); } static inline rvsdg::output * @@ -184,7 +184,7 @@ class binary_op final : public rvsdg::binary_op rvsdg::output * op2) { binary_op op(srctype, std::move(dsttype), 
rvsdg::binary_op::flags::none); - return rvsdg::simple_node::create_normalized(op1->region(), op, { op1, op2 })[0]; + return rvsdg::SimpleNode::create_normalized(op1->region(), op, { op1, op2 })[0]; } private: @@ -382,7 +382,7 @@ class test_op final : public rvsdg::SimpleOperation [[nodiscard]] std::unique_ptr copy() const override; - static rvsdg::simple_node * + static rvsdg::SimpleNode * create( rvsdg::Region * region, const std::vector & operands, @@ -393,10 +393,10 @@ class test_op final : public rvsdg::SimpleOperation operand_types.push_back(operand->Type()); test_op op(std::move(operand_types), std::move(result_types)); - return rvsdg::simple_node::create(region, op, { operands }); + return rvsdg::SimpleNode::create(region, op, { operands }); } - static rvsdg::simple_node * + static rvsdg::SimpleNode * Create( rvsdg::Region * region, std::vector> operandTypes, @@ -404,18 +404,18 @@ class test_op final : public rvsdg::SimpleOperation std::vector> resultTypes) { test_op op(std::move(operandTypes), std::move(resultTypes)); - return rvsdg::simple_node::create(region, op, { operands }); + return rvsdg::SimpleNode::create(region, op, { operands }); } }; -class SimpleNode final : public rvsdg::simple_node +class SimpleNode final : public rvsdg::SimpleNode { private: SimpleNode( rvsdg::Region & region, const test_op & operation, const std::vector & operands) - : simple_node(®ion, operation, operands) + : rvsdg::SimpleNode(®ion, operation, operands) {} public: @@ -475,7 +475,7 @@ create_testop( operand_types.push_back(operand->Type()); test_op op(std::move(operand_types), std::move(result_types)); - return rvsdg::simple_node::create_normalized(region, op, { operands }); + return rvsdg::SimpleNode::create_normalized(region, op, { operands }); } class TestGraphArgument final : public jlm::rvsdg::RegionArgument From 0ac4805788532968df3ee816f2db50090561252e Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 21 Dec 2024 10:23:13 +0100 Subject: [PATCH 139/170] 
Convert binary operation normalizations to new normalization interface (#693) --- jlm/rvsdg/binary.cpp | 63 ++++++++ jlm/rvsdg/binary.hpp | 39 +++++ tests/jlm/rvsdg/test-binary.cpp | 267 +++++++++++++++++++++++++++++++- 3 files changed, 364 insertions(+), 5 deletions(-) diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 67407691e..60c961e5d 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -319,6 +319,69 @@ binary_op::flags() const noexcept return jlm::rvsdg::binary_op::flags::none; } +std::optional> +FlattenAssociativeBinaryOperation( + const binary_op & operation, + const std::vector & operands) +{ + JLM_ASSERT(!operands.empty()); + auto region = operands[0]->region(); + + if (!operation.is_associative()) + { + return std::nullopt; + } + + auto newOperands = base::detail::associative_flatten( + operands, + [&operation](rvsdg::output * operand) + { + auto node = TryGetOwnerNode(*operand); + if (node == nullptr) + return false; + + auto flattenedBinaryOperation = + dynamic_cast(&node->GetOperation()); + return node->GetOperation() == operation + || (flattenedBinaryOperation && flattenedBinaryOperation->bin_operation() == operation); + }); + + if (operands == newOperands) + { + JLM_ASSERT(newOperands.size() == 2); + return std::nullopt; + } + + JLM_ASSERT(newOperands.size() > 2); + auto flattenedBinaryOperation = + std::make_unique(operation, newOperands.size()); + return outputs(SimpleNode::create(region, *flattenedBinaryOperation, newOperands)); +} + +std::optional> +NormalizeBinaryOperation(const binary_op & operation, const std::vector & operands) +{ + JLM_ASSERT(!operands.empty()); + auto region = operands[0]->region(); + + auto newOperands = reduce_operands(operation, operands); + + if (newOperands == operands) + { + // The operands did not change, which means that none of the normalizations triggered. 
+ return std::nullopt; + } + + if (newOperands.size() == 1) + { + // The operands could be reduced to a single value by applying constant folding. + return newOperands; + } + + JLM_ASSERT(newOperands.size() == 2); + return outputs(SimpleNode::create(region, operation, newOperands)); +} + /* flattened binary operator */ flattened_binary_op::~flattened_binary_op() noexcept diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index be2ef06c2..07f0fbe74 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -12,6 +12,8 @@ #include #include +#include + namespace jlm::rvsdg { @@ -167,6 +169,43 @@ class binary_op : public SimpleOperation } }; +/** + * \brief Flattens a cascade of the same binary operations into a single flattened binary operation. + * + * o1 = binaryNode i1 i2 + * o2 = binaryNode o1 i3 + * => + * o2 = flattenedBinaryNode i1 i2 i3 + * + * \pre The binary operation must be associative. + * + * @param operation The binary operation on which the transformation is performed. + * @param operands The operands of the binary node. + * @return If the normalization could be applied, then the results of the binary operation after + * the transformation. Otherwise, std::nullopt. + */ +std::optional> +FlattenAssociativeBinaryOperation( + const binary_op & operation, + const std::vector & operands); + +/** + * \brief Applies the reductions implemented in the binary operations reduction functions. + * + * @param operation The binary operation on which the transformation is performed. + * @param operands The operands of the binary node. + * + * @return If the normalization could be applied, then the results of the binary operation after + * the transformation. Otherwise, std::nullopt. 
+ * + * \see binary_op::can_reduce_operand_pair() + * \see binary_op::reduce_operand_pair() + */ +std::optional> +NormalizeBinaryOperation( + const binary_op & operation, + const std::vector & operands); + class flattened_binary_op final : public SimpleOperation { public: diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index eee0cd3ff..64ba71182 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -7,10 +7,80 @@ #include "test-registry.hpp" #include "test-types.hpp" +#include #include -static void -test_flattened_binary_reduction() +class BinaryOperation final : public jlm::rvsdg::binary_op +{ +public: + BinaryOperation( + const std::shared_ptr operandType, + const std::shared_ptr resultType, + const enum jlm::rvsdg::binary_op::flags & flags) + : jlm::rvsdg::binary_op({ operandType, operandType }, resultType), + Flags_(flags) + {} + + jlm::rvsdg::binop_reduction_path_t + can_reduce_operand_pair(const jlm::rvsdg::output * operand1, const jlm::rvsdg::output * operand2) + const noexcept override + { + auto n1 = jlm::rvsdg::TryGetOwnerNode(*operand1); + auto n2 = jlm::rvsdg::TryGetOwnerNode(*operand2); + + if (jlm::rvsdg::is(n1) && jlm::rvsdg::is(n2)) + { + return 1; + } + + return 0; + } + + jlm::rvsdg::output * + reduce_operand_pair( + jlm::rvsdg::unop_reduction_path_t path, + jlm::rvsdg::output * op1, + jlm::rvsdg::output * op2) const override + { + + if (path == 1) + { + return op2; + } + + return nullptr; + } + + [[nodiscard]] enum jlm::rvsdg::binary_op::flags + flags() const noexcept override + { + return Flags_; + } + + bool + operator==(const Operation & other) const noexcept override + { + JLM_UNREACHABLE("Not implemented."); + } + + [[nodiscard]] std::string + debug_string() const override + { + return "BinaryOperation"; + } + + [[nodiscard]] std::unique_ptr + copy() const override + { + return std::make_unique(this->argument(0), this->result(0), Flags_); + } + +private: + enum 
jlm::rvsdg::binary_op::flags Flags_; +}; + +static int +FlattenedBinaryReduction() { using namespace jlm::rvsdg; @@ -84,14 +154,201 @@ test_flattened_binary_reduction() auto node2 = output::GetNode(*node1->input(0)->origin()); assert(is(node2)); } + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/test-binary-FlattenedBinaryReduction", FlattenedBinaryReduction) + +static int +FlattenAssociativeBinaryOperation_NotAssociativeBinary() +{ + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + Graph graph; + auto i0 = &jlm::tests::GraphImport::Create(graph, valueType, "i0"); + auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); + auto i2 = &jlm::tests::GraphImport::Create(graph, valueType, "i2"); + + jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::none); + auto o1 = SimpleNode::create(graph.root(), binaryOperation, { i0, i1 }); + auto o2 = SimpleNode::create(graph.root(), binaryOperation, { o1->output(0), i2 }); + + auto & ex = jlm::tests::GraphExport::Create(*o2->output(0), "o2"); + + jlm::rvsdg::view(graph, stdout); + + // Act + auto node = TryGetOwnerNode(*ex.origin()); + auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); + + jlm::rvsdg::view(graph, stdout); + + // Assert + assert(success == false); + assert(TryGetOwnerNode(*ex.origin()) == node); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/test-binary-FlattenAssociatedBinaryOperation_NotAssociativeBinary", + FlattenAssociativeBinaryOperation_NotAssociativeBinary) + +static int +FlattenAssociativeBinaryOperation_NoNewOperands() +{ + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + Graph graph; + auto i0 = &jlm::tests::GraphImport::Create(graph, valueType, "i0"); + auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); + + jlm::tests::unary_op unaryOperation(valueType, valueType); + jlm::tests::binary_op binaryOperation(valueType, 
valueType, binary_op::flags::associative); + auto u1 = SimpleNode::create(graph.root(), unaryOperation, { i0 }); + auto u2 = SimpleNode::create(graph.root(), unaryOperation, { i1 }); + auto b2 = SimpleNode::create(graph.root(), binaryOperation, { u1->output(0), u2->output(0) }); + + auto & ex = jlm::tests::GraphExport::Create(*b2->output(0), "o2"); + + jlm::rvsdg::view(graph, stdout); + + // Act + auto node = TryGetOwnerNode(*ex.origin()); + auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); + + jlm::rvsdg::view(graph, stdout); + + // Assert + assert(success == false); + assert(TryGetOwnerNode(*ex.origin()) == node); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/test-binary-FlattenAssociatedBinaryOperation_NoNewOperands", + FlattenAssociativeBinaryOperation_NoNewOperands) + +static int +FlattenAssociativeBinaryOperation_Success() +{ + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + Graph graph; + auto i0 = &jlm::tests::GraphImport::Create(graph, valueType, "i0"); + auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); + auto i2 = &jlm::tests::GraphImport::Create(graph, valueType, "i2"); + + jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); + auto o1 = SimpleNode::create(graph.root(), binaryOperation, { i0, i1 }); + auto o2 = SimpleNode::create(graph.root(), binaryOperation, { o1->output(0), i2 }); + + auto & ex = jlm::tests::GraphExport::Create(*o2->output(0), "o2"); + + jlm::rvsdg::view(graph, stdout); + + // Act + auto node = TryGetOwnerNode(*ex.origin()); + auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); + + jlm::rvsdg::view(graph, stdout); + + // Assert + assert(success); + auto flattenedBinaryNode = TryGetOwnerNode(*ex.origin()); + assert(is(flattenedBinaryNode)); + assert(flattenedBinaryNode->ninputs() == 3); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + 
"jlm/rvsdg/test-binary-FlattenAssociatedBinaryOperation_Success", + FlattenAssociativeBinaryOperation_Success) + +static int +NormalizeBinaryOperation_NoNewOperands() +{ + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + Graph graph; + auto i0 = &jlm::tests::GraphImport::Create(graph, valueType, "i0"); + auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); + + jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); + auto o1 = SimpleNode::create(graph.root(), binaryOperation, { i0, i1 }); + + auto & ex = jlm::tests::GraphExport::Create(*o1->output(0), "o2"); + + jlm::rvsdg::view(graph, stdout); + + // Act + auto node = TryGetOwnerNode(*ex.origin()); + auto success = ReduceNode(NormalizeBinaryOperation, *node); + + jlm::rvsdg::view(graph, stdout); + + // Assert + assert(success == false); + + return 0; } +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/test-binary-NormalizeBinaryOperation_NoNewOperands", + NormalizeBinaryOperation_NoNewOperands) + static int -test_main() +NormalizeBinaryOperation_SingleOperand() { - test_flattened_binary_reduction(); + using namespace jlm::rvsdg; + + // Arrange + auto valueType = jlm::tests::valuetype::Create(); + + jlm::tests::unary_op unaryOperation(valueType, valueType); + BinaryOperation binaryOperation(valueType, valueType, binary_op::flags::none); + + Graph graph; + auto s0 = &jlm::tests::GraphImport::Create(graph, valueType, "s0"); + auto s1 = &jlm::tests::GraphImport::Create(graph, valueType, "s1"); + + auto u1 = SimpleNode::create(graph.root(), unaryOperation, { s0 }); + auto u2 = SimpleNode::create(graph.root(), unaryOperation, { s1 }); + + auto o1 = SimpleNode::create(graph.root(), binaryOperation, { u1->output(0), u2->output(0) }); + + auto & ex = jlm::tests::GraphExport::Create(*o1->output(0), "ex"); + + jlm::rvsdg::view(graph, stdout); + + // Act + auto node = TryGetOwnerNode(*ex.origin()); + auto success = 
ReduceNode(NormalizeBinaryOperation, *node); + + jlm::rvsdg::view(graph, stdout); + + // Assert + assert(success == true); + assert(ex.origin() == u2->output(0)); return 0; } -JLM_UNIT_TEST_REGISTER("jlm/rvsdg/test-binary", test_main) +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/test-binary-NormalizeBinaryOperation_SingleOperand", + NormalizeBinaryOperation_SingleOperand) From 4821290d2e6cfe890b80a4c9ab3ee1a53af75906 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 21 Dec 2024 10:54:03 +0100 Subject: [PATCH 140/170] Add modernize-redundant-void-arg check to clang-tidy (#694) --- .clang-tidy | 2 + jlm/rvsdg/binary.cpp | 2 +- jlm/rvsdg/bitstring/concat.cpp | 2 +- jlm/rvsdg/gamma.cpp | 2 +- jlm/rvsdg/node.cpp | 2 +- jlm/rvsdg/nullary.cpp | 2 +- jlm/rvsdg/simple-normal-form.cpp | 2 +- jlm/rvsdg/statemux.cpp | 2 +- jlm/rvsdg/structural-normal-form.cpp | 2 +- jlm/rvsdg/unary.cpp | 2 +- jlm/util/intrusive-hash.hpp | 4 +- jlm/util/intrusive-list.hpp | 8 ++-- tests/jlm/rvsdg/bitstring/bitstring.cpp | 62 ++++++++++++------------- tests/jlm/rvsdg/test-gamma.cpp | 6 +-- tests/jlm/rvsdg/test-graph.cpp | 6 +-- tests/jlm/rvsdg/test-nodes.cpp | 2 +- tests/jlm/rvsdg/test-statemux.cpp | 2 +- tests/jlm/rvsdg/test-topdown.cpp | 2 +- tests/jlm/rvsdg/test-typemismatch.cpp | 2 +- tests/jlm/util/test-intrusive-hash.cpp | 6 +-- tests/jlm/util/test-intrusive-list.cpp | 6 +-- 21 files changed, 64 insertions(+), 62 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 2bcbaae63..dd474def7 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,8 +1,10 @@ --- Checks: '-*, modernize-deprecated-headers, + modernize-redundant-void-arg, ' WarningsAsErrors: ' modernize-deprecated-headers, + modernize-redundant-void-arg, ' \ No newline at end of file diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index 60c961e5d..db35d1360 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -522,7 +522,7 @@ flattened_binary_operation_get_default_normal_form_( } static void 
__attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::binary_op), diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index e188db419..f250de767 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -237,7 +237,7 @@ get_default_normal_form( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::bitconcat_op), diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index ea2b71e9f..3cf53e929 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -471,7 +471,7 @@ gamma_node_get_default_normal_form_( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::GammaOperation), diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index f09dca107..46e4b2a7c 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -147,7 +147,7 @@ node_get_default_normal_form_( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::Operation), diff --git a/jlm/rvsdg/nullary.cpp b/jlm/rvsdg/nullary.cpp index c78350ec0..a8baa960f 100644 --- a/jlm/rvsdg/nullary.cpp +++ b/jlm/rvsdg/nullary.cpp @@ -44,7 +44,7 @@ nullary_operation_get_default_normal_form_( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::nullary_op), diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index 07ca14cdb..d2eeee55b 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -119,7 +119,7 @@ get_default_normal_form( } static 
void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::SimpleOperation), diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 83e27ed92..79621b99d 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -210,7 +210,7 @@ create_mux_normal_form( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::mux_op), diff --git a/jlm/rvsdg/structural-normal-form.cpp b/jlm/rvsdg/structural-normal-form.cpp index 122a39a69..bf2aee5d9 100644 --- a/jlm/rvsdg/structural-normal-form.cpp +++ b/jlm/rvsdg/structural-normal-form.cpp @@ -31,7 +31,7 @@ get_default_normal_form( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::StructuralOperation), diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index e2f4a4304..45f5eb7d6 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -110,7 +110,7 @@ unary_operation_get_default_normal_form_( } static void __attribute__((constructor)) -register_node_normal_form(void) +register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::unary_op), diff --git a/jlm/util/intrusive-hash.hpp b/jlm/util/intrusive-hash.hpp index 54d979fc9..bbd8a76c3 100644 --- a/jlm/util/intrusive-hash.hpp +++ b/jlm/util/intrusive-hash.hpp @@ -173,7 +173,7 @@ class intrusive_hash {} inline const iterator & - operator++(void) noexcept + operator++() noexcept { ElementType * next = map_->accessor_.get_next(element_); if (next == nullptr) @@ -262,7 +262,7 @@ class intrusive_hash {} inline const const_iterator & - operator++(void) noexcept + operator++() noexcept { ElementType * next = map_->accessor_.get_next(element_); if (next == nullptr) diff --git 
a/jlm/util/intrusive-list.hpp b/jlm/util/intrusive-list.hpp index d26adb338..3b743b471 100644 --- a/jlm/util/intrusive-list.hpp +++ b/jlm/util/intrusive-list.hpp @@ -116,7 +116,7 @@ class intrusive_list {} inline const iterator & - operator++(void) noexcept + operator++() noexcept { element_ = list_->accessor_.get_next(element_); return *this; @@ -131,7 +131,7 @@ class intrusive_list } inline const iterator & - operator--(void) noexcept + operator--() noexcept { if (element_) { @@ -216,7 +216,7 @@ class intrusive_list {} inline const const_iterator & - operator++(void) noexcept + operator++() noexcept { element_ = list_->accessor_.get_next(element_); return *this; @@ -231,7 +231,7 @@ class intrusive_list } inline const const_iterator & - operator--(void) noexcept + operator--() noexcept { if (element_) { diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 93fda72cb..1a6dbcead 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -11,7 +11,7 @@ #include static int -types_bitstring_arithmetic_test_bitand(void) +types_bitstring_arithmetic_test_bitand() { using namespace jlm::rvsdg; @@ -39,7 +39,7 @@ types_bitstring_arithmetic_test_bitand(void) } static int -types_bitstring_arithmetic_test_bitashr(void) +types_bitstring_arithmetic_test_bitashr() { using namespace jlm::rvsdg; @@ -78,7 +78,7 @@ types_bitstring_arithmetic_test_bitashr(void) } static int -types_bitstring_arithmetic_test_bitdifference(void) +types_bitstring_arithmetic_test_bitdifference() { using namespace jlm::rvsdg; @@ -101,7 +101,7 @@ types_bitstring_arithmetic_test_bitdifference(void) } static int -types_bitstring_arithmetic_test_bitnegate(void) +types_bitstring_arithmetic_test_bitnegate() { using namespace jlm::rvsdg; @@ -129,7 +129,7 @@ types_bitstring_arithmetic_test_bitnegate(void) } static int -types_bitstring_arithmetic_test_bitnot(void) +types_bitstring_arithmetic_test_bitnot() { using namespace 
jlm::rvsdg; @@ -157,7 +157,7 @@ types_bitstring_arithmetic_test_bitnot(void) } static int -types_bitstring_arithmetic_test_bitor(void) +types_bitstring_arithmetic_test_bitor() { using namespace jlm::rvsdg; @@ -185,7 +185,7 @@ types_bitstring_arithmetic_test_bitor(void) } static int -types_bitstring_arithmetic_test_bitproduct(void) +types_bitstring_arithmetic_test_bitproduct() { using namespace jlm::rvsdg; @@ -214,7 +214,7 @@ types_bitstring_arithmetic_test_bitproduct(void) } static int -types_bitstring_arithmetic_test_bitshiproduct(void) +types_bitstring_arithmetic_test_bitshiproduct() { using namespace jlm::rvsdg; @@ -237,7 +237,7 @@ types_bitstring_arithmetic_test_bitshiproduct(void) } static int -types_bitstring_arithmetic_test_bitshl(void) +types_bitstring_arithmetic_test_bitshl() { using namespace jlm::rvsdg; @@ -269,7 +269,7 @@ types_bitstring_arithmetic_test_bitshl(void) } static int -types_bitstring_arithmetic_test_bitshr(void) +types_bitstring_arithmetic_test_bitshr() { using namespace jlm::rvsdg; @@ -301,7 +301,7 @@ types_bitstring_arithmetic_test_bitshr(void) } static int -types_bitstring_arithmetic_test_bitsmod(void) +types_bitstring_arithmetic_test_bitsmod() { using namespace jlm::rvsdg; @@ -330,7 +330,7 @@ types_bitstring_arithmetic_test_bitsmod(void) } static int -types_bitstring_arithmetic_test_bitsquotient(void) +types_bitstring_arithmetic_test_bitsquotient() { using namespace jlm::rvsdg; @@ -359,7 +359,7 @@ types_bitstring_arithmetic_test_bitsquotient(void) } static int -types_bitstring_arithmetic_test_bitsum(void) +types_bitstring_arithmetic_test_bitsum() { using namespace jlm::rvsdg; @@ -388,7 +388,7 @@ types_bitstring_arithmetic_test_bitsum(void) } static int -types_bitstring_arithmetic_test_bituhiproduct(void) +types_bitstring_arithmetic_test_bituhiproduct() { using namespace jlm::rvsdg; @@ -411,7 +411,7 @@ types_bitstring_arithmetic_test_bituhiproduct(void) } static int -types_bitstring_arithmetic_test_bitumod(void) 
+types_bitstring_arithmetic_test_bitumod() { using namespace jlm::rvsdg; @@ -440,7 +440,7 @@ types_bitstring_arithmetic_test_bitumod(void) } static int -types_bitstring_arithmetic_test_bituquotient(void) +types_bitstring_arithmetic_test_bituquotient() { using namespace jlm::rvsdg; @@ -469,7 +469,7 @@ types_bitstring_arithmetic_test_bituquotient(void) } static int -types_bitstring_arithmetic_test_bitxor(void) +types_bitstring_arithmetic_test_bitxor() { using namespace jlm::rvsdg; @@ -513,7 +513,7 @@ expect_static_false(jlm::rvsdg::output * port) } static int -types_bitstring_comparison_test_bitequal(void) +types_bitstring_comparison_test_bitequal() { using namespace jlm::rvsdg; @@ -547,7 +547,7 @@ types_bitstring_comparison_test_bitequal(void) } static int -types_bitstring_comparison_test_bitnotequal(void) +types_bitstring_comparison_test_bitnotequal() { using namespace jlm::rvsdg; @@ -581,7 +581,7 @@ types_bitstring_comparison_test_bitnotequal(void) } static int -types_bitstring_comparison_test_bitsgreater(void) +types_bitstring_comparison_test_bitsgreater() { using namespace jlm::rvsdg; @@ -619,7 +619,7 @@ types_bitstring_comparison_test_bitsgreater(void) } static int -types_bitstring_comparison_test_bitsgreatereq(void) +types_bitstring_comparison_test_bitsgreatereq() { using namespace jlm::rvsdg; @@ -660,7 +660,7 @@ types_bitstring_comparison_test_bitsgreatereq(void) } static int -types_bitstring_comparison_test_bitsless(void) +types_bitstring_comparison_test_bitsless() { using namespace jlm::rvsdg; @@ -698,7 +698,7 @@ types_bitstring_comparison_test_bitsless(void) } static int -types_bitstring_comparison_test_bitslesseq(void) +types_bitstring_comparison_test_bitslesseq() { using namespace jlm::rvsdg; @@ -739,7 +739,7 @@ types_bitstring_comparison_test_bitslesseq(void) } static int -types_bitstring_comparison_test_bitugreater(void) +types_bitstring_comparison_test_bitugreater() { using namespace jlm::rvsdg; @@ -777,7 +777,7 @@ 
types_bitstring_comparison_test_bitugreater(void) } static int -types_bitstring_comparison_test_bitugreatereq(void) +types_bitstring_comparison_test_bitugreatereq() { using namespace jlm::rvsdg; @@ -818,7 +818,7 @@ types_bitstring_comparison_test_bitugreatereq(void) } static int -types_bitstring_comparison_test_bituless(void) +types_bitstring_comparison_test_bituless() { using namespace jlm::rvsdg; @@ -856,7 +856,7 @@ types_bitstring_comparison_test_bituless(void) } static int -types_bitstring_comparison_test_bitulesseq(void) +types_bitstring_comparison_test_bitulesseq() { using namespace jlm::rvsdg; @@ -925,7 +925,7 @@ types_bitstring_comparison_test_bitulesseq(void) "11111111" static int -types_bitstring_test_constant(void) +types_bitstring_test_constant() { using namespace jlm::rvsdg; @@ -961,7 +961,7 @@ types_bitstring_test_constant(void) } static int -types_bitstring_test_normalize(void) +types_bitstring_test_normalize() { using namespace jlm::rvsdg; @@ -1020,7 +1020,7 @@ assert_constant(jlm::rvsdg::output * bitstr, size_t nbits, const char bits[]) } static int -types_bitstring_test_reduction(void) +types_bitstring_test_reduction() { using namespace jlm::rvsdg; @@ -1074,7 +1074,7 @@ types_bitstring_test_reduction(void) } static int -types_bitstring_test_slice_concat(void) +types_bitstring_test_slice_concat() { using namespace jlm::rvsdg; diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 1edfd31e6..40c9aa9c8 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -12,7 +12,7 @@ #include static void -test_gamma(void) +test_gamma() { using namespace jlm::rvsdg; @@ -49,7 +49,7 @@ test_gamma(void) } static void -test_predicate_reduction(void) +test_predicate_reduction() { using namespace jlm::rvsdg; @@ -81,7 +81,7 @@ test_predicate_reduction(void) } static void -test_invariant_reduction(void) +test_invariant_reduction() { using namespace jlm::rvsdg; diff --git a/tests/jlm/rvsdg/test-graph.cpp 
b/tests/jlm/rvsdg/test-graph.cpp index 6f8c206e7..c959a5d6e 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -66,7 +66,7 @@ test_recursive_prune() JLM_UNIT_TEST_REGISTER("rvsdg/test-graph_prune", test_recursive_prune) static int -test_empty_graph_pruning(void) +test_empty_graph_pruning() { jlm::rvsdg::Graph graph; @@ -84,7 +84,7 @@ test_empty_graph_pruning(void) JLM_UNIT_TEST_REGISTER("rvsdg/test-empty_graph_pruning", test_empty_graph_pruning) static int -test_prune_replace(void) +test_prune_replace() { using namespace jlm::rvsdg; @@ -113,7 +113,7 @@ test_prune_replace(void) JLM_UNIT_TEST_REGISTER("rvsdg/test-prune-replace", test_prune_replace) static int -test_graph(void) +test_graph() { using namespace jlm::rvsdg; diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 2da66e3da..859cce484 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -10,7 +10,7 @@ #include static void -test_node_copy(void) +test_node_copy() { using namespace jlm::rvsdg; using namespace jlm::tests; diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index 82ee7da42..3ab31de1b 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ b/tests/jlm/rvsdg/test-statemux.cpp @@ -81,7 +81,7 @@ test_multiple_origin_reduction() } static int -test_main(void) +test_main() { test_mux_mux_reduction(); test_multiple_origin_reduction(); diff --git a/tests/jlm/rvsdg/test-topdown.cpp b/tests/jlm/rvsdg/test-topdown.cpp index d69c2ba22..64575f88a 100644 --- a/tests/jlm/rvsdg/test-topdown.cpp +++ b/tests/jlm/rvsdg/test-topdown.cpp @@ -190,7 +190,7 @@ test_mutable_traverse() } static int -test_main(void) +test_main() { test_initialization(); test_basic_traversal(); diff --git a/tests/jlm/rvsdg/test-typemismatch.cpp b/tests/jlm/rvsdg/test-typemismatch.cpp index ab55cb267..db6d79101 100644 --- a/tests/jlm/rvsdg/test-typemismatch.cpp +++ b/tests/jlm/rvsdg/test-typemismatch.cpp @@ -9,7 +9,7 @@ 
#include "test-types.hpp" static int -test_main(void) +test_main() { using namespace jlm::rvsdg; diff --git a/tests/jlm/util/test-intrusive-hash.cpp b/tests/jlm/util/test-intrusive-hash.cpp index d1f90a9b5..fa1a470ca 100644 --- a/tests/jlm/util/test-intrusive-hash.cpp +++ b/tests/jlm/util/test-intrusive-hash.cpp @@ -82,7 +82,7 @@ struct my_stritem typedef jlm::util::intrusive_hash my_strhash; static void -test_int_hash(void) +test_int_hash() { my_hash m; @@ -118,7 +118,7 @@ test_int_hash(void) } static void -test_str_hash(void) +test_str_hash() { my_strhash m; @@ -154,7 +154,7 @@ test_str_hash(void) } static int -test_main(void) +test_main() { test_int_hash(); test_str_hash(); diff --git a/tests/jlm/util/test-intrusive-list.cpp b/tests/jlm/util/test-intrusive-list.cpp index cb8ebb9ff..c41c8617c 100644 --- a/tests/jlm/util/test-intrusive-list.cpp +++ b/tests/jlm/util/test-intrusive-list.cpp @@ -39,7 +39,7 @@ typedef jlm::util::intrusive_list my_list; typedef jlm::util::owner_intrusive_list my_owner_list; static void -test_simple_list(void) +test_simple_list() { my_list l; @@ -80,7 +80,7 @@ test_simple_list(void) } static void -test_owner_list(void) +test_owner_list() { int v1 = 1; int v2 = 2; @@ -143,7 +143,7 @@ test_owner_list(void) } static int -test_main(void) +test_main() { test_simple_list(); test_owner_list(); From 97fab3ba9b591bbf4ef5982ab72af1953cf357da Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Tue, 24 Dec 2024 12:28:09 +0100 Subject: [PATCH 141/170] Add unit test for HLS memory queue (#692) --- jlm/hls/Makefile.sub | 1 + jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 25 +- .../backend/rvsdg2rhls/MemoryQueueTests.cpp | 281 ++++++++++++++++++ 3 files changed, 292 insertions(+), 15 deletions(-) create mode 100644 tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp diff --git a/jlm/hls/Makefile.sub b/jlm/hls/Makefile.sub index 96ddd01e5..4177eb99b 100644 --- a/jlm/hls/Makefile.sub +++ b/jlm/hls/Makefile.sub @@ -82,6 +82,7 @@ libhls_HEADERS = \ 
libhls_TESTS += \ tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests \ tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests \ + tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests \ tests/jlm/hls/backend/rvsdg2rhls/TestFork \ tests/jlm/hls/backend/rvsdg2rhls/TestGamma \ tests/jlm/hls/backend/rvsdg2rhls/TestTheta \ diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index c23c7b7b2..9aa8f6d7d 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -165,8 +165,7 @@ route_to_region(jlm::rvsdg::Region * target, jlm::rvsdg::output * out) out_regions.pop_front(); } JLM_ASSERT(common_region != nullptr); - auto common_loop = dynamic_cast(common_region->node()); - JLM_ASSERT(common_loop); + auto common_loop = jlm::util::AssertedCast(common_region->node()); // route out to convergence point from out jlm::rvsdg::output * common_out = jlm::hls::route_request(common_region, out); // add a backedge to prevent cycles @@ -211,8 +210,7 @@ separate_load_edge( } else if (auto sti = dynamic_cast(user)) { - auto loop_node = dynamic_cast(sti->node()); - JLM_ASSERT(loop_node); + auto loop_node = jlm::util::AssertedCast(sti->node()); jlm::rvsdg::output * buffer; addr_edge = loop_node->add_loopvar(addr_edge, &buffer); @@ -272,10 +270,9 @@ separate_load_edge( } // create mux JLM_ASSERT(mem_edge->nusers() == 1); - auto mux_user = dynamic_cast(*mem_edge->begin()); - JLM_ASSERT(mux_user); - auto mux_op = dynamic_cast(&mux_user->node()->GetOperation()); - JLM_ASSERT(mux_op); + auto mux_user = jlm::util::AssertedCast(*mem_edge->begin()); + auto mux_op = + jlm::util::AssertedCast(&mux_user->node()->GetOperation()); addr_edge = jlm::hls::mux_op::create( *mux_user->node()->input(0)->origin(), load_branch_out, @@ -287,8 +284,7 @@ separate_load_edge( else { // end of loop - auto load_user_input = dynamic_cast(addr_edge_user); - JLM_ASSERT(load_user_input); + auto load_user_input = 
jlm::util::AssertedCast(addr_edge_user); JLM_ASSERT( dynamic_cast(&load_user_input->node()->GetOperation())); return nullptr; @@ -460,8 +456,7 @@ process_loops(jlm::rvsdg::output * state_edge) } else if (auto sti = dynamic_cast(user)) { - auto ln = dynamic_cast(sti->node()); - JLM_ASSERT(ln); + JLM_ASSERT(jlm::rvsdg::is(sti->node())); // update to output of loop auto mem_edge_after_loop = find_loop_output(sti); JLM_ASSERT(mem_edge_after_loop->nusers() == 1); @@ -525,7 +520,8 @@ process_loops(jlm::rvsdg::output * state_edge) void jlm::hls::mem_queue(jlm::rvsdg::Region * region) { - auto lambda = dynamic_cast(region->Nodes().begin().ptr()); + auto lambda = + jlm::util::AssertedCast(region->Nodes().begin().ptr()); auto state_arg = GetMemoryStateArgument(*lambda); if (!state_arg) { @@ -534,8 +530,7 @@ jlm::hls::mem_queue(jlm::rvsdg::Region * region) } JLM_ASSERT(state_arg->nusers() == 1); auto state_user = *state_arg->begin(); - auto entry_input = dynamic_cast(state_user); - JLM_ASSERT(entry_input); + auto entry_input = jlm::util::AssertedCast(state_user); auto entry_node = entry_input->node(); JLM_ASSERT(dynamic_cast( &entry_node->GetOperation())); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp new file mode 100644 index 000000000..c6f78f45f --- /dev/null +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp @@ -0,0 +1,281 @@ +/* + * Copyright 2024 Magnus Sjalander + * See COPYING for terms of redistribution. 
+ */ + +#include "test-registry.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +static int +TestSingleLoad() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }, + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Theta + auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); + auto constant = jlm::rvsdg::create_bitconstant(theta->subregion(), 1, 1); + auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, constant); + theta->set_predicate(match); + + // Load node + auto functionArguments = lambda->GetFunctionArguments(); + auto loadAddress = theta->add_loopvar(functionArguments[0]); + auto memoryStateArgument = theta->add_loopvar(functionArguments[1]); + auto loadOutput = LoadNonVolatileNode::Create( + loadAddress->argument(), + { memoryStateArgument->argument() }, + PointerType::Create(), + 32); + loadAddress->result()->divert_to(loadOutput[0]); + memoryStateArgument->result()->divert_to(loadOutput[1]); + + auto lambdaOutput = lambda->finalize({ theta->output(0), theta->output(1) }); + GraphExport::Create(*lambdaOutput, "f"); + + auto lambdaRegion = lambda->subregion(); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Act + mem_sep_argument(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + auto * const entryMemoryStateSplitInput = *lambdaRegion->argument(1)->begin(); + auto * entryMemoryStateSplitNode = jlm::rvsdg::input::GetNode(*entryMemoryStateSplitInput); + 
jlm::util::AssertedCast( + &entryMemoryStateSplitNode->GetOperation()); + auto exitMemoryStateMergeNode = + jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); + jlm::util::AssertedCast( + &exitMemoryStateMergeNode->GetOperation()); + + // Act + ConvertThetaNodes(*rvsdgModule); + // Simple assert as ConvertThetaNodes() is tested in separate unit tests + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + // Act + mem_queue(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(!jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryQueueTests-SingleLoad", TestSingleLoad) + +static int +TestLoadStore() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::llvm::PointerType::Create(), + jlm::llvm::PointerType::Create(), + MemoryStateType::Create() }, + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Theta + auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); + auto constant = jlm::rvsdg::create_bitconstant(theta->subregion(), 1, 1); + auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, constant); + theta->set_predicate(match); + + // Load node + auto functionArguments = lambda->GetFunctionArguments(); + auto loadAddress = theta->add_loopvar(functionArguments[0]); + auto storeAddress = theta->add_loopvar(functionArguments[1]); + auto memoryStateArgument = 
theta->add_loopvar(functionArguments[2]); + auto loadOutput = LoadNonVolatileNode::Create( + loadAddress->argument(), + { memoryStateArgument->argument() }, + PointerType::Create(), + 32); + auto storeOutput = StoreNonVolatileNode::Create( + storeAddress->argument(), + jlm::rvsdg::create_bitconstant(theta->subregion(), 32, 1), + { loadOutput[1] }, + 32); + + loadAddress->result()->divert_to(loadOutput[0]); + memoryStateArgument->result()->divert_to(storeOutput[0]); + + auto lambdaOutput = lambda->finalize({ theta->output(0), theta->output(2) }); + GraphExport::Create(*lambdaOutput, "f"); + + auto lambdaRegion = lambda->subregion(); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Act + mem_sep_argument(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + auto * const entryMemoryStateSplitInput = *lambdaRegion->argument(2)->begin(); + auto * entryMemoryStateSplitNode = jlm::rvsdg::input::GetNode(*entryMemoryStateSplitInput); + jlm::util::AssertedCast( + &entryMemoryStateSplitNode->GetOperation()); + auto exitMemoryStateMergeNode = + jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); + jlm::util::AssertedCast( + &exitMemoryStateMergeNode->GetOperation()); + + // Act + ConvertThetaNodes(*rvsdgModule); + // Simple assert as ConvertThetaNodes() is tested in separate unit tests + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + // Act + mem_queue(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(!jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + return 0; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryQueueTests-LoadStore", TestLoadStore) + +static int +TestAddrQueue() +{ + using namespace jlm::llvm; + using namespace jlm::hls; + + auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); + auto nf = 
rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + nf->set_mutable(false); + + // Setup the function + std::cout << "Function Setup" << std::endl; + auto functionType = FunctionType::Create( + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }, + { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); + + auto lambda = lambda::node::create( + rvsdgModule->Rvsdg().root(), + functionType, + "test", + linkage::external_linkage); + + // Theta + auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); + auto constant = jlm::rvsdg::create_bitconstant(theta->subregion(), 1, 1); + auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, constant); + theta->set_predicate(match); + + // Load node + auto functionArguments = lambda->GetFunctionArguments(); + auto address = theta->add_loopvar(functionArguments[0]); + auto memoryStateArgument = theta->add_loopvar(functionArguments[1]); + auto loadOutput = LoadNonVolatileNode::Create( + address->argument(), + { memoryStateArgument->argument() }, + PointerType::Create(), + 32); + auto storeOutput = + StoreNonVolatileNode::Create(address->argument(), loadOutput[0], { loadOutput[1] }, 32); + + address->result()->divert_to(loadOutput[0]); + memoryStateArgument->result()->divert_to(storeOutput[0]); + + auto lambdaOutput = lambda->finalize({ theta->output(0), theta->output(1) }); + GraphExport::Create(*lambdaOutput, "f"); + + auto lambdaRegion = lambda->subregion(); + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + + // Act + mem_sep_argument(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + auto * const entryMemoryStateSplitInput = *lambdaRegion->argument(1)->begin(); + auto * entryMemoryStateSplitNode = jlm::rvsdg::input::GetNode(*entryMemoryStateSplitInput); + jlm::util::AssertedCast( + &entryMemoryStateSplitNode->GetOperation()); + auto exitMemoryStateMergeNode = + jlm::util::AssertedCast(lambdaRegion->result(1)->origin())->node(); + 
jlm::util::AssertedCast( + &exitMemoryStateMergeNode->GetOperation()); + + // Act + ConvertThetaNodes(*rvsdgModule); + // Simple assert as ConvertThetaNodes() is tested in separate unit tests + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + // Act + mem_queue(*rvsdgModule); + // Assert + jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + assert(jlm::rvsdg::Region::Contains(*lambdaRegion, true)); + + for (auto & node : jlm::rvsdg::topdown_traverser(lambdaRegion)) + { + if (auto loopNode = dynamic_cast(node)) + { + for (auto & node : jlm::rvsdg::topdown_traverser(loopNode->subregion())) + { + if (auto storeNode = dynamic_cast(node)) + { + auto loadNode = + jlm::util::AssertedCast(storeNode->input(1)->origin()) + ->node(); + jlm::util::AssertedCast(loadNode); + auto stateGate = + jlm::util::AssertedCast(loadNode->input(0)->origin()) + ->node(); + jlm::util::AssertedCast(&stateGate->GetOperation()); + auto addrQueue = + jlm::util::AssertedCast(stateGate->input(0)->origin()) + ->node(); + jlm::util::AssertedCast(&addrQueue->GetOperation()); + return 0; + } + } + } + } + + return 1; +} +JLM_UNIT_TEST_REGISTER("jlm/hls/backend/rvsdg2rhls/MemoryQueueTests-AddrQueue", TestAddrQueue) From d5322aac50f377eadc0b5f2b0f02d727260651a2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 24 Dec 2024 13:17:43 +0100 Subject: [PATCH 142/170] Add misc-unused-parameters check to clang-tidy (#697) --- .clang-tidy | 2 + .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 2 +- jlm/hls/ir/hls.hpp | 8 +-- jlm/hls/opt/cne.cpp | 4 +- jlm/llvm/backend/jlm2llvm/instruction.cpp | 38 +++++----- jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp | 4 +- .../frontend/ControlFlowRestructuring.cpp | 3 +- .../InterProceduralGraphConversion.cpp | 13 ++-- .../frontend/LlvmInstructionConversion.cpp | 12 ++-- jlm/llvm/frontend/LlvmTypeConversion.cpp | 4 +- jlm/llvm/ir/RvsdgModule.cpp | 2 +- 
jlm/llvm/ir/aggregation.cpp | 4 +- jlm/llvm/ir/operators/Load.cpp | 2 +- jlm/llvm/ir/operators/operators.cpp | 69 ++++++++----------- jlm/llvm/opt/RvsdgTreePrinter.cpp | 1 + .../AgnosticMemoryNodeProvider.cpp | 8 +-- jlm/llvm/opt/cne.cpp | 4 +- jlm/llvm/opt/pull.cpp | 2 +- jlm/llvm/opt/unroll.cpp | 2 +- jlm/rvsdg/bitstring/bitoperation-classes.cpp | 2 +- jlm/rvsdg/bitstring/concat.cpp | 4 +- jlm/rvsdg/gamma.cpp | 2 +- jlm/rvsdg/node-normal-form.cpp | 2 +- jlm/rvsdg/traverser.cpp | 4 +- jlm/rvsdg/view.cpp | 2 +- jlm/tooling/Command.cpp | 4 +- jlm/util/GraphWriter.cpp | 6 +- tests/jlm/llvm/frontend/llvm/TestFNeg.cpp | 2 +- tests/jlm/llvm/ir/TestAnnotation.cpp | 2 +- tests/jlm/llvm/ir/operators/TestPhi.cpp | 4 +- .../opt/alias-analyses/TestPointsToGraph.cpp | 4 +- .../TestRegionAwareMemoryNodeProvider.cpp | 2 +- tests/jlm/rvsdg/RegionTests.cpp | 10 +-- tests/jlm/rvsdg/test-binary.cpp | 4 +- tests/jlm/rvsdg/test-nodes.cpp | 10 +-- tests/jlm/rvsdg/test-theta.cpp | 2 +- tests/test-operation.cpp | 15 ++-- 37 files changed, 123 insertions(+), 142 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index dd474def7..71006fce4 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,10 +1,12 @@ --- Checks: '-*, + misc-unused-parameters, modernize-deprecated-headers, modernize-redundant-void-arg, ' WarningsAsErrors: ' + misc-unused-parameters, modernize-deprecated-headers, modernize-redundant-void-arg, ' \ No newline at end of file diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 373edff25..9904022de 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -43,7 +43,7 @@ class RhlsToFirrtlConverter : public BaseHLS public: std::string - get_text(llvm::RvsdgModule & rvsdgModule) override + get_text(llvm::RvsdgModule &) override { return "MLIR/FIRRTL generator"; } diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index 
56ce261b1..b67444dfc 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -546,7 +546,7 @@ class print_op final : public rvsdg::SimpleOperation } bool - operator==(const Operation & other) const noexcept override + operator==(const Operation &) const noexcept override { // auto ot = dynamic_cast(&other); // check predicate and value @@ -1176,7 +1176,7 @@ class mem_resp_op final : public rvsdg::SimpleOperation } static std::vector> - CreateInTypes(const std::vector> & output_types) + CreateInTypes(const std::vector> &) { size_t max_width = 64; // TODO: calculate size onece JlmSize is moved @@ -1284,7 +1284,7 @@ class mem_req_op final : public rvsdg::SimpleOperation static std::vector> CreateOutTypes( - const std::vector> & load_types, + const std::vector> &, const std::vector> & store_types) { size_t max_width = 64; @@ -1455,7 +1455,7 @@ class local_mem_op final : public rvsdg::SimpleOperation {} bool - operator==(const Operation & other) const noexcept override + operator==(const Operation &) const noexcept override { // TODO: // auto ot = dynamic_cast(&other); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index a4b7e53f6..1390dbe5e 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -405,7 +405,7 @@ mark_phi(const rvsdg::StructuralNode * node, cnectx & ctx) } static void -mark_delta(const rvsdg::StructuralNode * node, cnectx & ctx) +mark_delta(const rvsdg::StructuralNode * node, cnectx &) { JLM_ASSERT(jlm::rvsdg::is(node)); } @@ -567,7 +567,7 @@ divert_phi(rvsdg::StructuralNode * node, cnectx & ctx) } static void -divert_delta(rvsdg::StructuralNode * node, cnectx & ctx) +divert_delta(rvsdg::StructuralNode * node, cnectx &) { JLM_ASSERT(jlm::rvsdg::is(node)); } diff --git a/jlm/llvm/backend/jlm2llvm/instruction.cpp b/jlm/llvm/backend/jlm2llvm/instruction.cpp index 38c170c5b..5272ccc73 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.cpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.cpp @@ -35,7 +35,7 @@ static inline ::llvm::Value * 
convert_assignment( const rvsdg::SimpleOperation & op, const std::vector & args, - ::llvm::IRBuilder<> & builder, + ::llvm::IRBuilder<> &, context & ctx) { JLM_ASSERT(is(op)); @@ -115,7 +115,7 @@ convert_bitconstant( const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, - context & ctx) + context &) { JLM_ASSERT(dynamic_cast(&op)); auto value = static_cast(&op)->value(); @@ -133,7 +133,7 @@ convert_ctlconstant( const rvsdg::SimpleOperation & op, const std::vector &, ::llvm::IRBuilder<> & builder, - context & ctx) + context &) { JLM_ASSERT(is_ctlconstant_op(op)); auto & cop = *static_cast(&op); @@ -148,7 +148,7 @@ convert( const ConstantFP & op, const std::vector &, ::llvm::IRBuilder<> & builder, - context & ctx) + context &) { return ::llvm::ConstantFP::get(builder.getContext(), op.constant()); } @@ -157,7 +157,7 @@ static inline ::llvm::Value * convert_undef( const rvsdg::SimpleOperation & op, const std::vector &, - ::llvm::IRBuilder<> & builder, + ::llvm::IRBuilder<> &, context & ctx) { JLM_ASSERT(is(op)); @@ -251,8 +251,8 @@ static inline ::llvm::Value * convert_branch( const rvsdg::SimpleOperation & op, const std::vector &, - ::llvm::IRBuilder<> & builder, - context & ctx) + ::llvm::IRBuilder<> &, + context &) { JLM_ASSERT(is(op)); return nullptr; @@ -492,7 +492,7 @@ static ::llvm::Value * convert( const ConstantArray & op, const std::vector & operands, - ::llvm::IRBuilder<> & builder, + ::llvm::IRBuilder<> &, context & ctx) { JLM_ASSERT(is(op)); @@ -513,8 +513,8 @@ convert( static ::llvm::Value * convert( const ConstantAggregateZero & op, - const std::vector & args, - ::llvm::IRBuilder<> & builder, + const std::vector &, + ::llvm::IRBuilder<> &, context & ctx) { auto type = convert_type(*op.result(0), ctx); @@ -617,9 +617,9 @@ convert_fpneg( static inline ::llvm::Value * convert_valist( const rvsdg::SimpleOperation & op, - const std::vector & args, - ::llvm::IRBuilder<> & builder, - context & ctx) + const std::vector &, + 
::llvm::IRBuilder<> &, + context &) { JLM_ASSERT(is(op)); return nullptr; @@ -629,7 +629,7 @@ static inline ::llvm::Value * convert( const ConstantStruct & op, const std::vector & args, - ::llvm::IRBuilder<> & builder, + ::llvm::IRBuilder<> &, context & ctx) { std::vector<::llvm::Constant *> operands; @@ -643,8 +643,8 @@ convert( static inline ::llvm::Value * convert( const ConstantPointerNullOperation & operation, - const std::vector & args, - ::llvm::IRBuilder<> & builder, + const std::vector &, + ::llvm::IRBuilder<> &, context & ctx) { auto pointerType = convert_type(operation.GetPointerType(), ctx); @@ -674,7 +674,7 @@ static inline ::llvm::Value * convert_ctl2bits( const rvsdg::SimpleOperation & op, const std::vector & args, - ::llvm::IRBuilder<> & builder, + ::llvm::IRBuilder<> &, context & ctx) { JLM_ASSERT(is(op)); @@ -685,7 +685,7 @@ static ::llvm::Value * convert_constantvector( const rvsdg::SimpleOperation & op, const std::vector & operands, - ::llvm::IRBuilder<> & builder, + ::llvm::IRBuilder<> &, context & ctx) { JLM_ASSERT(is(op)); @@ -820,7 +820,7 @@ convert_vectorbinary( static ::llvm::Value * convert( - const vectorselect_op & op, + const vectorselect_op &, const std::vector & operands, ::llvm::IRBuilder<> & builder, context & ctx) diff --git a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp index 108e0eb81..396a1ed1e 100644 --- a/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp +++ b/jlm/llvm/backend/jlm2llvm/jlm2llvm.cpp @@ -497,7 +497,7 @@ convert_linkage(const llvm::linkage & linkage) } static void -convert_ipgraph(const llvm::ipgraph & clg, context & ctx) +convert_ipgraph(context & ctx) { auto & jm = ctx.module(); auto & lm = ctx.llvm_module(); @@ -558,7 +558,7 @@ convert(ipgraph_module & im, ::llvm::LLVMContext & lctx) lm->setDataLayout(im.data_layout()); context ctx(im, *lm); - convert_ipgraph(im.ipgraph(), ctx); + convert_ipgraph(ctx); return lm; } diff --git a/jlm/llvm/frontend/ControlFlowRestructuring.cpp 
b/jlm/llvm/frontend/ControlFlowRestructuring.cpp index aaf196ab4..581ff96d3 100644 --- a/jlm/llvm/frontend/ControlFlowRestructuring.cpp +++ b/jlm/llvm/frontend/ControlFlowRestructuring.cpp @@ -195,7 +195,6 @@ static inline void restructure_loop_repetition( const sccstructure & s, cfg_node * new_nr, - cfg_node * new_nx, const tacvariable * ev, const tacvariable * rv) { @@ -275,7 +274,7 @@ restructure_loops(cfg_node * entry, cfg_node * exit, std::vector & loops restructure_loop_entry(*sccstruct, new_ne, ev); restructure_loop_exit(*sccstruct, new_nr, new_nx, exit, rv, xv); - restructure_loop_repetition(*sccstruct, new_nr, new_nr, ev, rv); + restructure_loop_repetition(*sccstruct, new_nr, ev, rv); restructure(new_ne, new_nr, loops); loops.push_back(extract_tcloop(new_ne, new_nr)); diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 10f6b4789..e4b6aee61 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -461,7 +461,7 @@ requiresExport(const ipgraph_node & ipgNode) static void ConvertAssignment( const llvm::tac & threeAddressCode, - rvsdg::Region & region, + rvsdg::Region &, llvm::VariableMap & variableMap) { JLM_ASSERT(is(threeAddressCode.operation())); @@ -492,10 +492,7 @@ ConvertSelect( } static void -ConvertBranch( - const llvm::tac & threeAddressCode, - rvsdg::Region & region, - llvm::VariableMap & variableMap) +ConvertBranch(const llvm::tac & threeAddressCode, rvsdg::Region &, llvm::VariableMap &) { JLM_ASSERT(is(threeAddressCode.operation())); /* @@ -645,7 +642,7 @@ Convert( static void Convert( const exitaggnode & exitAggregationNode, - const AnnotationMap & demandMap, + const AnnotationMap &, lambda::node & lambdaNode, RegionalizedVariableMap & regionalizedVariableMap) { @@ -663,8 +660,8 @@ Convert( static void Convert( const blockaggnode & blockAggregationNode, - const AnnotationMap & demandMap, - lambda::node & 
lambdaNode, + const AnnotationMap &, + lambda::node &, RegionalizedVariableMap & regionalizedVariableMap) { ConvertBasicBlock( diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index f3ec20ccb..72b9d8a58 100644 --- a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -66,7 +66,7 @@ static const variable * convert_int_constant( ::llvm::Constant * c, std::vector> & tacs, - context & ctx) + context &) { JLM_ASSERT(c->getValueID() == ::llvm::Value::ConstantIntVal); const ::llvm::ConstantInt * constant = static_cast(c); @@ -159,8 +159,8 @@ convert_constantPointerNull( static const variable * convert_blockAddress( ::llvm::Constant * constant, - std::vector> & tacs, - context & ctx) + std::vector> &, + context &) { JLM_ASSERT(constant->getValueID() == ::llvm::Value::BlockAddressVal); @@ -278,8 +278,8 @@ convert_constantVector( static inline const variable * convert_globalAlias( ::llvm::Constant * constant, - std::vector> & tacs, - context & ctx) + std::vector> &, + context &) { JLM_ASSERT(constant->getValueID() == ::llvm::Value::GlobalAliasVal); @@ -428,7 +428,7 @@ convert_switch_instruction(::llvm::Instruction * instruction, tacsvector_t & tac } static inline const variable * -convert_unreachable_instruction(::llvm::Instruction * i, tacsvector_t & tacs, context & ctx) +convert_unreachable_instruction(::llvm::Instruction * i, tacsvector_t &, context & ctx) { JLM_ASSERT(i->getOpcode() == ::llvm::Instruction::Unreachable); auto bb = ctx.get(i->getParent()); diff --git a/jlm/llvm/frontend/LlvmTypeConversion.cpp b/jlm/llvm/frontend/LlvmTypeConversion.cpp index db12cb7fc..b89c28894 100644 --- a/jlm/llvm/frontend/LlvmTypeConversion.cpp +++ b/jlm/llvm/frontend/LlvmTypeConversion.cpp @@ -32,7 +32,7 @@ ExtractFloatingPointSize(const ::llvm::Type * type) } static std::shared_ptr -convert_integer_type(const ::llvm::Type * t, context & ctx) +convert_integer_type(const 
::llvm::Type * t, context &) { JLM_ASSERT(t->getTypeID() == ::llvm::Type::IntegerTyID); auto * type = static_cast(t); @@ -73,7 +73,7 @@ convert_function_type(const ::llvm::Type * t, context & ctx) } static std::shared_ptr -convert_fp_type(const ::llvm::Type * t, context & ctx) +convert_fp_type(const ::llvm::Type * t, context &) { static const std::unordered_map<::llvm::Type::TypeID, fpsize> map( { { ::llvm::Type::HalfTyID, fpsize::half }, diff --git a/jlm/llvm/ir/RvsdgModule.cpp b/jlm/llvm/ir/RvsdgModule.cpp index 47fb4fac4..2e5cba7c0 100644 --- a/jlm/llvm/ir/RvsdgModule.cpp +++ b/jlm/llvm/ir/RvsdgModule.cpp @@ -9,7 +9,7 @@ namespace jlm::llvm { GraphImport & -GraphImport::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) +GraphImport::Copy(rvsdg::Region & region, rvsdg::StructuralInput *) { return GraphImport::Create(*region.graph(), ValueType(), Name(), Linkage()); } diff --git a/jlm/llvm/ir/aggregation.cpp b/jlm/llvm/ir/aggregation.cpp index 69cdcfd6a..1ca1a107a 100644 --- a/jlm/llvm/ir/aggregation.cpp +++ b/jlm/llvm/ir/aggregation.cpp @@ -295,7 +295,7 @@ reduce_loop(const sccstructure & sccstruct, aggregation_map & map) * Only the split node and the individual branch nodes are reduced. The join node is not reduced. 
*/ static cfg_node * -reduce_branch(cfg_node * split, cfg_node ** entry, cfg_node ** exit, aggregation_map & map) +reduce_branch(cfg_node * split, cfg_node ** entry, aggregation_map & map) { /* sanity checks */ JLM_ASSERT(split->noutedges() > 1); @@ -447,7 +447,7 @@ aggregate_acyclic_sese(cfg_node * node, cfg_node ** entry, cfg_node ** exit, agg */ if (is_branch(node)) { - auto sese = reduce_branch(node, entry, exit, map); + auto sese = reduce_branch(node, entry, map); aggregate_acyclic_sese(sese, entry, exit, map); return; } diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index ca8b64ca1..1ae3b98d7 100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -357,7 +357,7 @@ is_load_store_reducible( static std::vector perform_load_store_reduction( - const LoadNonVolatileOperation & op, + const LoadNonVolatileOperation &, const std::vector & operands) { auto storenode = rvsdg::output::GetNode(*operands[1]); diff --git a/jlm/llvm/ir/operators/operators.cpp b/jlm/llvm/ir/operators/operators.cpp index d76cf1076..335bbce33 100644 --- a/jlm/llvm/ir/operators/operators.cpp +++ b/jlm/llvm/ir/operators/operators.cpp @@ -141,13 +141,13 @@ fp2ui_op::copy() const } rvsdg::unop_reduction_path_t -fp2ui_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +fp2ui_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -fp2ui_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +fp2ui_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented"); } @@ -177,13 +177,13 @@ fp2si_op::copy() const } rvsdg::unop_reduction_path_t -fp2si_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +fp2si_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -fp2si_op::reduce_operand(rvsdg::unop_reduction_path_t path, 
rvsdg::output * operand) const +fp2si_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -282,13 +282,13 @@ bits2ptr_op::copy() const } rvsdg::unop_reduction_path_t -bits2ptr_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +bits2ptr_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -bits2ptr_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +bits2ptr_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -318,13 +318,13 @@ ptr2bits_op::copy() const } rvsdg::unop_reduction_path_t -ptr2bits_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +ptr2bits_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -ptr2bits_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +ptr2bits_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -384,17 +384,14 @@ ptrcmp_op::copy() const } rvsdg::binop_reduction_path_t -ptrcmp_op::can_reduce_operand_pair(const rvsdg::output * op1, const rvsdg::output * op2) - const noexcept +ptrcmp_op::can_reduce_operand_pair(const rvsdg::output *, const rvsdg::output *) const noexcept { return rvsdg::binop_reduction_none; } rvsdg::output * -ptrcmp_op::reduce_operand_pair( - rvsdg::binop_reduction_path_t path, - rvsdg::output * op1, - rvsdg::output * op2) const +ptrcmp_op::reduce_operand_pair(rvsdg::binop_reduction_path_t, rvsdg::output *, rvsdg::output *) + const { JLM_UNREACHABLE("Not implemented!"); } @@ -519,17 +516,13 @@ fpcmp_op::copy() const } rvsdg::binop_reduction_path_t -fpcmp_op::can_reduce_operand_pair(const rvsdg::output * op1, const rvsdg::output * op2) - const noexcept +fpcmp_op::can_reduce_operand_pair(const rvsdg::output 
*, const rvsdg::output *) const noexcept { return rvsdg::binop_reduction_none; } rvsdg::output * -fpcmp_op::reduce_operand_pair( - rvsdg::binop_reduction_path_t path, - rvsdg::output * op1, - rvsdg::output * op2) const +fpcmp_op::reduce_operand_pair(rvsdg::binop_reduction_path_t, rvsdg::output *, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -608,17 +601,13 @@ fpbin_op::copy() const } rvsdg::binop_reduction_path_t -fpbin_op::can_reduce_operand_pair(const rvsdg::output * op1, const rvsdg::output * op2) - const noexcept +fpbin_op::can_reduce_operand_pair(const rvsdg::output *, const rvsdg::output *) const noexcept { return rvsdg::binop_reduction_none; } rvsdg::output * -fpbin_op::reduce_operand_pair( - rvsdg::binop_reduction_path_t path, - rvsdg::output * op1, - rvsdg::output * op2) const +fpbin_op::reduce_operand_pair(rvsdg::binop_reduction_path_t, rvsdg::output *, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -648,13 +637,13 @@ fpext_op::copy() const } rvsdg::unop_reduction_path_t -fpext_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +fpext_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -fpext_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +fpext_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -684,13 +673,13 @@ fpneg_op::copy() const } rvsdg::unop_reduction_path_t -fpneg_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +fpneg_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -fpneg_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +fpneg_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -720,13 +709,13 @@ fptrunc_op::copy() const } 
rvsdg::unop_reduction_path_t -fptrunc_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +fptrunc_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -fptrunc_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +fptrunc_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -794,13 +783,13 @@ bitcast_op::copy() const } rvsdg::unop_reduction_path_t -bitcast_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +bitcast_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -bitcast_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +bitcast_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -854,13 +843,13 @@ trunc_op::copy() const } rvsdg::unop_reduction_path_t -trunc_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +trunc_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -trunc_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +trunc_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -890,13 +879,13 @@ uitofp_op::copy() const } rvsdg::unop_reduction_path_t -uitofp_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +uitofp_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -uitofp_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +uitofp_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } @@ -926,13 +915,13 @@ sitofp_op::copy() const } 
rvsdg::unop_reduction_path_t -sitofp_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +sitofp_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -sitofp_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +sitofp_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { JLM_UNREACHABLE("Not implemented!"); } diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 2a5bfa118..269b50e08 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -55,6 +55,7 @@ RvsdgTreePrinter::run(RvsdgModule & rvsdgModule, util::StatisticsCollector & sta WriteTreeToFile(rvsdgModule, tree); statistics->Stop(); + statisticsCollector.CollectDemandedStatistics(std::move(statistics)); } void diff --git a/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp index 4894a626e..40a134882 100644 --- a/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/AgnosticMemoryNodeProvider.cpp @@ -42,25 +42,25 @@ class AgnosticMemoryNodeProvisioning final : public MemoryNodeProvisioning } [[nodiscard]] const util::HashSet & - GetRegionEntryNodes(const rvsdg::Region & region) const override + GetRegionEntryNodes(const rvsdg::Region &) const override { return MemoryNodes_; } [[nodiscard]] const util::HashSet & - GetRegionExitNodes(const rvsdg::Region & region) const override + GetRegionExitNodes(const rvsdg::Region &) const override { return MemoryNodes_; } [[nodiscard]] const util::HashSet & - GetCallEntryNodes(const CallNode & callNode) const override + GetCallEntryNodes(const CallNode &) const override { return MemoryNodes_; } [[nodiscard]] const util::HashSet & - GetCallExitNodes(const CallNode & callNode) const override + GetCallExitNodes(const CallNode &) const override { return MemoryNodes_; } diff 
--git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 94e9016d0..23d9e24d7 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -367,7 +367,7 @@ mark_phi(const rvsdg::StructuralNode * node, cnectx & ctx) } static void -mark_delta(const rvsdg::StructuralNode * node, cnectx & ctx) +mark_delta(const rvsdg::StructuralNode * node, cnectx &) { JLM_ASSERT(jlm::rvsdg::is(node)); } @@ -520,7 +520,7 @@ divert_phi(rvsdg::StructuralNode * node, cnectx & ctx) } static void -divert_delta(rvsdg::StructuralNode * node, cnectx & ctx) +divert_delta(rvsdg::StructuralNode * node, cnectx &) { JLM_ASSERT(jlm::rvsdg::is(node)); } diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index 1b261b19a..edff19aa8 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -112,7 +112,7 @@ pullin_node(rvsdg::GammaNode * gamma, rvsdg::Node * node) } static void -cleanup(rvsdg::GammaNode * gamma, rvsdg::Node * node) +cleanup(rvsdg::GammaNode *, rvsdg::Node * node) { JLM_ASSERT(single_successor(node)); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index b81bfffef..197c34f60 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -348,7 +348,7 @@ create_unrolled_gamma_predicate(const unrollinfo & ui, size_t factor) static jlm::rvsdg::output * create_unrolled_theta_predicate( - rvsdg::Region * target, + rvsdg::Region *, const rvsdg::SubstitutionMap & smap, const unrollinfo & ui, size_t factor) diff --git a/jlm/rvsdg/bitstring/bitoperation-classes.cpp b/jlm/rvsdg/bitstring/bitoperation-classes.cpp index 4c4a7054b..297e00ec2 100644 --- a/jlm/rvsdg/bitstring/bitoperation-classes.cpp +++ b/jlm/rvsdg/bitstring/bitoperation-classes.cpp @@ -103,7 +103,7 @@ jlm::rvsdg::output * bitcompare_op::reduce_operand_pair( binop_reduction_path_t path, jlm::rvsdg::output * arg1, - jlm::rvsdg::output * arg2) const + jlm::rvsdg::output *) const { if (path == 1) { diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 
f250de767..6b0555f4d 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -144,7 +144,7 @@ class concat_normal_form final : public simple_normal_form virtual std::vector normalized_create( rvsdg::Region * region, - const SimpleOperation & op, + const SimpleOperation &, const std::vector & arguments) const override { std::vector new_args; @@ -229,7 +229,7 @@ concat_normal_form::~concat_normal_form() noexcept static node_normal_form * get_default_normal_form( - const std::type_info & operator_class, + const std::type_info &, jlm::rvsdg::node_normal_form * parent, Graph * graph) { diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 3cf53e929..898d0227b 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -393,7 +393,7 @@ GammaNode::MapBranchResultExitVar(const rvsdg::input & input) const } GammaNode * -GammaNode::copy(rvsdg::Region * region, SubstitutionMap & smap) const +GammaNode::copy(rvsdg::Region *, SubstitutionMap & smap) const { auto gamma = create(smap.lookup(predicate()->origin()), nsubregions()); diff --git a/jlm/rvsdg/node-normal-form.cpp b/jlm/rvsdg/node-normal-form.cpp index a808f65d3..3864db7d4 100644 --- a/jlm/rvsdg/node-normal-form.cpp +++ b/jlm/rvsdg/node-normal-form.cpp @@ -16,7 +16,7 @@ node_normal_form::~node_normal_form() noexcept {} bool -node_normal_form::normalize_node(Node * node) const +node_normal_form::normalize_node(Node *) const { return true; } diff --git a/jlm/rvsdg/traverser.cpp b/jlm/rvsdg/traverser.cpp index 379c98ac1..7a7ee2437 100644 --- a/jlm/rvsdg/traverser.cpp +++ b/jlm/rvsdg/traverser.cpp @@ -100,7 +100,7 @@ topdown_traverser::node_create(Node * node) } void -topdown_traverser::input_change(input * in, output * old_origin, output * new_origin) +topdown_traverser::input_change(input * in, output *, output *) { if (in->region() != region() || !is(*in)) return; @@ -189,7 +189,7 @@ bottomup_traverser::node_destroy(Node * node) } void -bottomup_traverser::input_change(input * in, 
output * old_origin, output * new_origin) +bottomup_traverser::input_change(input * in, output * old_origin, output *) { if (in->region() != region() || !is(*in) || !is(old_origin)) return; diff --git a/jlm/rvsdg/view.cpp b/jlm/rvsdg/view.cpp index 94bd1d0a1..e07b3c0ce 100644 --- a/jlm/rvsdg/view.cpp +++ b/jlm/rvsdg/view.cpp @@ -248,7 +248,7 @@ region_starttag(const std::string & id) } static inline std::string -region_endtag(const std::string & id) +region_endtag(const std::string &) { return "\n"; } diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index 8432d6722..e72f92d1a 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -448,9 +448,7 @@ JlmOptCommand::ParseLlvmIrFile( } std::unique_ptr -JlmOptCommand::ParseMlirIrFile( - const util::filepath & mlirIrFile, - util::StatisticsCollector & statisticsCollector) const +JlmOptCommand::ParseMlirIrFile(const util::filepath & mlirIrFile, util::StatisticsCollector &) const { #ifdef ENABLE_MLIR jlm::mlir::MlirToJlmConverter rvsdggen; diff --git a/jlm/util/GraphWriter.cpp b/jlm/util/GraphWriter.cpp index 3fd3dcf6f..acdce4b76 100644 --- a/jlm/util/GraphWriter.cpp +++ b/jlm/util/GraphWriter.cpp @@ -675,7 +675,7 @@ InOutNode::InOutNode(Graph & graph, size_t inputPorts, size_t outputPorts) } void -InOutNode::SetShape(std::string shape) +InOutNode::SetShape(std::string) { throw jlm::util::error("InOutNodes can not have custom shapes set"); } @@ -934,7 +934,7 @@ ArgumentNode::SetOutsideSource(const Port & outsideSource) } void -ArgumentNode::OutputASCII(std::ostream & out, size_t indent) const +ArgumentNode::OutputASCII(std::ostream & out, size_t) const { // In ASCII the argument is printed as part of an ARG line out << GetFullId(); @@ -975,7 +975,7 @@ ResultNode::SetOutsideDestination(const Port & outsideDestination) } void -ResultNode::OutputASCII(std::ostream & out, size_t indent) const +ResultNode::OutputASCII(std::ostream & out, size_t) const { // In ASCII the result is printed as part of an 
RES line OutputIncomingEdgesASCII(out); diff --git a/tests/jlm/llvm/frontend/llvm/TestFNeg.cpp b/tests/jlm/llvm/frontend/llvm/TestFNeg.cpp index cddcdd35b..6d5e0ffe6 100644 --- a/tests/jlm/llvm/frontend/llvm/TestFNeg.cpp +++ b/tests/jlm/llvm/frontend/llvm/TestFNeg.cpp @@ -17,7 +17,7 @@ template static bool -Contains(const jlm::llvm::ipgraph_module & module, const std::string & fctname) +Contains(const jlm::llvm::ipgraph_module & module, const std::string &) { using namespace jlm; diff --git a/tests/jlm/llvm/ir/TestAnnotation.cpp b/tests/jlm/llvm/ir/TestAnnotation.cpp index fc28974ad..8e6e26f4f 100644 --- a/tests/jlm/llvm/ir/TestAnnotation.cpp +++ b/tests/jlm/llvm/ir/TestAnnotation.cpp @@ -66,7 +66,7 @@ TestLinearSubgraphAnnotation() /* * Arrange */ - auto SetupAggregationTree = [](ipgraph_module & module, jlm::llvm::argument & argument) + auto SetupAggregationTree = [](ipgraph_module &, jlm::llvm::argument & argument) { /* * Setup simple linear CFG: Entry -> B1 -> B2 -> Exit diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index 5e334f608..5e2b7a194 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -145,7 +145,7 @@ TestRemovePhiArgumentsWhere() // Remove everything that is dead, i.e., phiArgument3 numRemovedArguments = phiNode.RemovePhiArgumentsWhere( - [&](const jlm::rvsdg::RegionArgument & argument) + [&](const jlm::rvsdg::RegionArgument &) { return true; }); @@ -245,7 +245,7 @@ TestRemovePhiOutputsWhere() assert(phiOutput2->index() == 1); numRemovedOutputs = phiNode.RemovePhiOutputsWhere( - [&](const phi::rvoutput & output) + [&](const phi::rvoutput &) { return true; }); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp index 2230801cb..8a92dcffc 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp @@ -17,9 +17,7 
@@ class TestAnalysis final : public jlm::llvm::aa::AliasAnalysis { public: std::unique_ptr - Analyze( - const jlm::llvm::RvsdgModule & rvsdgModule, - jlm::util::StatisticsCollector & statisticsCollector) override + Analyze(const jlm::llvm::RvsdgModule & rvsdgModule, jlm::util::StatisticsCollector &) override { PointsToGraph_ = jlm::llvm::aa::PointsToGraph::Create(); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp b/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp index 13e639c72..90571defe 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp @@ -221,7 +221,7 @@ TestLoadFromUndef() */ auto ValidateProvider = [](const jlm::tests::LoadFromUndefTest & test, const jlm::llvm::aa::MemoryNodeProvisioning & provisioning, - const jlm::llvm::aa::PointsToGraph & pointsToGraph) + const jlm::llvm::aa::PointsToGraph &) { auto numLambdaEntryNodes = provisioning.GetLambdaEntryNodes(test.Lambda()).Size(); auto numLambdaExitNodes = provisioning.GetLambdaExitNodes(test.Lambda()).Size(); diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 8f7117094..5e7d35918 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -213,7 +213,7 @@ RemoveResultsWhere() assert(result2.index() == 1); region.RemoveResultsWhere( - [](const jlm::rvsdg::RegionResult & result) + [](const jlm::rvsdg::RegionResult &) { return false; }); @@ -222,7 +222,7 @@ RemoveResultsWhere() assert(result2.index() == 1); region.RemoveResultsWhere( - [](const jlm::rvsdg::RegionResult & result) + [](const jlm::rvsdg::RegionResult &) { return true; }); @@ -259,7 +259,7 @@ RemoveArgumentsWhere() assert(argument2.index() == 2); region.RemoveArgumentsWhere( - [](const jlm::rvsdg::RegionArgument & argument) + [](const jlm::rvsdg::RegionArgument &) { return true; }); @@ -268,7 +268,7 @@ 
RemoveArgumentsWhere() region.remove_node(node); region.RemoveArgumentsWhere( - [](const jlm::rvsdg::RegionArgument & argument) + [](const jlm::rvsdg::RegionArgument &) { return false; }); @@ -469,7 +469,7 @@ BottomNodeTests() // And it becomes dead again rvsdg.root()->RemoveResultsWhere( - [](const RegionResult & result) + [](const RegionResult &) { return true; }); diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 64ba71182..792a0eaaf 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -39,7 +39,7 @@ class BinaryOperation final : public jlm::rvsdg::binary_op jlm::rvsdg::output * reduce_operand_pair( jlm::rvsdg::unop_reduction_path_t path, - jlm::rvsdg::output * op1, + jlm::rvsdg::output *, jlm::rvsdg::output * op2) const override { @@ -58,7 +58,7 @@ class BinaryOperation final : public jlm::rvsdg::binary_op } bool - operator==(const Operation & other) const noexcept override + operator==(const Operation &) const noexcept override { JLM_UNREACHABLE("Not implemented."); } diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 859cce484..5856577cb 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -136,14 +136,14 @@ TestRemoveOutputsWhere() // Act & Assert node2.RemoveOutputsWhere( - [](const jlm::rvsdg::output & output) + [](const jlm::rvsdg::output &) { return false; }); assert(node2.noutputs() == 2); node1.RemoveOutputsWhere( - [](const jlm::rvsdg::output & output) + [](const jlm::rvsdg::output &) { return true; }); @@ -154,7 +154,7 @@ TestRemoveOutputsWhere() assert(node1.output(1)->index() == 1); node2.RemoveOutputsWhere( - [](const jlm::rvsdg::output & output) + [](const jlm::rvsdg::output &) { return true; }); @@ -172,7 +172,7 @@ TestRemoveOutputsWhere() assert(node1.output(0)->index() == 0); node1.RemoveOutputsWhere( - [](const jlm::rvsdg::output & output) + [](const jlm::rvsdg::output &) { return true; }); @@ -205,7 +205,7 @@ 
TestRemoveInputsWhere() assert(node.input(1) == input2); node.RemoveInputsWhere( - [](const jlm::rvsdg::input & input) + [](const jlm::rvsdg::input &) { return true; }); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 11a6cddf2..0f4658bef 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -177,7 +177,7 @@ TestRemoveThetaInputsWhere() assert(thetaOutput2->argument()->index() == 1); deadOutputs = thetaNode->RemoveThetaInputsWhere( - [](const ThetaInput & input) + [](const ThetaInput &) { return true; }); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 9d93fb77d..52b390d96 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -9,7 +9,7 @@ namespace jlm::tests { GraphImport & -GraphImport::Copy(rvsdg::Region & region, rvsdg::StructuralInput * input) +GraphImport::Copy(rvsdg::Region & region, rvsdg::StructuralInput *) { return GraphImport::Create(*region.graph(), Type(), Name()); } @@ -32,13 +32,13 @@ unary_op::operator==(const Operation & other) const noexcept } rvsdg::unop_reduction_path_t -unary_op::can_reduce_operand(const rvsdg::output * operand) const noexcept +unary_op::can_reduce_operand(const rvsdg::output *) const noexcept { return rvsdg::unop_reduction_none; } rvsdg::output * -unary_op::reduce_operand(rvsdg::unop_reduction_path_t path, rvsdg::output * operand) const +unary_op::reduce_operand(rvsdg::unop_reduction_path_t, rvsdg::output *) const { return nullptr; } @@ -66,17 +66,14 @@ binary_op::operator==(const Operation & other) const noexcept } rvsdg::binop_reduction_path_t -binary_op::can_reduce_operand_pair(const rvsdg::output * op1, const rvsdg::output * op2) - const noexcept +binary_op::can_reduce_operand_pair(const rvsdg::output *, const rvsdg::output *) const noexcept { return rvsdg::binop_reduction_none; } rvsdg::output * -binary_op::reduce_operand_pair( - rvsdg::binop_reduction_path_t path, - rvsdg::output * op1, - rvsdg::output * op2) const 
+binary_op::reduce_operand_pair(rvsdg::binop_reduction_path_t, rvsdg::output *, rvsdg::output *) + const { return nullptr; } From 5dc47419a90c823531a970672f3d85f3b6304a17 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 24 Dec 2024 14:05:26 +0100 Subject: [PATCH 143/170] Remove unused state multiplexer operation (#691) --- jlm/llvm/opt/reduction.cpp | 12 -- jlm/rvsdg/Makefile.sub | 3 - jlm/rvsdg/statemux.cpp | 220 ---------------------- jlm/rvsdg/statemux.hpp | 131 ------------- scripts/run-hls-test.sh | 2 +- tests/jlm/llvm/ir/operators/LoadTests.cpp | 7 +- tests/jlm/rvsdg/test-statemux.cpp | 92 --------- 7 files changed, 4 insertions(+), 463 deletions(-) delete mode 100644 jlm/rvsdg/statemux.hpp diff --git a/jlm/llvm/opt/reduction.cpp b/jlm/llvm/opt/reduction.cpp index c5eb78837..d2336c6bb 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -7,7 +7,6 @@ #include #include #include -#include #include #include @@ -46,16 +45,6 @@ class redstat final : public util::Statistics } }; -static void -enable_mux_reductions(rvsdg::Graph & graph) -{ - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::mux_op)); - auto mnf = static_cast(nf); - mnf->set_mutable(true); - mnf->set_mux_mux_reducible(true); - mnf->set_multiple_origin_reducible(true); -} - static void enable_store_reductions(rvsdg::Graph & graph) { @@ -120,7 +109,6 @@ reduce(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) statistics->start(graph); - enable_mux_reductions(graph); enable_store_reductions(graph); enable_load_reductions(graph); enable_gamma_reductions(graph); diff --git a/jlm/rvsdg/Makefile.sub b/jlm/rvsdg/Makefile.sub index 697211747..5f441ee26 100644 --- a/jlm/rvsdg/Makefile.sub +++ b/jlm/rvsdg/Makefile.sub @@ -11,7 +11,6 @@ librvsdg_SOURCES = \ jlm/rvsdg/region.cpp \ jlm/rvsdg/simple-normal-form.cpp \ jlm/rvsdg/simple-node.cpp \ - jlm/rvsdg/statemux.cpp \ jlm/rvsdg/structural-normal-form.cpp \ jlm/rvsdg/structural-node.cpp \ jlm/rvsdg/theta.cpp \ @@ 
-46,7 +45,6 @@ librvsdg_HEADERS = \ jlm/rvsdg/bitstring/comparison.hpp \ jlm/rvsdg/view.hpp \ jlm/rvsdg/traverser.hpp \ - jlm/rvsdg/statemux.hpp \ jlm/rvsdg/graph.hpp \ jlm/rvsdg/substitution.hpp \ jlm/rvsdg/unary.hpp \ @@ -79,7 +77,6 @@ librvsdg_TESTS = \ tests/jlm/rvsdg/test-gamma \ tests/jlm/rvsdg/test-graph \ tests/jlm/rvsdg/test-nodes \ - tests/jlm/rvsdg/test-statemux \ tests/jlm/rvsdg/test-theta \ tests/jlm/rvsdg/test-topdown \ tests/jlm/rvsdg/test-typemismatch \ diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp index 79621b99d..e69de29bb 100644 --- a/jlm/rvsdg/statemux.cpp +++ b/jlm/rvsdg/statemux.cpp @@ -1,220 +0,0 @@ -/* - * Copyright 2010 2011 2012 2014 Helge Bahmann - * Copyright 2013 2014 2015 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#include -#include - -namespace jlm::rvsdg -{ - -/* mux operator */ - -mux_op::~mux_op() noexcept -{} - -bool -mux_op::operator==(const Operation & other) const noexcept -{ - auto op = dynamic_cast(&other); - return op && op->narguments() == narguments() && op->nresults() == nresults() - && op->result(0) == result(0); -} - -std::string -mux_op::debug_string() const -{ - return "STATEMUX"; -} - -std::unique_ptr -mux_op::copy() const -{ - return std::make_unique(*this); -} - -/* mux normal form */ - -static Node * -is_mux_mux_reducible(const std::vector & ops) -{ - std::unordered_set operands(ops.begin(), ops.end()); - - for (const auto & operand : operands) - { - auto node = output::GetNode(*operand); - if (!node || !is_mux_op(node->GetOperation())) - continue; - - size_t n; - for (n = 0; n < node->noutputs(); n++) - { - auto output = node->output(n); - if (operands.find(output) == operands.end() || output->nusers() != 1) - break; - } - if (n == node->noutputs()) - return node; - } - - return nullptr; -} - -static bool -is_multiple_origin_reducible(const std::vector & operands) -{ - std::unordered_set set(operands.begin(), operands.end()); - return set.size() != operands.size(); -} - 
-static std::vector -perform_multiple_origin_reduction( - const jlm::rvsdg::mux_op & op, - const std::vector & operands) -{ - std::unordered_set set(operands.begin(), operands.end()); - return create_state_mux(op.result(0), { set.begin(), set.end() }, op.nresults()); -} - -static std::vector -perform_mux_mux_reduction( - const jlm::rvsdg::mux_op & op, - const Node * muxnode, - const std::vector & old_operands) -{ - JLM_ASSERT(is_mux_op(muxnode->GetOperation())); - - bool reduced = false; - std::vector new_operands; - for (const auto & operand : old_operands) - { - if (jlm::rvsdg::output::GetNode(*operand) == muxnode && !reduced) - { - reduced = true; - auto tmp = operands(muxnode); - new_operands.insert(new_operands.end(), tmp.begin(), tmp.end()); - continue; - } - - if (jlm::rvsdg::output::GetNode(*operand) != muxnode) - new_operands.push_back(operand); - } - - return create_state_mux(op.result(0), new_operands, op.nresults()); -} - -mux_normal_form::~mux_normal_form() noexcept -{} - -mux_normal_form::mux_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) noexcept - : simple_normal_form(opclass, parent, graph), - enable_mux_mux_(false), - enable_multiple_origin_(false) -{ - if (auto p = dynamic_cast(parent)) - enable_mux_mux_ = p->enable_mux_mux_; -} - -bool -mux_normal_form::normalize_node(Node * node) const -{ - JLM_ASSERT(dynamic_cast(&node->GetOperation())); - auto op = static_cast(&node->GetOperation()); - - if (!get_mutable()) - return true; - - auto muxnode = is_mux_mux_reducible(operands(node)); - if (get_mux_mux_reducible() && muxnode) - { - divert_users(node, perform_mux_mux_reduction(*op, muxnode, operands(node))); - remove(node); - return false; - } - - if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands(node))) - { - divert_users(node, perform_multiple_origin_reduction(*op, operands(node))); - remove(node); - return false; - } - - return 
simple_normal_form::normalize_node(node); -} - -std::vector -mux_normal_form::normalized_create( - rvsdg::Region * region, - const SimpleOperation & op, - const std::vector & operands) const -{ - JLM_ASSERT(dynamic_cast(&op)); - auto mop = static_cast(&op); - - if (!get_mutable()) - return simple_normal_form::normalized_create(region, op, operands); - - auto muxnode = is_mux_mux_reducible(operands); - if (get_mux_mux_reducible() && muxnode) - return perform_mux_mux_reduction(*mop, muxnode, operands); - - if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) - return perform_multiple_origin_reduction(*mop, operands); - - return simple_normal_form::normalized_create(region, op, operands); -} - -void -mux_normal_form::set_mux_mux_reducible(bool enable) -{ - if (get_mux_mux_reducible() == enable) - return; - - children_set(enable); - - enable_mux_mux_ = enable; - if (get_mutable() && enable) - graph()->mark_denormalized(); -} - -void -mux_normal_form::set_multiple_origin_reducible(bool enable) -{ - if (get_multiple_origin_reducible() == enable) - return; - - children_set(enable); - - enable_multiple_origin_ = enable; - if (get_mutable() && enable) - graph()->mark_denormalized(); -} - -} - -namespace -{ - -static jlm::rvsdg::node_normal_form * -create_mux_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::Graph * graph) -{ - return new jlm::rvsdg::mux_normal_form(opclass, parent, graph); -} - -static void __attribute__((constructor)) -register_node_normal_form() -{ - jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::mux_op), - create_mux_normal_form); -} - -} diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp deleted file mode 100644 index 77ceceb41..000000000 --- a/jlm/rvsdg/statemux.hpp +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2010 2011 2012 2014 Helge Bahmann - * Copyright 2014 2015 Nico Reißmann - * See COPYING for terms of redistribution. 
- */ - -#ifndef JLM_RVSDG_STATEMUX_HPP -#define JLM_RVSDG_STATEMUX_HPP - -#include -#include -#include - -namespace jlm::rvsdg -{ - -/* mux normal form */ - -class mux_normal_form final : public simple_normal_form -{ -public: - virtual ~mux_normal_form() noexcept; - - mux_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) noexcept; - - virtual bool - normalize_node(Node * node) const override; - - virtual std::vector - normalized_create( - rvsdg::Region * region, - const SimpleOperation & op, - const std::vector & arguments) const override; - - virtual void - set_mux_mux_reducible(bool enable); - - virtual void - set_multiple_origin_reducible(bool enable); - - inline bool - get_mux_mux_reducible() const noexcept - { - return enable_mux_mux_; - } - - inline bool - get_multiple_origin_reducible() const noexcept - { - return enable_multiple_origin_; - } - -private: - bool enable_mux_mux_; - bool enable_multiple_origin_; -}; - -/* mux operation */ - -class mux_op final : public SimpleOperation -{ -public: - virtual ~mux_op() noexcept; - - inline mux_op(std::shared_ptr type, size_t narguments, size_t nresults) - : SimpleOperation({ narguments, type }, { nresults, type }) - {} - - virtual bool - operator==(const Operation & other) const noexcept override; - - virtual std::string - debug_string() const override; - - [[nodiscard]] std::unique_ptr - copy() const override; - - static jlm::rvsdg::mux_normal_form * - normal_form(Graph * graph) noexcept - { - return static_cast(graph->node_normal_form(typeid(mux_op))); - } -}; - -static inline bool -is_mux_op(const Operation & op) -{ - return dynamic_cast(&op) != nullptr; -} - -static inline std::vector -create_state_mux( - std::shared_ptr type, - const std::vector & operands, - size_t nresults) -{ - if (operands.empty()) - throw jlm::util::error("Insufficient number of operands."); - - auto st = std::dynamic_pointer_cast(type); - if (!st) - throw jlm::util::error("Expected 
state type."); - - auto region = operands.front()->region(); - jlm::rvsdg::mux_op op(std::move(st), operands.size(), nresults); - return SimpleNode::create_normalized(region, op, operands); -} - -static inline jlm::rvsdg::output * -create_state_merge( - std::shared_ptr type, - const std::vector & operands) -{ - return create_state_mux(std::move(type), operands, 1)[0]; -} - -static inline std::vector -create_state_split( - std::shared_ptr type, - jlm::rvsdg::output * operand, - size_t nresults) -{ - return create_state_mux(std::move(type), { operand }, nresults); -} - -} - -#endif diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index ef821a447..25fc47f22 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=c81fc559afa3cca66efc908b0a932d81f9c90d49 +GIT_COMMIT=63c77eebf44c53b7d24ad2baa7c92e3533d2b20d # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index d932d32f9..b1265be2e 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -99,9 +98,9 @@ TestLoadAllocaReduction() auto alloca1 = alloca_op::create(bt, size, 4); auto alloca2 = alloca_op::create(bt, size, 4); - auto mux = jlm::rvsdg::create_state_mux(mt, { alloca1[1] }, 1); + auto mux = MemoryStateMergeOperation::Create({ alloca1[1] }); auto & loadNode = - LoadNonVolatileNode::CreateNode(*alloca1[0], { alloca1[1], alloca2[1], mux[0] }, bt, 4); + LoadNonVolatileNode::CreateNode(*alloca1[0], { alloca1[1], alloca2[1], mux }, bt, 4); auto & ex = GraphExport::Create(*loadNode.output(0), "l"); @@ -118,7 +117,7 @@ TestLoadAllocaReduction() assert(is(node)); 
assert(node->ninputs() == 3); assert(node->input(1)->origin() == alloca1[1]); - assert(node->input(2)->origin() == mux[0]); + assert(node->input(2)->origin() == mux); return 0; } diff --git a/tests/jlm/rvsdg/test-statemux.cpp b/tests/jlm/rvsdg/test-statemux.cpp index 3ab31de1b..e69de29bb 100644 --- a/tests/jlm/rvsdg/test-statemux.cpp +++ b/tests/jlm/rvsdg/test-statemux.cpp @@ -1,92 +0,0 @@ -/* - * Copyright 2010 2011 2012 2014 Helge Bahmann - * Copyright 2014 2015 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#include "test-operation.hpp" -#include "test-registry.hpp" -#include "test-types.hpp" - -#include -#include - -static void -test_mux_mux_reduction() -{ - using namespace jlm::rvsdg; - - auto st = jlm::tests::statetype::Create(); - - Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::mux_op)); - auto mnf = static_cast(nf); - mnf->set_mutable(false); - mnf->set_mux_mux_reducible(false); - - auto x = &jlm::tests::GraphImport::Create(graph, st, "x"); - auto y = &jlm::tests::GraphImport::Create(graph, st, "y"); - auto z = &jlm::tests::GraphImport::Create(graph, st, "z"); - - auto mux1 = jlm::rvsdg::create_state_merge(st, { x, y }); - auto mux2 = jlm::rvsdg::create_state_split(st, z, 2); - auto mux3 = jlm::rvsdg::create_state_merge(st, { mux1, mux2[0], mux2[1], z }); - - auto & ex = jlm::tests::GraphExport::Create(*mux3, "m"); - - // jlm::rvsdg::view(graph.root(), stdout); - - mnf->set_mutable(true); - mnf->set_mux_mux_reducible(true); - graph.normalize(); - graph.prune(); - - // jlm::rvsdg::view(graph.root(), stdout); - - auto node = output::GetNode(*ex.origin()); - assert(node->ninputs() == 4); - assert(node->input(0)->origin() == x); - assert(node->input(1)->origin() == y); - assert(node->input(2)->origin() == z); - assert(node->input(3)->origin() == z); -} - -static void -test_multiple_origin_reduction() -{ - using namespace jlm::rvsdg; - - auto st = jlm::tests::statetype::Create(); - - Graph graph; - auto nf = 
graph.node_normal_form(typeid(jlm::rvsdg::mux_op)); - auto mnf = static_cast(nf); - mnf->set_mutable(false); - mnf->set_multiple_origin_reducible(false); - - auto x = &jlm::tests::GraphImport::Create(graph, st, "x"); - auto mux1 = jlm::rvsdg::create_state_merge(st, { x, x }); - auto & ex = jlm::tests::GraphExport::Create(*mux1, "m"); - - view(graph.root(), stdout); - - mnf->set_mutable(true); - mnf->set_multiple_origin_reducible(true); - graph.normalize(); - graph.prune(); - - view(graph.root(), stdout); - - assert(output::GetNode(*ex.origin())->ninputs() == 1); -} - -static int -test_main() -{ - test_mux_mux_reduction(); - test_multiple_origin_reduction(); - - return 0; -} - -JLM_UNIT_TEST_REGISTER("jlm/rvsdg/test-statemux", test_main) From e59773a83e2bc38d44fafb741d270e7073e8ff86 Mon Sep 17 00:00:00 2001 From: HKrogstie Date: Tue, 24 Dec 2024 19:19:48 +0100 Subject: [PATCH 144/170] Hls harness rework (#696) Creates a new cleaned up harness for calling Verilated hls kernels from C code. It defines a class for the memory queues, which simplifies the usage code a great deal. I eventually "cracked the code" regarding writing harnesses for hardware: Only read model outputs right before posedge, only write inputs right after posedge (and eval again). Example diff between the old and new `test_load.hls.harness.cpp` can be seen [here](https://www.diffchecker.com/1vnmcZ6H/). The diff is not particularly easy to read, as I have re-arranged functions. I would recommend just reading the new harness, as it includes comments. The new harness passes the tests, but some of the cycle counts have increased by one, due to the old harness allowing the model to "false start" when some computation does not depend on input. I have created a commit in `hls-test-suite` with the updated numbers. I also cleaned up some other code to use range-based for loops over `.Arguments()` and `.Results()` and similar, instead of doing C-style for loops. 
This PR is created partly to see how it fares in CI. It is definitely possible to write some tests for this code, particularly covering how the lambda's arguments, memory queues, and context variables are handled. --- .../rhls2firrtl/RhlsToFirrtlConverter.cpp | 8 +- .../rhls2firrtl/RhlsToFirrtlConverter.hpp | 2 +- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 + jlm/hls/backend/rhls2firrtl/base-hls.hpp | 84 +- jlm/hls/backend/rhls2firrtl/dot-hls.cpp | 2 +- jlm/hls/backend/rhls2firrtl/dot-hls.hpp | 2 +- jlm/hls/backend/rhls2firrtl/json-hls.cpp | 6 +- jlm/hls/backend/rhls2firrtl/json-hls.hpp | 2 +- .../rhls2firrtl/verilator-harness-hls.cpp | 1268 ++++++++--------- .../rhls2firrtl/verilator-harness-hls.hpp | 37 +- scripts/run-hls-test.sh | 2 +- 11 files changed, 652 insertions(+), 763 deletions(-) diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp index daa0f3624..aab3b6900 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.cpp @@ -2838,8 +2838,8 @@ RhlsToFirrtlConverter::MlirGen(const llvm::lambda::node * lambdaNode) AddClockPort(&ports); AddResetPort(&ports); - auto reg_args = get_reg_args(lambdaNode); - auto reg_results = get_reg_results(lambdaNode); + auto reg_args = get_reg_args(*lambdaNode); + auto reg_results = get_reg_results(*lambdaNode); // Input bundle using BundleElement = circt::firrtl::BundleType::BundleElement; @@ -2882,8 +2882,8 @@ RhlsToFirrtlConverter::MlirGen(const llvm::lambda::node * lambdaNode) ports.push_back(oBundle); // Memory ports - auto mem_reqs = get_mem_reqs(lambdaNode); - auto mem_resps = get_mem_resps(lambdaNode); + auto mem_reqs = get_mem_reqs(*lambdaNode); + auto mem_resps = get_mem_resps(*lambdaNode); JLM_ASSERT(mem_resps.size() == mem_reqs.size()); for (size_t i = 0; i < mem_reqs.size(); ++i) { diff --git a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp 
b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp index 9904022de..0d5088f75 100644 --- a/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp +++ b/jlm/hls/backend/rhls2firrtl/RhlsToFirrtlConverter.hpp @@ -43,7 +43,7 @@ class RhlsToFirrtlConverter : public BaseHLS public: std::string - get_text(llvm::RvsdgModule &) override + GetText(llvm::RvsdgModule &) override { return "MLIR/FIRRTL generator"; } diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 84fac591a..43e9d2d79 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -20,6 +20,8 @@ isForbiddenChar(char c) return true; } +BaseHLS::~BaseHLS() = default; + std::string BaseHLS::get_node_name(const jlm::rvsdg::Node * node) { diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.hpp b/jlm/hls/backend/rhls2firrtl/base-hls.hpp index 460f086e2..519b0abb5 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.hpp @@ -22,13 +22,15 @@ isForbiddenChar(char c); class BaseHLS { public: + virtual ~BaseHLS(); + std::string run(llvm::RvsdgModule & rm) { JLM_ASSERT(node_map.empty()); // ensure consistent naming across runs create_node_names(get_hls_lambda(rm)->subregion()); - return get_text(rm); + return GetText(rm); } static int @@ -67,68 +69,80 @@ class BaseHLS create_node_names(rvsdg::Region * r); virtual std::string - get_text(llvm::RvsdgModule & rm) = 0; + GetText(llvm::RvsdgModule & rm) = 0; static std::string get_base_file_name(const llvm::RvsdgModule & rm); - std::vector - get_mem_resps(const llvm::lambda::node * lambda) + /** + * Extracts all region arguments of the given kernel that represent memory responses. + * They can provide multiple values within a single execution of the region. 
+ * @param lambda the lambda node holding the hls kernel + * @return the arguments that represent memory responses + */ + std::vector + get_mem_resps(const llvm::lambda::node & lambda) { - std::vector mem_resps; - for (size_t i = 0; i < lambda->subregion()->narguments(); ++i) + std::vector mem_resps; + for (auto arg : lambda.subregion()->Arguments()) { - auto arg = lambda->subregion()->argument(i); - if (dynamic_cast(&arg->type())) - { - mem_resps.push_back(lambda->subregion()->argument(i)); - } + if (rvsdg::is(arg->type())) + mem_resps.push_back(arg); } return mem_resps; } + /** + * Extracts all region results of the given kernel that represent memory requests. + * They can take multiple values within a single execution of the region. + * @param lambda the lambda node holding the hls kernel + * @return the results that represent memory requests + */ std::vector - get_mem_reqs(const llvm::lambda::node * lambda) + get_mem_reqs(const llvm::lambda::node & lambda) { std::vector mem_resps; - for (size_t i = 0; i < lambda->subregion()->nresults(); ++i) + for (auto result : lambda.subregion()->Results()) { - if (dynamic_cast(&lambda->subregion()->result(i)->type())) - { - mem_resps.push_back(lambda->subregion()->result(i)); - } + if (rvsdg::is(result->type())) + mem_resps.push_back(result); } return mem_resps; } - std::vector - get_reg_args(const llvm::lambda::node * lambda) + /** + * Extracts all region arguments of the given kernel that represent kernel inputs, + * which may include kernel arguments, state types, and context variables (always in that order). + * It will not return any arguments that represent memory responses. 
+ * @param lambda the lambda node holding the hls kernel + * @return the arguments of the lambda that represent kernel inputs + */ + std::vector + get_reg_args(const llvm::lambda::node & lambda) { - std::vector args; - for (size_t i = 0; i < lambda->subregion()->narguments(); ++i) + std::vector args; + for (auto argument : lambda.subregion()->Arguments()) { - auto argtype = &lambda->subregion()->argument(i)->type(); - if (!dynamic_cast( - argtype) /*&& !dynamic_cast(argtype)*/) - { - args.push_back(lambda->subregion()->argument(i)); - } + if (!rvsdg::is(argument->type())) + args.push_back(argument); } return args; } + /** + * Extracts all region results from the given kernel that represent results from execution, + * as opposed to results used for making memory requests. + * @param lambda the lambda node holding the hls kernel + * @return the results of the lambda that represent the kernel outputs + */ std::vector - get_reg_results(const llvm::lambda::node * lambda) + get_reg_results(const llvm::lambda::node & lambda) { std::vector results; - for (size_t i = 0; i < lambda->subregion()->nresults(); ++i) + for (auto result : lambda.subregion()->Results()) { - auto argtype = &lambda->subregion()->result(i)->type(); - if (!dynamic_cast( - argtype) /*&& !dynamic_cast(argtype)*/) - { - results.push_back(lambda->subregion()->result(i)); - } + if (!rvsdg::is(result->type())) + results.push_back(result); } return results; } diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp index 8477f7818..267a2dc11 100644 --- a/jlm/hls/backend/rhls2firrtl/dot-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.cpp @@ -20,7 +20,7 @@ DotHLS::extension() } std::string -DotHLS::get_text(llvm::RvsdgModule & rm) +DotHLS::GetText(llvm::RvsdgModule & rm) { return subregion_to_dot(get_hls_lambda(rm)->subregion()); } diff --git a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp index 1f8c292d8..5934167ac 100644 --- 
a/jlm/hls/backend/rhls2firrtl/dot-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/dot-hls.hpp @@ -18,7 +18,7 @@ class DotHLS : public BaseHLS extension() override; std::string - get_text(llvm::RvsdgModule & rm) override; + GetText(llvm::RvsdgModule & rm) override; private: std::string diff --git a/jlm/hls/backend/rhls2firrtl/json-hls.cpp b/jlm/hls/backend/rhls2firrtl/json-hls.cpp index 6d9089ce7..049e89060 100644 --- a/jlm/hls/backend/rhls2firrtl/json-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/json-hls.cpp @@ -10,11 +10,11 @@ namespace jlm::hls { std::string -JsonHLS::get_text(llvm::RvsdgModule & rm) +JsonHLS::GetText(llvm::RvsdgModule & rm) { std::ostringstream json; - auto ln = get_hls_lambda(rm); - auto function_name = ln->name(); + const auto & ln = *get_hls_lambda(rm); + auto function_name = ln.name(); auto file_name = get_base_file_name(rm); json << "{\n"; diff --git a/jlm/hls/backend/rhls2firrtl/json-hls.hpp b/jlm/hls/backend/rhls2firrtl/json-hls.hpp index 79c2af654..6a170a96e 100644 --- a/jlm/hls/backend/rhls2firrtl/json-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/json-hls.hpp @@ -21,7 +21,7 @@ class JsonHLS : public BaseHLS } std::string - get_text(llvm::RvsdgModule & rm) override; + GetText(llvm::RvsdgModule & rm) override; private: }; diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index 6f56039de..e11d21d62 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -1,743 +1,645 @@ /* * Copyright 2021 David Metz + * Copyright 2024 Håvard Krogstie * See COPYING for terms of redistribution. 
*/ #include #include +#include + namespace jlm::hls { +// The number of cycles before a load is ready +static constexpr int MEMORY_RESPONSE_LATENCY = 10; + std::string -VerilatorHarnessHLS::get_text(llvm::RvsdgModule & rm) +ConvertToCType(const rvsdg::Type * type) { - std::ostringstream cpp; - auto ln = get_hls_lambda(rm); - auto function_name = ln->name(); - auto file_name = get_base_file_name(rm); - - auto mem_reqs = get_mem_reqs(ln); - auto mem_resps = get_mem_resps(ln); - JLM_ASSERT(mem_reqs.size() == mem_resps.size()); - cpp << "#define TRACE_CHUNK_SIZE 100000\n" - // "#define HLS_MEM_DEBUG 1\n" - "\n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#include \n" - "#ifdef FST\n" - "#include \"verilated_fst_c.h\"\n" - "#else\n" - "#include \"verilated_vcd_c.h\"\n" - "#endif\n" - // Include the Verilator generated header, which provides access to Verilog signals - // The name of the header is based on the Verilog filename used as input to Verilator - "#include \"V" - << GetVerilogFileName().base() << ".h\"\n" - << "#define V_NAME V" << GetVerilogFileName().base() << "\n" - << "#define TIMEOUT 10000000\n" - "#define xstr(s) str(s)\n" - "#define str(s) #s\n" - "void clock_cycle();\n" - "\n" - "\n" - "typedef struct MemAccess{\n" - " void * addr;\n" - " uint64_t data;\n" - " uint64_t width;\n" - " uint64_t ctr;\n" - "\n" - " bool operator==(const MemAccess& rhs) const {\n" - " return addr == rhs.addr && data == rhs.data && width == rhs.width;\n" - " }\n" - "} mem_access;\n\n" - "\n" - "uint64_t mem_access_ctr = 0;" - "\n" - "std::vector ref_loads;\n" - "std::vector ref_stores;\n" - "std::vector> ref_allocas;\n" - "std::vector hls_loads;\n" - "std::vector hls_stores;\n" - "std::map> load_map;\n" - "std::map> store_map;\n" - "\n" - "void access_mem_load(mem_access access){\n" - " hls_loads.push_back(access);\n" - " auto find = 
load_map.find(access.addr);\n" - " if(find == load_map.end()){\n" - " throw std::logic_error(\"unexpected load address\");\n" - " }\n" - " if(find->second.empty()){\n" - " throw std::logic_error(\"too many loads to address\");\n" - " }\n" - " if(!find->second.front().operator==(access)){\n" - " throw std::logic_error(\"wrong type of load to address\");\n" - " }\n" - " find->second.pop_front();\n" - "}\n" - "\n" - "void access_mem_store(mem_access access){\n" - " hls_stores.push_back(access);\n" - " auto find = store_map.find(access.addr);\n" - " if(find == store_map.end()){\n" - " throw std::logic_error(\"unexpected store address\");\n" - " }\n" - " if(find->second.empty()){\n" - " throw std::logic_error(\"too many stores to address\");\n" - " }\n" - " if(!find->second.front().operator==(access)){\n" - " throw std::logic_error(\"wrong type of store to address\");\n" - " }\n" - " find->second.pop_front();\n" - "}\n" - "// Current simulation time (64-bit unsigned)\n" - "vluint64_t main_time = 0;\n" - "// Called by $time in Verilog\n" - "double sc_time_stamp() {\n" - " return main_time; // Note does conversion to real, to match SystemC\n" - "}\n" - "V_NAME *top;\n" - "#ifdef TRACE_SIGNALS\n" - "#ifdef FST\n" - "VerilatedFstC *tfp;\n" - "#else\n" - "VerilatedVcdC *tfp;\n" - "#endif\n" - "#endif\n" - "bool terminate = false;\n" - "\n" - "void term(int signum) {\n" - " terminate = true;\n" - "}\n" - "\n" - "void verilator_finish() {\n" - " // Final model cleanup\n" - "#ifdef TRACE_SIGNALS\n" - " tfp->dump(main_time * 2);\n" - "#endif\n" - " top->final();\n" - "\n" - " // Coverage analysis (since test passed)\n" - "#if VM_COVERAGE\n" - " Verilated::mkdir(\"logs\");\n" - " VerilatedCov::write(\"logs/coverage.dat\");\n" - "#endif\n" - "#ifdef TRACE_SIGNALS\n" - " tfp->close();\n" - "#endif\n" - " // Destroy model\n" - // " delete top;\n" - // " top = NULL;\n" - "}\n" - "\n" - "typedef struct mem_resp_struct {\n" - " bool valid = false;\n" - " uint64_t data = 0xDEADBEEF;\n" 
- " uint8_t id = 0;\n" - "} mem_resp_struct;\n" - "\n" - "std::queue* mem_resp[" - << mem_resps.size() - << "];\n" - "const uint64_t mem_latency[" - << mem_resps.size() << "] = {"; - for (size_t i = 0; i < mem_resps.size(); ++i) - { - if (i != 0) - { - cpp << ", "; - } - cpp << "10"; - } - cpp << "};\n"; - cpp << "\n" - "void verilator_init(int argc, char **argv) {\n" - " // set up signaling so we can kill the program and still get waveforms\n" - " struct sigaction action;\n" - " memset(&action, 0, sizeof(struct sigaction));\n" - " action.sa_handler = term;\n" - " sigaction(SIGTERM, &action, NULL);\n" - " sigaction(SIGKILL, &action, NULL);\n" - " sigaction(SIGINT, &action, NULL);\n"; - - for (size_t i = 0; i < mem_resps.size(); ++i) - { - cpp << " mem_resp[" << i << "] = new std::queue();\n"; - } - for (size_t i = 0; i < mem_resps.size(); ++i) + if (auto t = dynamic_cast(type)) { - cpp << " for (size_t i = 0; i < mem_latency[" << i - << "]; ++i) {\n" - " mem_resp[" - << i - << "]->emplace();\n" - " }\n"; - } - cpp << "\n" - " atexit(verilator_finish);\n" - "\n" - " // Set debug level, 0 is off, 9 is highest presently used\n" - " // May be overridden by commandArgs\n" - " Verilated::debug(0);\n" - "\n" - " // Randomization reset policy\n" - " // May be overridden by commandArgs\n" - " Verilated::randReset(2);\n" - "\n" - " // Verilator must compute traced signals\n" - " Verilated::traceEverOn(true);\n" - "\n" - " // Pass arguments so Verilated code can see them, e.g. 
$value$plusargs\n" - " // This needs to be called before you create any model\n" - " Verilated::commandArgs(argc, argv);\n" - "\n" - " // Construct the Verilated model, from Vtop.h generated from Verilating \"top.v\"\n" - " top = new V_NAME; // Or use a const unique_ptr, or the VL_UNIQUE_PTR wrapper\n" - "#ifdef TRACE_SIGNALS\n" - "#ifdef FST\n" - " tfp = new VerilatedFstC;\n" - " top->trace(tfp, 99); // Trace 99 levels of hierarchy\n" - " tfp->open(xstr(V_NAME)\".fst\");\n" - "#else\n" - " tfp = new VerilatedVcdC;\n" - " top->trace(tfp, 99); // Trace 99 levels of hierarchy\n" - " tfp->open(xstr(V_NAME)\".vcd\");\n" - "#endif\n" - "#endif\n" - "\n" - " top->i_valid = 0;\n"; - // reset all data inputs to zero - for (size_t i = 0; i < ln->ninputs(); ++i) - { - cpp << " top->i_data_" << i << " = 0;\n"; - } - cpp << " top->reset = 1;\n" - "\n"; - for (size_t i = 0; i < mem_reqs.size(); ++i) - { - cpp << " top->mem_" << i - << "_req_ready = false;\n" - " top->mem_" - << i - << "_res_valid = false;\n" - " top->mem_" - << i << "_res_data_data = 0xDEADBEEF;\n"; + return "int" + util::strfmt(t->nbits()) + "_t"; } - cpp << " clock_cycle();\n" - " clock_cycle();\n" - " top->reset = 0;\n" - " clock_cycle();\n" - "}\n" - "\n" - "void posedge() {\n" - " if (terminate) {\n" - " std::cout << \"terminating\\n\";\n" - " verilator_finish();\n" - " exit(-1);\n" - " }\n" - " assert(!Verilated::gotFinish());\n" - " top->clk = 1;\n" - " top->eval(); //eval here to get a clean posedge with the old inputs\n" - "}\n" - "\n" - "void finish_clock_cycle() {\n"; - for (size_t i = 0; i < mem_reqs.size(); ++i) + if (jlm::rvsdg::is(*type)) { - cpp << " top->mem_" << i << "_res_data_data = mem_resp[" << i - << "]->front().data;\n" - " top->mem_" - << i << "_res_data_id = mem_resp[" << i - << "]->front().id;\n" - " top->mem_" - << i << "_res_valid = mem_resp[" << i - << "]->front().valid;\n" - " top->mem_" - << i << "_req_ready = true;\n"; + return "void*"; } - cpp << " top->eval();\n" - " // dump 
before trying to access memory\n" - "#ifdef TRACE_SIGNALS\n" - " tfp->dump(main_time * 2);\n" - "#ifdef VCD_FLUSH\n" - " tfp->flush();\n" - "#endif\n" - "#endif\n"; - for (size_t i = 0; i < mem_reqs.size(); ++i) + if (auto t = dynamic_cast(type)) { - cpp << " if (top->mem_" << i << "_res_valid && top->mem_" << i - << "_res_ready) {\n" - " mem_resp[" - << i - << "]->pop();\n" - " } else if (!mem_resp[" - << i - << "]->front().valid){\n" - " mem_resp[" - << i - << "]->pop();\n" - " }\n"; + return ConvertToCType(&t->element_type()) + "*"; } - for (size_t i = 0; i < mem_reqs.size(); ++i) + + JLM_UNREACHABLE("Unimplemented C type"); +} + +/** + * Takes an HLS kernel and determines the return type of the original C function. + * If the function did not have a return value, i.e., returns "void", nullopt is returned. + * @param kernel the lambda node representing the kernel + * @return the return type of the kernel as written in C, or nullopt if it has no return value. + */ +std::optional +GetReturnTypeAsC(const llvm::lambda::node & kernel) +{ + const auto & results = kernel.type().Results(); + + if (results.empty()) + return std::nullopt; + + const auto & type = results.front(); + + if (rvsdg::is(type)) + return std::nullopt; + + return ConvertToCType(type.get()); +} + +/** + * Takes an HLS kernel and determines the parameters of the original C function. + * Returns a tuple, the first element of which is the number of parameters. + * The second element is a string defining the C parameters, like "int32_t a0, void* a1, void* a2". + * The third element is a string for calling the C function, like "a0, a1, a2". 
+ * @param kernel the lambda node representing the kernel + * @return a tuple (number of parameters, string of parameters, string of call arguments) + */ +std::tuple +GetParameterListAsC(const llvm::lambda::node & kernel) +{ + size_t argument_index = 0; + std::ostringstream parameters; + std::ostringstream arguments; + + for (auto & argType : kernel.type().Arguments()) { - cpp << " // mem_" << i - << "\n" - " if (!top->reset && top->mem_" - << i << "_req_valid && top->mem_" << i - << "_req_ready) {\n" - " mem_resp[" - << i - << "]->emplace();\n" - " mem_resp[" - << i - << "]->back().valid = true;\n" - " mem_resp[" - << i << "]->back().id = top->mem_" << i - << "_req_data_id;\n" - " void *addr = (void *) top->mem_" - << i - << "_req_data_addr;\n" - " uint64_t size = top->mem_" - << i - << "_req_data_size;\n" - " uint64_t data;\n"; - auto req_bt = dynamic_cast(&mem_reqs[i]->type()); - auto has_write = req_bt->get_element_type("write") != nullptr; - if (has_write) - { - cpp << " data = top->mem_" << i - << "_req_data_data;\n" - " if (top->mem_" - << i - << "_req_data_write) {\n" - "#ifdef HLS_MEM_DEBUG\n" - " std::cout << \"mem_" - << i - << " writing \" << data << \" to \" << addr << \"\\n\";\n" - "#endif\n" - " access_mem_store({addr, data, size, mem_access_ctr++});\n" - " switch (size) {\n" - " case 0:\n" - " *(uint8_t *) addr = data;\n" - " break;\n" - " case 1:\n" - " *(uint16_t *) addr = data;\n" - " break;\n" - " case 2:\n" - " *(uint32_t *) addr = data;\n" - " break;\n" - " case 3:\n" - " *(uint64_t *) addr = data;\n" - " break;\n" - " default:\n" - " assert(false);\n" - " }\n" - " mem_resp[" - << i - << "]->back().data = 0xFFFFFFFF;\n" - " } else {\n"; - } - else + if (rvsdg::is(argType)) + continue; + if (rvsdg::is(argType)) + continue; + + if (argument_index != 0) { - cpp << " if (true) {\n"; + parameters << ", "; + arguments << ", "; } - cpp << "#ifdef HLS_MEM_DEBUG\n" - " std::cout << \"mem_" - << i - << " reading from \" << addr << \"\\n\";\n" - 
"#endif\n" - " switch (size) {\n" - " case 0:\n" - " data = *(uint8_t *) addr;\n" - " break;\n" - " case 1:\n" - " data = *(uint16_t *) addr;\n" - " break;\n" - " case 2:\n" - " data = *(uint32_t *) addr;\n" - " break;\n" - " case 3:\n" - " data = *(uint64_t *) addr;\n" - " break;\n" - " default:\n" - " assert(false);\n" - " }\n" - " mem_resp[" - << i - << "]->back().data = data;\n" - " access_mem_load({addr, data, size, mem_access_ctr++});\n" - " }\n" - " } else if (mem_resp[" - << i << "]->size()emplace();\n" - " }\n"; + + parameters << ConvertToCType(argType.get()) << " a" << argument_index; + arguments << "a" << argument_index; + argument_index++; } - cpp << " assert(!Verilated::gotFinish());\n" - " top->clk = 0;\n" - " top->eval();\n" - "#ifdef TRACE_SIGNALS\n" - " tfp->dump(main_time * 2 + 1);\n" - "#ifdef VCD_FLUSH\n" - " tfp->flush();\n" - "#endif\n" - "#endif\n" - " main_time++;\n" - "}\n" - "\n" - "void clock_cycle() {\n" - " posedge();\n" - " finish_clock_cycle();\n" - "}\n" - "\n"; - - cpp << "extern \"C\"\n" // TODO: parameter for linkage type here - "{\n"; - // imports - auto root = rm.Rvsdg().root(); - for (size_t i = 0; i < root->narguments(); ++i) + + return std::make_tuple(argument_index, parameters.str(), arguments.str()); +} + +std::string +VerilatorHarnessHLS::GetText(llvm::RvsdgModule & rm) +{ + std::ostringstream cpp; + const auto & kernel = *get_hls_lambda(rm); + const auto & function_name = kernel.name(); + + // The request and response parts of memory queues + const auto mem_reqs = get_mem_reqs(kernel); + const auto mem_resps = get_mem_resps(kernel); + JLM_ASSERT(mem_reqs.size() == mem_resps.size()); + + // All inputs that are not memory queues + const auto reg_args = get_reg_args(kernel); + + // Extract info about the kernel's function signature in C + const auto c_return_type = GetReturnTypeAsC(kernel); + const auto [num_c_params, c_params, c_call_args] = GetParameterListAsC(kernel); + + cpp << R"( +#define TRACE_CHUNK_SIZE 100000 
+#define TIMEOUT 10000000 + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef FST +#include "verilated_fst_c.h" +#else +#include "verilated_vcd_c.h" +#endif +#define xstr(s) str(s) +#define str(s) #s +)" << std::endl; + + cpp << "#include \"V" << VerilogFile_.base() << ".h\"" << std::endl; + cpp << "#define V_NAME V" << VerilogFile_.base() << std::endl; + + cpp << R"( +// ======== Global variables used for simulating the model ======== +// The verilated model being simulated +V_NAME *top; + +// Current simulation time, in number of cycles +uint64_t main_time = 0; + +// Can be set from signal handlers, to trigger gracefull early termination +bool terminate = false; + + +// ======== Global variables imported from other modules ======== +)"; + + for (const auto arg : rm.Rvsdg().root()->Arguments()) { - if (auto graphImport = dynamic_cast(root->argument(i))) - { - if (dynamic_cast(&graphImport->type())) - { - cpp << "extern " << convert_to_c_type(&graphImport->type()) << " " << graphImport->Name() - << ";\n"; - } - else - { - throw util::error("unexpected impport type"); - } - } + const auto graphImport = util::AssertedCast(arg); + cpp << "extern \"C\" char " << graphImport->Name() << ";" << std::endl; } + cpp << R"( - get_function_header(cpp, ln, "instrumented_ref"); - cpp << ";\n"; - cpp << "bool in_alloca(void *addr){\n" - " for (auto a: ref_allocas) {\n" - " if(addr >= a.first && addr < ((uint8_t*)a.first)+a.second){\n" - " return true;\n" - " }\n" - " }\n" - " return false;\n" - "}\n" - "\n" - "void reference_load(void *addr, uint64_t width) {\n" - " if(in_alloca(addr)){\n" - " return;\n" - " }\n" - " uint64_t data;\n" - " switch (width) {\n" - " case 0:\n" - " data = *(uint8_t *) addr;\n" - " break;\n" - " case 1:\n" - " data = *(uint16_t *) addr;\n" - " break;\n" - " case 2:\n" - " data = *(uint32_t *) addr;\n" - " break;\n" - " case 3:\n" - " data = *(uint64_t *) addr;\n" - " break;\n" - " 
default:\n" - " assert(false);\n" - " }\n" - " ref_loads.push_back({addr, data, width, mem_access_ctr++});\n" - "}\n" - "\n" - "void reference_store(void *addr, uint64_t data, uint64_t width) {\n" - " if(in_alloca(addr)){\n" - " return;\n" - " }\n" - " ref_stores.push_back({addr, data, width, mem_access_ctr++});\n" - "}\n" - "\n" - "void reference_alloca(void *addr, uint64_t size) {\n" - " std::cout << \"alloca \" << std::hex << addr << \" \" << std::dec << size << " - "std::endl;\n" - " ref_allocas.emplace_back(addr, size);\n" - "}\n" - "\n"; - get_function_header(cpp, ln, "run_hls"); - cpp << " {\n"; - cpp << " if(!top){\n" - " verilator_init(0, NULL);\n" - " }\n"; - size_t register_ix = 0; - for (size_t i = 0; i < ln->type().NumArguments(); ++i) - { - if (dynamic_cast(&ln->type().ArgumentType(i))) - { - register_ix++; - continue; +// ======== Tracing accesses to main memory ========== +struct mem_access { + void * addr; + bool write; + uint8_t width; // 2^width bytes + uint64_t data; + + bool operator==(const mem_access & other) const { + return addr == other.addr && write == other.write && width == other.width && data == other.data; } - else if (dynamic_cast(&ln->type().ArgumentType(i))) - { - continue; +}; + +// A log of memory accesses made by the kernel +std::vector memory_accesses; +// Accesses to regions in this vector of (start, length) pairs are not traced +std::vector> ignored_memory_regions; + +static void ignore_memory_region(void* start, size_t length) { + ignored_memory_regions.emplace_back(start, length); +} + +static bool in_ignored_region(void* addr) { + for (auto [start, length] : ignored_memory_regions) { + if (addr >= start && addr < (char*)start + length) + return true; } - cpp << " top->i_data_" << i << " = (uint64_t) a" << i << ";\n"; - register_ix++; - } - for (const auto & ctxvar : ln->GetContextVars()) - { - std::string name; - if (auto graphImport = dynamic_cast(ctxvar.input->origin())) - { - name = graphImport->Name(); + return false; 
+} + +static uint64_t instrumented_load(void* addr, uint8_t width) { + uint64_t data = 0; + assert(width <= 3); + memcpy(&data, addr, 1 << width); + if (!in_ignored_region(addr)) + memory_accesses.push_back({addr, false, width, data}); + return data; +} + +static void instrumented_store(void* addr, uint64_t data, uint8_t width) { + assert(width <= 3); + memcpy(addr, &data, 1 << width); + if(!in_ignored_region(addr)) + memory_accesses.push_back({addr, true, width, data}); +} + + +// ======== Implementation of external memory queues, adding latency to loads ======== +class MemoryQueue { + struct Response { + uint64_t request_time; + uint64_t data; + uint8_t id; + }; + int latency; + std::deque responses; + +public: + MemoryQueue(int latency) : latency(latency) {} + + // Called right before posedge, can only read from the model + void accept_request(uint8_t req_ready, uint8_t req_valid, uint8_t req_write, uint64_t req_addr, uint8_t req_size, uint64_t req_data, uint8_t req_id, uint8_t res_valid, uint8_t res_ready) { + if (top->reset) { + responses.clear(); + return; + } + + // If a response was consumed this cycle, remove it + if (res_ready && res_valid) { + assert(!responses.empty()); + responses.pop_front(); + } + + if (!req_ready || !req_valid) + return; + + if (req_write) { + // Stores are performed immediately + instrumented_store((void*) req_addr, req_data, req_size); + } else { + // Loads are performed immediately, but their response is placed in the queue + uint64_t data = instrumented_load((void*) req_addr, req_size); + responses.push_back({main_time, data, req_id}); + } } - else - { - throw util::error("Unsupported cvarg origin type type"); + + // Called right after posedge, can only write to the model + void produce_response(uint8_t& req_ready, uint8_t& res_valid, uint64_t& res_data, uint8_t& res_id) { + if (!responses.empty() && responses.front().request_time + latency <= main_time + 1) { + res_valid = 1; + res_data = responses.front().data; + res_id = 
responses.front().id; + } else { + res_valid = 0; + res_data = 0xDEADBEEF; + res_id = 0; + } + + // Always ready for requests + req_ready = 1; } - cpp << " top->i_data_" << register_ix++ << " = (uint64_t) &" << name << ";\n"; - cpp << "#ifdef HLS_MEM_DEBUG\n"; - cpp << " std::cout << \"" << name << ": \" << &" << name << " << \"\\n\";\n"; - cpp << "#endif\n"; - } - cpp << " int start = main_time;\n" - " for (int i = 0; i < TIMEOUT && !top->i_ready; i++) {\n" - " clock_cycle();\n" - " }\n" - " if (!top->i_ready) {\n" - " std::cout << \"i_ready not set\\n\";\n" - " verilator_finish();\n" - " exit(-1);\n" - " }\n" - " posedge();\n" - " top->i_valid = 1;\n" - " top->o_ready = 1;\n" - " finish_clock_cycle();\n" - " posedge();\n" - " top->i_valid = 0;\n"; - for (size_t i = 0; i < ln->type().NumArguments(); ++i) - { - if (!dynamic_cast(&ln->type().ArgumentType(i))) - { - cpp << " top->i_data_" << i << " = 0;\n"; + + bool empty() const { + return responses.empty(); } - } - cpp << " finish_clock_cycle();\n" - " for (int i = 0; i < TIMEOUT && !top->o_valid; i++) {\n" - " clock_cycle();\n" - " }\n" - " if (!top->o_valid) {\n" - " std::cout << \"o_valid not set\\n\";\n" - " //verilator_finish();\n" - " exit(-1);\n" - " }\n" - "\n" - " for (auto &pair: store_map) {\n" - " assert(pair.second.empty());\n" - " }\n" - " for (auto &pair: load_map) {\n" - " assert(pair.second.empty());\n" - " }\n" - " std::cout << \"finished - took \" << (main_time - start) << \"cycles\\n\";\n" - "\n" - " // empty loads and stores\n" - " ref_loads.erase(ref_loads.begin(), ref_loads.end());\n" - " ref_stores.erase(ref_stores.begin(), ref_stores.end());\n" - " hls_loads.erase(hls_loads.begin(), hls_loads.end());\n" - " hls_stores.erase(hls_stores.begin(), hls_stores.end());\n" - " mem_access_ctr = 0;\n"; - if (ln->type().NumResults() && !dynamic_cast(&ln->type().ResultType(0))) +}; +)" << std::endl; + + cpp << "MemoryQueue memory_queues[] = {"; + for (size_t i = 0; i < mem_reqs.size(); i++) + cpp << 
MEMORY_RESPONSE_LATENCY << ", "; + cpp << "};" + << R"( + +// ======== Variables and functions for tracing the verilated model ======== +#ifdef TRACE_SIGNALS +#ifdef FST +VerilatedFstC *tfp; +#else +VerilatedVcdC *tfp; +#endif +#endif + +static void init_tracing() { + #ifdef TRACE_SIGNALS + #ifdef FST + tfp = new VerilatedFstC; + top->trace(tfp, 99); // Trace 99 levels of hierarchy + tfp->open(xstr(V_NAME) ".fst"); + #else + tfp = new VerilatedVcdC; + top->trace(tfp, 99); // Trace 99 levels of hierarchy + tfp->open(xstr(V_NAME) ".vcd"); + #endif + #endif +} + +// Saves the current state of all wires and registers at the given timestep +static void capture_trace(uint64_t time) { + #ifdef TRACE_SIGNALS + tfp->dump(time); + #ifdef VCD_FLUSH + tfp->flush(); + #endif + #endif +} + +static void finish_trace() { + // Coverage analysis (since test passed) +#if VM_COVERAGE + Verilated::mkdir("logs"); + VerilatedCov::write("logs/coverage.dat"); +#endif +#ifdef TRACE_SIGNALS + tfp->close(); +#endif +} + +// ======== Setup and execution of the verilated model ======== +static void posedge(); +static void negedge(); +static void verilator_finish(); + +// Called by $time in Verilog. 
Converts to real, to match SystemC +double sc_time_stamp() { + return main_time; +} + +// Called once to initialize the verilated model +static void verilator_init(int argc, char **argv) { + // set up signaling so we can kill the program and still get waveforms + struct sigaction action; + memset(&action, 0, sizeof(struct sigaction)); + action.sa_handler = [](int sig){ terminate = true; }; + sigaction(SIGTERM, &action, NULL); + sigaction(SIGKILL, &action, NULL); + sigaction(SIGINT, &action, NULL); + + atexit(verilator_finish); + + // Set debug level, 0 is off, 9 is highest presently used + // May be overridden by commandArgs + Verilated::debug(0); + + // Randomization reset policy + // May be overridden by commandArgs + Verilated::randReset(2); + + // Verilator must compute traced signals + Verilated::traceEverOn(true); + + // Pass arguments so Verilated code can see them, e.g., $value$plusargs + // This needs to be called before you create any model + Verilated::commandArgs(argc, argv); + + // Construct the Verilated model + top = new V_NAME; + main_time = 0; + + init_tracing(); + + top->clk = 0; + top->reset = 1; + top->i_valid = 0; + top->o_ready = 0; +)" << std::endl; + + // Zero out all kernel inputs, except for context variables + size_t first_ctx_var = reg_args.size() - kernel.GetContextVars().size(); + for (size_t i = 0; i < first_ctx_var; i++) { - cpp << " return top->o_data_0;\n"; + cpp << " top->i_data_" << i << " = 0;" << std::endl; } - cpp << "}\n"; - - get_function_header(cpp, ln, "run_ref"); - cpp << " {\n"; - cpp << " int fd[2]; // channel 0 for reading and 1 for writing\n" - " size_t tmp = pipe(fd);\n" - " int pid = fork();\n" - " if(pid == 0) { // child\n" - " close(fd[0]); // close fd[0] since child will only write\n" - " "; - call_function(cpp, ln, "instrumented_ref"); - cpp << "\n" - " size_t cnt = ref_loads.size();\n" - " tmp = write(fd[1], &cnt, sizeof(size_t));\n" - " for (auto load:ref_loads) {\n" - " tmp = write(fd[1], &load, 
sizeof(mem_access));\n" - " }\n" - " cnt = ref_stores.size();\n" - " tmp = write(fd[1], &cnt, sizeof(size_t));\n" - " for (auto store:ref_stores) {\n" - " tmp = write(fd[1], &store, sizeof(mem_access));\n" - " }\n" - " close(fd[1]);\n" - " exit(0);\n" - " } else { // parent\n" - " close(fd[1]); // close fd[1] since parent will only read\n" - " size_t cnt;\n" - " size_t tmp = read(fd[0], &cnt, sizeof(size_t));\n" - " for (size_t i = 0; i < cnt; ++i) {\n" - " mem_access load;\n" - " tmp = read(fd[0], &load, sizeof(mem_access));\n" - " ref_loads.push_back(load);\n" - " if(load_map.find(load.addr) == load_map.end()){\n" - " load_map.emplace(load.addr, std::deque());\n" - " }\n" - " load_map.find(load.addr)->second.push_back(load);\n" - " }\n" - " tmp = read(fd[0], &cnt, sizeof(size_t));\n" - " for (size_t i = 0; i < cnt; ++i) {\n" - " mem_access store;\n" - " tmp = read(fd[0], &store, sizeof(mem_access));\n" - " ref_stores.push_back(store);\n" - " if(store_map.find(store.addr) == store_map.end()){\n" - " store_map.emplace(store.addr, std::deque());\n" - " }\n" - " store_map.find(store.addr)->second.push_back(store);\n" - " }\n" - " close(fd[0]);\n" - " }\n"; - - if (ln->type().NumResults() && !dynamic_cast(&ln->type().ResultType(0))) + for (const auto & ctx : kernel.GetContextVars()) { - cpp << " return 0;\n"; + // Context variables should always be external symbols imported by name + const auto import = util::AssertedCast(ctx.input->origin()); + cpp << " top->i_data_" << first_ctx_var << " = (uint64_t) &" << import->Name() << ";" + << std::endl; + first_ctx_var++; } - cpp << "}\n"; - get_function_header(cpp, ln, function_name); - cpp << " {\n" - " "; - call_function(cpp, ln, "run_ref"); - cpp << "\n"; - if (ln->type().NumResults() && !dynamic_cast(&ln->type().ResultType(0))) + + cpp << R"( + // Run some cycles with reset set HIGH + posedge(); + negedge(); + posedge(); + top->reset = 0; + negedge(); +} + +// Model outputs should be read right before posedge() +// Model 
inputs should be set right after posedge() +static void posedge() { + if (terminate) { + std::cout << "terminating\n"; + exit(-1); + } + assert(!Verilated::gotFinish()); + assert(top->clk == 0); + + // Read memory requests just before the rising edge +)"; + + // Emit calls to MemoryQueue::accept_request() + for (size_t i = 0; i < mem_reqs.size(); i++) { - cpp << " return "; + const auto req_bt = util::AssertedCast(&mem_reqs[i]->type()); + const auto has_write = req_bt->get_element_type("write") != nullptr; + + cpp << " memory_queues[" << i << "].accept_request("; + cpp << "top->mem_" << i << "_req_ready, "; + cpp << "top->mem_" << i << "_req_valid, "; + if (has_write) + cpp << " top->mem_" << i << "_req_data_write, "; + else + cpp << "0, "; + cpp << "top->mem_" << i << "_req_data_addr, "; + cpp << "top->mem_" << i << "_req_data_size, "; + if (has_write) + cpp << "top->mem_" << i << "_req_data_data, "; + else + cpp << "0, "; + cpp << "top->mem_" << i << "_req_data_id, "; + cpp << "top->mem_" << i << "_res_ready, "; + cpp << "top->mem_" << i << "_res_valid);" << std::endl; } - else + + cpp << R"( + top->clk = 1; + top->eval(); + // Capturing the posedge trace here would make external inputs appear on negedge + // capture_trace(main_time * 2); +} + +static void negedge() { + assert(!Verilated::gotFinish()); + assert(top->clk == 1); + + // Memory responses are ready before the negedge +)"; + + // Emit calls to MemoryQueue::produce_response + for (size_t i = 0; i < mem_reqs.size(); i++) { - cpp << " "; + cpp << " memory_queues[" << i << "].produce_response("; + cpp << "top->mem_" << i << "_req_ready, "; + cpp << "top->mem_" << i << "_res_valid, "; + cpp << "top->mem_" << i << "_res_data_data, "; + cpp << "top->mem_" << i << "_res_data_id);" << std::endl; } - call_function(cpp, ln, "run_hls"); - cpp << "\n"; - cpp << "}\n"; - cpp << "}\n"; - return cpp.str(); + + cpp << R"( + top->eval(); + + // Capturing the posedge trace here makes external inputs appear to update with 
the posedge + capture_trace(main_time * 2); + + top->clk = 0; + top->eval(); + capture_trace(main_time * 2 + 1); + main_time++; } -void -VerilatorHarnessHLS::call_function( - std::ostringstream & cpp, - const jlm::llvm::lambda::node * ln, - const std::string & function_name) -{ - cpp << function_name << "("; - for (size_t i = 0; i < ln->type().NumArguments(); ++i) - { - if (dynamic_cast(&ln->type().ArgumentType(i))) - { - continue; +static void verilator_finish() { + if (!top) + return; + top->final(); + finish_trace(); + // delete top; +} + +static )" + << c_return_type.value_or("void") << " run_hls(" << std::endl; + cpp << c_params << R"( +) { + if(!top) { + verilator_init(0, NULL); } - else if (dynamic_cast(&ln->type().ArgumentType(i))) - { - continue; + int start = main_time; + + // Run cycles until i_ready becomes HIGH + for (int i = 0; i < TIMEOUT && !top->i_ready; i++) { + posedge(); + negedge(); } - if (i != 0) - { - cpp << " ,"; + if (!top->i_ready) { + std::cout << "i_ready was not set within TIMEOUT" << std::endl; + exit(-1); } - cpp << "a" << i; - } - cpp << ");"; -} -void -VerilatorHarnessHLS::get_function_header( - std::ostringstream & cpp, - const jlm::llvm::lambda::node * ln, - const std::string & function_name) -{ - std::string return_type; - if (ln->type().NumResults() == 0) + posedge(); + + // Pass in input data for one cycle + top->i_valid = 1; +)"; + + for (size_t i = 0; i < num_c_params; i++) { - return_type = "void"; + cpp << "top->i_data_" << i << " = (uint64_t) a" << i << ";" << std::endl; } - else + + cpp << R"( + negedge(); + posedge(); + + top->o_ready = 1; + top->i_valid = 0; +)"; + + // Zero out the kernel inputs again + for (size_t i = 0; i < num_c_params; i++) { - auto type = &ln->type().ResultType(0); - if (dynamic_cast(type)) - { - return_type = "void"; - } - else if (dynamic_cast(type)) - { - return_type = "void"; - } - else - { - return_type = convert_to_c_type(type); - } + cpp << "top->i_data_" << i << " = 0;" << std::endl; } - 
cpp << return_type << " " << function_name << "(\n"; - for (size_t i = 0; i < ln->type().NumArguments(); ++i) - { - if (dynamic_cast(&ln->type().ArgumentType(i))) - { - continue; + + cpp << R"( + negedge(); + + // Cycle until o_valid becomes HIGH + for (int i = 0; i < TIMEOUT && !top->o_valid; i++) { + posedge(); + negedge(); } - else if (dynamic_cast(&ln->type().ArgumentType(i))) - { - continue; + if (!top->o_valid) { + std::cout << "o_valid was not set within TIMEOUT" << std::endl; + exit(-1); } - if (i != 0) - { - cpp << ",\n"; + + std::cout << "finished - took " << (main_time - start) << " cycles" << std::endl; + + // Ensure all memory queues are empty +)"; + for (size_t i = 0; i < mem_reqs.size(); i++) + cpp << "assert(memory_queues[" << i << "].empty());" << std::endl; + + if (c_return_type.has_value()) + cpp << "return top->o_data_0;" << std::endl; + + cpp << R"( +} + + +// ======== Running the kernel compiled as C, with intrumentation ======== +extern "C" )" + << c_return_type.value_or("void") << " instrumented_ref(" << c_params << ");" + << R"( + +extern "C" void reference_load(void* addr, uint64_t width) { + instrumented_load(addr, width); +} + +extern "C" void reference_store(void* addr, uint64_t data, uint64_t width) { + instrumented_store(addr, data, width); +} + +extern "C" void reference_alloca(void* start, uint64_t length) { + ignore_memory_region(start, length); +} + +std::vector ref_memory_accesses; + +// Calls instrumented_ref in a forked process and stores its memory accesses +static void run_ref( +)" << c_params + << R"( +) { + int fd[2]; // channel 0 for reading and 1 for writing + size_t tmp = pipe(fd); + int pid = fork(); + if(pid == 0) { // child + close(fd[0]); // close fd[0] since child will only write + + instrumented_ref()" + << c_call_args << R"(); + + // Send all memory accesses to the parent + size_t cnt = memory_accesses.size(); + tmp = write(fd[1], &cnt, sizeof(size_t)); + for (auto & access : memory_accesses) + tmp = write(fd[1], 
&access, sizeof(mem_access)); + + close(fd[1]); + exit(0); + } else { // parent + close(fd[1]); // close fd[1] since parent will only read + + // Retrieve all memory_accesses from the child + size_t cnt; + tmp = read(fd[0], &cnt, sizeof(size_t)); + ref_memory_accesses.resize(cnt); + for (auto & access : ref_memory_accesses) + tmp = read(fd[0], &access, sizeof(mem_access)); + + close(fd[0]); } - cpp << " " << convert_to_c_type(&ln->type().ArgumentType(i)) << " a" << i - << convert_to_c_type_postfix(&ln->type().ArgumentType(i)); - } - cpp << "\n" - ")"; } -std::string -VerilatorHarnessHLS::convert_to_c_type(const jlm::rvsdg::Type * type) -{ - if (auto t = dynamic_cast(type)) - { - return "int" + util::strfmt(t->nbits()) + "_t"; - } - else if (jlm::rvsdg::is(*type)) - { - return "void*"; - } - else if (auto t = dynamic_cast(type)) - { - return convert_to_c_type(&t->element_type()); - } - else - { - throw std::logic_error(type->debug_string() + " not implemented!"); - } +// Checks that memory_accesses and ref_memory_accesses are identical within each address +static void compare_memory_accesses() { + assert (memory_accesses.size() == ref_memory_accesses.size()); + + // Stable sort the memory accesses by only address, keeping order within each address. 
+ auto addr_sort = [](const mem_access & a, const mem_access & b) { + return a.addr < b.addr; + }; + std::stable_sort(memory_accesses.begin(), memory_accesses.end(), addr_sort); + std::stable_sort(ref_memory_accesses.begin(), ref_memory_accesses.end(), addr_sort); + assert(memory_accesses == ref_memory_accesses); } -std::string -VerilatorHarnessHLS::convert_to_c_type_postfix(const jlm::rvsdg::Type * type) +// ======== Entry point for calling kernel from host device (C code) ======== +extern "C" )" + << c_return_type.value_or("void") << " " << function_name << "(" << c_params << ")" + << R"( { - if (auto t = dynamic_cast(type)) - { - return util::strfmt("[", t->nelements(), "]", convert_to_c_type(&t->element_type())); - } - else - { - return ""; - } + // Reset structures used for tracing memory operations + memory_accesses.clear(); + ignored_memory_regions.clear(); + + // Execute instrumented version of kernel compiled for the host in a fork + run_ref()" + << c_call_args << R"(); + + // Execute the verilated model in this process + )"; + if (c_return_type.has_value()) + cpp << "auto result = "; + cpp << "run_hls(" << c_call_args << ");" << std::endl; + + cpp << R"( + // Compare traced memory accesses + compare_memory_accesses(); +)"; + + if (c_return_type.has_value()) + cpp << " return result;" << std::endl; + + cpp << "}" << std::endl; + + return cpp.str(); } } // namespace jlm::hls diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp index cfe24aae7..676a55426 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.hpp @@ -14,6 +14,8 @@ namespace jlm::hls class VerilatorHarnessHLS : public BaseHLS { + const util::filepath VerilogFile_; + std::string extension() override { @@ -21,49 +23,18 @@ class VerilatorHarnessHLS : public BaseHLS } std::string - get_text(llvm::RvsdgModule & rm) override; + GetText(llvm::RvsdgModule & rm) 
override; public: /** * Construct a Verilator harness generator. * - * /param verilogFile The filename to the Verilog file that is to be used together with the + * @param verilogFile The filename to the Verilog file that is to be used together with the * generated harness as input to Verilator. */ explicit VerilatorHarnessHLS(util::filepath verilogFile) : VerilogFile_(std::move(verilogFile)) {} - -private: - const util::filepath VerilogFile_; - - /** - * \return The Verilog filename that is to be used together with the generated harness as input to - * Verilator. - */ - [[nodiscard]] const util::filepath & - GetVerilogFileName() const noexcept - { - return VerilogFile_; - } - - std::string - convert_to_c_type(const jlm::rvsdg::Type * type); - - std::string - convert_to_c_type_postfix(const jlm::rvsdg::Type * type); - - void - get_function_header( - std::ostringstream & cpp, - const llvm::lambda::node * ln, - const std::string & function_name); - - void - call_function( - std::ostringstream & cpp, - const llvm::lambda::node * ln, - const std::string & function_name); }; } diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index 25fc47f22..e6a6e226c 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=63c77eebf44c53b7d24ad2baa7c92e3533d2b20d +GIT_COMMIT=8ff67e118ab25ce7cbbdc8adfefb19340c54ce83 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" From 39c662bd6a0c54e0bd3db3b4685a2fb56c4c0124 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 25 Dec 2024 09:13:48 +0100 Subject: [PATCH 145/170] Adds additional cases for remove_redundant_buf optimization (#682) --- .../rvsdg2rhls/remove-redundant-buf.cpp | 20 ++++++++++++++++--- 1 file changed, 17 insertions(+), 3 deletions(-) diff --git 
a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index 6291c5804..71211e64f 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -16,19 +16,33 @@ eliminate_buf(jlm::rvsdg::output * o) if (auto so = dynamic_cast(o)) { auto node = so->node(); - if (dynamic_cast(&node->GetOperation())) + if (jlm::rvsdg::is(node->GetOperation())) { return eliminate_buf(node->input(1)->origin()); } - else if (dynamic_cast(&node->GetOperation())) + else if (jlm::rvsdg::is(node->GetOperation())) + { + // part of memory disambiguation + return eliminate_buf(node->input(0)->origin()); + } + else if (jlm::rvsdg::is(node->GetOperation())) + { + return true; + } + else if (jlm::rvsdg::is(node->GetOperation())) { return true; } - else if (dynamic_cast(&node->GetOperation())) + else if (jlm::rvsdg::is(node->GetOperation())) + { + return true; + } + else if (jlm::rvsdg::is(node->GetOperation())) { return true; } } + return false; } From af379c0e17f331aa2cb6c5892a6d62c9e94f1974 Mon Sep 17 00:00:00 2001 From: urays Date: Thu, 26 Dec 2024 18:10:35 +0800 Subject: [PATCH 146/170] Update .gitignore (#699) build-* will tell git to ignore folders and filenames prefixed with build- when committing. However, this would cause the changes to scripts/build-circt.sh and build-mlir.sh to also be ignored. So, it seems like build-*/ is working as expected. 
--- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 34e6b526f..18f65893e 100644 --- a/.gitignore +++ b/.gitignore @@ -3,5 +3,5 @@ Makefile.custom .idea docs/html build -build-* +build-*/ usr From 507b9b4cb8a58289e7adeaa801a8148648579e00 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Thu, 26 Dec 2024 15:53:32 +0100 Subject: [PATCH 147/170] Convert gamma reductions to new reduction interface (#700) --- jlm/rvsdg/gamma.cpp | 28 ++++++++++++++++++ jlm/rvsdg/gamma.hpp | 50 ++++++++++++++++++++++++++++++++ tests/jlm/rvsdg/test-gamma.cpp | 53 ++++++++++++++++++++++------------ 3 files changed, 113 insertions(+), 18 deletions(-) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 898d0227b..6554b020c 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -242,6 +242,34 @@ gamma_normal_form::set_control_constant_reduction(bool enable) graph()->mark_denormalized(); } +bool +ReduceGammaWithStaticallyKnownPredicate(Node & node) +{ + auto gammaNode = dynamic_cast(&node); + if (gammaNode && is_predicate_reducible(gammaNode)) + { + perform_predicate_reduction(gammaNode); + return true; + } + + return false; +} + +bool +ReduceGammaControlConstant(Node & node) +{ + auto gammaNode = dynamic_cast(&node); + if (gammaNode == nullptr) + return false; + + auto outputs = is_control_constant_reducible(gammaNode); + if (outputs.empty()) + return false; + + perform_control_constant_reduction(outputs); + return true; +} + GammaOperation::~GammaOperation() noexcept {} diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 244d3e7ab..63adc809c 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -367,6 +367,56 @@ GammaNode::RemoveGammaOutputsWhere(const F & match) } } +/** + * Reduces a gamma node with a statically known predicate to the respective subregion determined + * by the value of the predicate. + * + * c = gamma 0 + * [] + * x = 45 + * [c <= x] + * [] + * y = 37 + * [c <= y] + * ... 
= add c + 5 + * => + * c = 45 + * ... = add c + 5 + * + * @param node A gamma node that is supposed to be reduced. + * @return True, if transformation was successful, otherwise false. + */ +bool +ReduceGammaWithStaticallyKnownPredicate(Node & node); + +/** + * Reduces the predicate of a gamma node g1 from the constants that originate from another gamma + * node g2 to the predicate of g2. + * + * p2 = gamma p1 + * [] + * x = 0 + * [p2 <= x] + * [] + * y = 1 + * [p2 <= y] + * ... = gamma p2 + * => + * p2 = gamma p1 + * [] + * x = 0 + * [p2 <= x] + * [] + * y = 1 + * [p2 <= y] + * ... = gamma p1 + * + * @param node A gamma node that is supposed to be reduced. + * @return True, if the transformation was successful, otherwise false. + */ +bool +ReduceGammaControlConstant(Node & node); + } #endif diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 40c9aa9c8..8b20a5b87 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -53,8 +53,9 @@ test_predicate_reduction() { using namespace jlm::rvsdg; + // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_predicate_reduction(true); + GammaOperation::normal_form(&graph)->set_predicate_reduction(false); bittype bits2(2); @@ -62,7 +63,7 @@ test_predicate_reduction() auto v1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); auto v2 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); - auto pred = jlm::rvsdg::control_constant(graph.root(), 3, 1); + auto pred = control_constant(graph.root(), 3, 1); auto gamma = GammaNode::create(pred, 3); auto ev0 = gamma->AddEntryVar(v0); @@ -72,8 +73,14 @@ test_predicate_reduction() auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); - graph.normalize(); - // jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); + + // Act + auto gammaNode = TryGetOwnerNode(*r.origin()); + ReduceGammaWithStaticallyKnownPredicate(*gammaNode); + view(graph.root(), stdout); + + // 
Assert assert(r.origin() == v1); graph.prune(); @@ -112,8 +119,9 @@ test_control_constant_reduction() { using namespace jlm::rvsdg; + // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_control_constant_reduction(true); + GammaOperation::normal_form(&graph)->set_control_constant_reduction(false); auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(1), "x"); @@ -121,11 +129,11 @@ test_control_constant_reduction() auto gamma = GammaNode::create(c, 2); - auto t = jlm::rvsdg::control_true(gamma->subregion(0)); - auto f = jlm::rvsdg::control_false(gamma->subregion(1)); + auto t = control_true(gamma->subregion(0)); + auto f = control_false(gamma->subregion(1)); - auto n0 = jlm::rvsdg::control_constant(gamma->subregion(0), 3, 0); - auto n1 = jlm::rvsdg::control_constant(gamma->subregion(1), 3, 1); + auto n0 = control_constant(gamma->subregion(0), 3, 0); + auto n1 = control_constant(gamma->subregion(1), 3, 1); auto xv1 = gamma->AddExitVar({ t, f }); auto xv2 = gamma->AddExitVar({ n0, n1 }); @@ -133,10 +141,14 @@ test_control_constant_reduction() auto & ex1 = jlm::tests::GraphExport::Create(*xv1.output, ""); auto & ex2 = jlm::tests::GraphExport::Create(*xv2.output, ""); - jlm::rvsdg::view(graph.root(), stdout); - graph.normalize(); - jlm::rvsdg::view(graph.root(), stdout); + view(graph.root(), stdout); + + // Act + auto gammaNode = TryGetOwnerNode(*ex1.origin()); + ReduceGammaControlConstant(*gammaNode); + view(graph.root(), stdout); + // Assert auto match = output::GetNode(*ex1.origin()); assert(match && is(match->GetOperation())); auto & match_op = to_match_op(match->GetOperation()); @@ -150,8 +162,9 @@ test_control_constant_reduction2() { using namespace jlm::rvsdg; + // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_control_constant_reduction(true); + GammaOperation::normal_form(&graph)->set_control_constant_reduction(false); auto import = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), "import"); @@ -159,19 
+172,23 @@ test_control_constant_reduction2() auto gamma = GammaNode::create(c, 4); - auto t1 = jlm::rvsdg::control_true(gamma->subregion(0)); - auto t2 = jlm::rvsdg::control_true(gamma->subregion(1)); - auto t3 = jlm::rvsdg::control_true(gamma->subregion(2)); - auto f = jlm::rvsdg::control_false(gamma->subregion(3)); + auto t1 = control_true(gamma->subregion(0)); + auto t2 = control_true(gamma->subregion(1)); + auto t3 = control_true(gamma->subregion(2)); + auto f = control_false(gamma->subregion(3)); auto xv = gamma->AddExitVar({ t1, t2, t3, f }); auto & ex = jlm::tests::GraphExport::Create(*xv.output, ""); jlm::rvsdg::view(graph.root(), stdout); - graph.normalize(); + + // Act + auto gammaNode = TryGetOwnerNode(*ex.origin()); + ReduceGammaControlConstant(*gammaNode); jlm::rvsdg::view(graph.root(), stdout); + // Assert auto match = output::GetNode(*ex.origin()); assert(is(match)); } From 5241cc79b2122a34a0459e070878d0b6488a4aeb Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 31 Dec 2024 00:19:44 +0100 Subject: [PATCH 148/170] Clean up graph class (#702) --- jlm/hls/backend/rhls2firrtl/base-hls.cpp | 2 +- .../rhls2firrtl/verilator-harness-hls.cpp | 11 +- .../rvsdg2rhls/DeadNodeElimination.cpp | 2 +- .../backend/rvsdg2rhls/GammaConversion.cpp | 2 +- .../backend/rvsdg2rhls/ThetaConversion.cpp | 2 +- .../backend/rvsdg2rhls/UnusedStateRemoval.cpp | 7 +- jlm/hls/backend/rvsdg2rhls/add-buffers.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-forks.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/add-sinks.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/check-rhls.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/dae-conv.cpp | 2 +- .../rvsdg2rhls/distribute-constants.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-conv.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 2 +- 
jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 4 +- jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp | 2 +- .../rvsdg2rhls/remove-redundant-buf.cpp | 2 +- .../rvsdg2rhls/remove-unused-state.cpp | 6 +- jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 16 +- jlm/hls/ir/hls.cpp | 2 +- jlm/hls/opt/cne.cpp | 12 +- jlm/hls/util/view.cpp | 2 +- jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp | 8 +- .../InterProceduralGraphConversion.cpp | 10 +- jlm/llvm/ir/RvsdgModule.hpp | 4 +- jlm/llvm/ir/operators/Load.hpp | 2 +- jlm/llvm/ir/operators/Store.cpp | 8 +- jlm/llvm/ir/operators/Store.hpp | 2 +- jlm/llvm/ir/operators/call.cpp | 2 +- jlm/llvm/ir/operators/call.hpp | 2 +- jlm/llvm/opt/DeadNodeElimination.cpp | 18 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 2 +- jlm/llvm/opt/OptimizationSequence.cpp | 4 +- jlm/llvm/opt/RvsdgTreePrinter.cpp | 6 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 4 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 4 +- .../RegionAwareMemoryNodeProvider.cpp | 12 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 8 +- .../TopDownMemoryNodeEliminator.cpp | 6 +- jlm/llvm/opt/cne.cpp | 12 +- jlm/llvm/opt/inlining.cpp | 8 +- jlm/llvm/opt/inversion.cpp | 10 +- jlm/llvm/opt/pull.cpp | 6 +- jlm/llvm/opt/push.cpp | 6 +- jlm/llvm/opt/reduction.cpp | 10 +- jlm/llvm/opt/unroll.cpp | 8 +- jlm/mlir/backend/JlmToMlirConverter.cpp | 3 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 2 +- jlm/rvsdg/binary.cpp | 14 +- jlm/rvsdg/binary.hpp | 6 +- jlm/rvsdg/bitstring/concat.cpp | 4 +- jlm/rvsdg/gamma.cpp | 6 +- jlm/rvsdg/gamma.hpp | 2 +- jlm/rvsdg/graph.cpp | 45 +- jlm/rvsdg/graph.hpp | 58 ++- jlm/rvsdg/node-normal-form.cpp | 2 +- jlm/rvsdg/node.cpp | 4 +- jlm/rvsdg/operation.cpp | 6 +- jlm/rvsdg/region.cpp | 4 +- jlm/rvsdg/simple-node.cpp | 2 +- jlm/rvsdg/simple-node.hpp | 2 +- jlm/rvsdg/simple-normal-form.cpp | 2 +- jlm/rvsdg/statemux.hpp | 0 jlm/rvsdg/theta.cpp | 2 +- jlm/rvsdg/tracker.cpp | 1 + 
jlm/rvsdg/unary.cpp | 2 +- jlm/rvsdg/unary.hpp | 2 +- jlm/rvsdg/view.hpp | 2 +- jlm/tooling/Command.cpp | 8 +- tests/TestRvsdgs.cpp | 427 +++++++++++------- .../rvsdg2rhls/DeadNodeEliminationTests.cpp | 4 +- .../rvsdg2rhls/MemoryConverterTests.cpp | 22 +- .../backend/rvsdg2rhls/MemoryQueueTests.cpp | 12 +- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 14 +- .../jlm/hls/backend/rvsdg2rhls/TestGamma.cpp | 10 +- .../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 5 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 10 +- .../rvsdg2rhls/test-loop-passthrough.cpp | 4 +- tests/jlm/llvm/backend/dot/DotWriterTests.cpp | 8 +- .../jlm/llvm/backend/llvm/r2j/GammaTests.cpp | 14 +- .../backend/llvm/r2j/test-recursive-data.cpp | 2 +- .../llvm/ThreeAddressCodeConversionTests.cpp | 8 +- tests/jlm/llvm/frontend/llvm/test-export.cpp | 2 +- tests/jlm/llvm/ir/operators/LoadTests.cpp | 42 +- tests/jlm/llvm/ir/operators/StoreTests.cpp | 35 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 40 +- tests/jlm/llvm/ir/operators/TestLambda.cpp | 36 +- tests/jlm/llvm/ir/operators/TestPhi.cpp | 16 +- tests/jlm/llvm/ir/operators/test-delta.cpp | 10 +- tests/jlm/llvm/ir/operators/test-sext.cpp | 12 +- .../opt/InvariantValueRedirectionTests.cpp | 32 +- tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp | 6 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 45 +- tests/jlm/llvm/opt/TestLoadMuxReduction.cpp | 24 +- tests/jlm/llvm/opt/TestLoadStoreReduction.cpp | 8 +- .../TestAgnosticMemoryNodeProvider.cpp | 30 +- .../llvm/opt/alias-analyses/TestAndersen.cpp | 2 +- .../alias-analyses/TestMemoryStateEncoder.cpp | 4 +- .../alias-analyses/TestPointerObjectSet.cpp | 9 +- .../opt/alias-analyses/TestPointsToGraph.cpp | 4 +- .../TestRegionAwareMemoryNodeProvider.cpp | 42 +- .../opt/alias-analyses/TestSteensgaard.cpp | 60 +-- tests/jlm/llvm/opt/test-cne.cpp | 80 ++-- tests/jlm/llvm/opt/test-inlining.cpp | 22 +- tests/jlm/llvm/opt/test-inversion.cpp | 12 +- tests/jlm/llvm/opt/test-pull.cpp | 20 +- 
tests/jlm/llvm/opt/test-push.cpp | 16 +- tests/jlm/llvm/opt/test-unroll.cpp | 74 +-- tests/jlm/mlir/TestJlmToMlirToJlm.cpp | 6 +- .../mlir/backend/TestJlmToMlirConverter.cpp | 52 ++- .../mlir/frontend/TestMlirToJlmConverter.cpp | 12 +- tests/jlm/rvsdg/ArgumentTests.cpp | 6 +- tests/jlm/rvsdg/RegionTests.cpp | 62 +-- tests/jlm/rvsdg/ResultTests.cpp | 6 +- tests/jlm/rvsdg/TestStructuralNode.cpp | 2 +- tests/jlm/rvsdg/bitstring/bitstring.cpp | 301 ++++++------ tests/jlm/rvsdg/test-binary.cpp | 50 +- tests/jlm/rvsdg/test-bottomup.cpp | 26 +- tests/jlm/rvsdg/test-cse.cpp | 20 +- tests/jlm/rvsdg/test-gamma.cpp | 31 +- tests/jlm/rvsdg/test-graph.cpp | 60 +-- tests/jlm/rvsdg/test-nodes.cpp | 36 +- tests/jlm/rvsdg/test-theta.cpp | 17 +- tests/jlm/rvsdg/test-topdown.cpp | 52 ++- tests/jlm/rvsdg/test-typemismatch.cpp | 4 +- tests/test-operation.cpp | 2 +- tests/test-operation.hpp | 4 +- 133 files changed, 1315 insertions(+), 1106 deletions(-) create mode 100644 jlm/rvsdg/statemux.hpp diff --git a/jlm/hls/backend/rhls2firrtl/base-hls.cpp b/jlm/hls/backend/rhls2firrtl/base-hls.cpp index 43e9d2d79..cd3597829 100644 --- a/jlm/hls/backend/rhls2firrtl/base-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/base-hls.cpp @@ -161,7 +161,7 @@ BaseHLS::create_node_names(rvsdg::Region * r) const jlm::llvm::lambda::node * BaseHLS::get_hls_lambda(llvm::RvsdgModule & rm) { - auto region = rm.Rvsdg().root(); + auto region = &rm.Rvsdg().GetRootRegion(); auto ln = dynamic_cast(region->Nodes().begin().ptr()); if (region->nnodes() == 1 && ln) { diff --git a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp index e11d21d62..4d7d9fef2 100644 --- a/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp +++ b/jlm/hls/backend/rhls2firrtl/verilator-harness-hls.cpp @@ -153,7 +153,7 @@ bool terminate = false; // ======== Global variables imported from other modules ======== )"; - for (const auto arg : rm.Rvsdg().root()->Arguments()) + for (const auto arg 
: rm.Rvsdg().GetRootRegion().Arguments()) { const auto graphImport = util::AssertedCast(arg); cpp << "extern \"C\" char " << graphImport->Name() << ";" << std::endl; @@ -270,8 +270,7 @@ class MemoryQueue { cpp << "MemoryQueue memory_queues[] = {"; for (size_t i = 0; i < mem_reqs.size(); i++) cpp << MEMORY_RESPONSE_LATENCY << ", "; - cpp << "};" - << R"( + cpp << "};" << R"( // ======== Variables and functions for tracing the verilated model ======== #ifdef TRACE_SIGNALS @@ -544,8 +543,7 @@ static )" // ======== Running the kernel compiled as C, with intrumentation ======== extern "C" )" - << c_return_type.value_or("void") << " instrumented_ref(" << c_params << ");" - << R"( + << c_return_type.value_or("void") << " instrumented_ref(" << c_params << ");" << R"( extern "C" void reference_load(void* addr, uint64_t width) { instrumented_load(addr, width); @@ -612,8 +610,7 @@ static void compare_memory_accesses() { // ======== Entry point for calling kernel from host device (C code) ======== extern "C" )" - << c_return_type.value_or("void") << " " << function_name << "(" << c_params << ")" - << R"( + << c_return_type.value_or("void") << " " << function_name << "(" << c_params << ")" << R"( { // Reset structures used for tracing memory operations memory_accesses.clear(); diff --git a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp index e754aa0dc..dea8fa526 100644 --- a/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp +++ b/jlm/hls/backend/rvsdg2rhls/DeadNodeElimination.cpp @@ -113,7 +113,7 @@ EliminateDeadNodesInRegion(rvsdg::Region & region) void EliminateDeadNodes(llvm::RvsdgModule & rvsdgModule) { - auto & rootRegion = *rvsdgModule.Rvsdg().root(); + auto & rootRegion = rvsdgModule.Rvsdg().GetRootRegion(); if (rootRegion.nnodes() != 1) { diff --git a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp index 9fae4a13d..4868d4768 100644 --- 
a/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/GammaConversion.cpp @@ -167,7 +167,7 @@ ConvertGammaNodesInRegion(rvsdg::Region & region) void ConvertGammaNodes(llvm::RvsdgModule & rvsdgModule) { - ConvertGammaNodesInRegion(*rvsdgModule.Rvsdg().root()); + ConvertGammaNodesInRegion(rvsdgModule.Rvsdg().GetRootRegion()); } } diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index 8786a08ca..6ef9fc707 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -93,7 +93,7 @@ ConvertThetaNodesInRegion(rvsdg::Region & region) void ConvertThetaNodes(jlm::llvm::RvsdgModule & rvsdgModule) { - ConvertThetaNodesInRegion(*rvsdgModule.Rvsdg().root()); + ConvertThetaNodesInRegion(rvsdgModule.Rvsdg().GetRootRegion()); } } diff --git a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp index 2eb8e9dd6..fd164d1a2 100644 --- a/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp +++ b/jlm/hls/backend/rvsdg2rhls/UnusedStateRemoval.cpp @@ -103,8 +103,9 @@ RemoveUnusedStatesFromLambda(llvm::lambda::node & lambdaNode) auto newLambdaOutput = newLambda->finalize(newResults); // TODO handle functions at other levels? 
- JLM_ASSERT(lambdaNode.region() == lambdaNode.region()->graph()->root()); - JLM_ASSERT((*lambdaNode.output()->begin())->region() == lambdaNode.region()->graph()->root()); + JLM_ASSERT(lambdaNode.region() == &lambdaNode.region()->graph()->GetRootRegion()); + JLM_ASSERT( + (*lambdaNode.output()->begin())->region() == &lambdaNode.region()->graph()->GetRootRegion()); JLM_ASSERT(lambdaNode.output()->nusers() == 1); lambdaNode.region()->RemoveResult((*lambdaNode.output()->begin())->index()); @@ -228,7 +229,7 @@ RemoveUnusedStatesInRegion(rvsdg::Region & region) void RemoveUnusedStates(llvm::RvsdgModule & rvsdgModule) { - RemoveUnusedStatesInRegion(*rvsdgModule.Rvsdg().root()); + RemoveUnusedStatesInRegion(rvsdgModule.Rvsdg().GetRootRegion()); } } diff --git a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp index 806c30d1c..d16b1a8d8 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-buffers.cpp @@ -90,7 +90,7 @@ void add_buffers(llvm::RvsdgModule & rm, bool pass_through) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); add_buffers(root, pass_through); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp index 8266c7ab3..966f78223 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-forks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-forks.cpp @@ -59,7 +59,7 @@ void add_forks(llvm::RvsdgModule & rvsdgModule) { auto & graph = rvsdgModule.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); add_forks(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index 29f98c916..d4b8495fe 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -50,7 +50,7 @@ void add_prints(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); 
add_prints(root); } @@ -58,7 +58,7 @@ void convert_prints(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); // TODO: make this less hacky by using the correct state types auto fct = llvm::FunctionType::Create({ rvsdg::bittype::Create(64), rvsdg::bittype::Create(64) }, {}); diff --git a/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp b/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp index ffce1f6e0..2604f8ccd 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-sinks.cpp @@ -46,7 +46,7 @@ void add_sinks(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); add_sinks(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index 93e04e0af..b19624b40 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -74,8 +74,8 @@ add_lambda_argument(llvm::lambda::node * ln, std::shared_ptrfinalize(new_results); // TODO handle functions at other levels? 
- JLM_ASSERT(ln->region() == ln->region()->graph()->root()); - JLM_ASSERT((*ln->output()->begin())->region() == ln->region()->graph()->root()); + JLM_ASSERT(ln->region() == &ln->region()->graph()->GetRootRegion()); + JLM_ASSERT((*ln->output()->begin())->region() == &ln->region()->graph()->GetRootRegion()); // ln->output()->divert_users(new_out); ln->region()->RemoveResult((*ln->output()->begin())->index()); @@ -149,7 +149,7 @@ void add_triggers(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); add_triggers(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp index fbc02d960..da4ebd7da 100644 --- a/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/alloca-conv.cpp @@ -228,7 +228,7 @@ void alloca_conv(jlm::llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); alloca_conv(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp index 9a44ec580..7f5b9e27e 100644 --- a/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/check-rhls.cpp @@ -46,7 +46,7 @@ void check_rhls(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); if (root->nnodes() != 1) { throw jlm::util::error("Root should have only one node now"); diff --git a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp index 02cf0b147..f12bdf643 100644 --- a/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/dae-conv.cpp @@ -22,7 +22,7 @@ void dae_conv(jlm::llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); dae_conv(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 
438be104a..6c4b64b05 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -118,7 +118,7 @@ void hls::distribute_constants(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); distribute_constants(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp index 5393959eb..253a09462 100644 --- a/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp +++ b/jlm/hls/backend/rvsdg2rhls/instrument-ref.cpp @@ -58,7 +58,7 @@ void instrument_ref(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); auto lambda = dynamic_cast(root->Nodes().begin().ptr()); auto newLambda = change_function_name(lambda, "instrumented_ref"); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp index cb664d3ac..967953d78 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-conv.cpp @@ -123,7 +123,7 @@ trace_call(const jlm::rvsdg::output * output) if (auto argument = dynamic_cast(output)) { auto graph = output->region()->graph(); - if (argument->region() == graph->root()) + if (argument->region() == &graph->GetRootRegion()) { return argument; } @@ -566,7 +566,7 @@ jlm::hls::MemoryConverter(jlm::llvm::RvsdgModule & rm) // arguments. 
// - auto root = rm.Rvsdg().root(); + auto root = &rm.Rvsdg().GetRootRegion(); auto lambda = dynamic_cast(root->Nodes().begin().ptr()); // diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index 9aa8f6d7d..67ed15714 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -23,7 +23,7 @@ void jlm::hls::mem_queue(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); mem_queue(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 2d6ab9fa1..883175cf8 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -26,7 +26,7 @@ void mem_sep_independent(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); mem_sep_independent(root); } @@ -34,7 +34,7 @@ void mem_sep_argument(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); mem_sep_argument(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp index 675b3ffbf..bd7ef1632 100644 --- a/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp +++ b/jlm/hls/backend/rvsdg2rhls/memstate-conv.cpp @@ -20,7 +20,7 @@ void memstate_conv(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); memstate_conv(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp index dbe680b55..7f427b0fa 100644 --- a/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp +++ b/jlm/hls/backend/rvsdg2rhls/merge-gamma.cpp @@ -17,7 +17,7 @@ void merge_gamma(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); merge_gamma(root); } diff --git 
a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp index 71211e64f..2fc2c4a05 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-redundant-buf.cpp @@ -80,7 +80,7 @@ void remove_redundant_buf(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); remove_redundant_buf(root); } diff --git a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp index a0fe61ce3..c07541662 100644 --- a/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp +++ b/jlm/hls/backend/rvsdg2rhls/remove-unused-state.cpp @@ -123,7 +123,7 @@ void remove_unused_state(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); remove_unused_state(root); } @@ -239,8 +239,8 @@ remove_lambda_passthrough(llvm::lambda::node * ln) auto new_out = new_lambda->finalize(new_results); // TODO handle functions at other levels? 
- JLM_ASSERT(ln->region() == ln->region()->graph()->root()); - JLM_ASSERT((*ln->output()->begin())->region() == ln->region()->graph()->root()); + JLM_ASSERT(ln->region() == &ln->region()->graph()->GetRootRegion()); + JLM_ASSERT((*ln->output()->begin())->region() == &ln->region()->graph()->GetRootRegion()); // ln->output()->divert_users(new_out); // can't divert since the type changed JLM_ASSERT(ln->output()->nusers() == 1); diff --git a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp index 22eb24b36..3241a9b70 100644 --- a/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rhls-dne.cpp @@ -325,7 +325,7 @@ void dne(llvm::RvsdgModule & rm) { auto & graph = rm.Rvsdg(); - auto root = graph.root(); + auto root = &graph.GetRootRegion(); if (root->nnodes() != 1) { throw util::error("Root should have only one node now"); diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 16373bf09..91856343e 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -91,7 +91,7 @@ void dump_xml(llvm::RvsdgModule & rvsdgModule, const std::string & file_name) { auto xml_file = fopen(file_name.c_str(), "w"); - jlm::rvsdg::view_xml(rvsdgModule.Rvsdg().root(), xml_file); + jlm::rvsdg::view_xml(&rvsdgModule.Rvsdg().GetRootRegion(), xml_file); fclose(xml_file); } @@ -121,7 +121,7 @@ trace_call(jlm::rvsdg::input * input) { result = input->origin(); } - else if (argument->region() == graph->root()) + else if (argument->region() == &graph->GetRootRegion()) { result = argument; } @@ -189,7 +189,7 @@ convert_alloca(rvsdg::Region * region) } else if (auto po = dynamic_cast(&(node->GetOperation()))) { - auto rr = region->graph()->root(); + auto rr = ®ion->graph()->GetRootRegion(); auto delta_name = jlm::util::strfmt("hls_alloca_", alloca_cnt++); auto delta_type = llvm::PointerType::Create(); std::cout << "alloca " << delta_name << ": " << 
po->value_type().debug_string() << "\n"; @@ -328,7 +328,7 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) // create a copy of rm auto rhls = llvm::RvsdgModule::Create(rm.SourceFileName(), rm.TargetTriple(), rm.DataLayout()); std::cout << "processing " << rm.SourceFileName().name() << "\n"; - auto root = rm.Rvsdg().root(); + auto root = &rm.Rvsdg().GetRootRegion(); for (auto node : jlm::rvsdg::topdown_traverser(root)) { if (auto ln = dynamic_cast(node)) @@ -386,7 +386,7 @@ split_hls_function(llvm::RvsdgModule & rm, const std::string & function_name) } } // copy function into rhls - auto new_ln = ln->copy(rhls->Rvsdg().root(), smap); + auto new_ln = ln->copy(&rhls->Rvsdg().GetRootRegion(), smap); new_ln = change_linkage(new_ln, llvm::linkage::external_linkage); auto oldExport = ln->ComputeCallSummary()->GetRvsdgExport(); jlm::llvm::GraphExport::Create(*new_ln->output(), oldExport ? oldExport->Name() : ""); @@ -449,13 +449,13 @@ dump_ref(llvm::RvsdgModule & rhls, std::string & path) auto reference = llvm::RvsdgModule::Create(rhls.SourceFileName(), rhls.TargetTriple(), rhls.DataLayout()); rvsdg::SubstitutionMap smap; - rhls.Rvsdg().root()->copy(reference->Rvsdg().root(), smap, true, true); + rhls.Rvsdg().GetRootRegion().copy(&reference->Rvsdg().GetRootRegion(), smap, true, true); pre_opt(*reference); instrument_ref(*reference); - for (size_t i = 0; i < reference->Rvsdg().root()->narguments(); ++i) + for (size_t i = 0; i < reference->Rvsdg().GetRootRegion().narguments(); ++i) { auto graphImport = - util::AssertedCast(reference->Rvsdg().root()->argument(i)); + util::AssertedCast(reference->Rvsdg().GetRootRegion().argument(i)); std::cout << "impport " << graphImport->Name() << ": " << graphImport->type().debug_string() << "\n"; } diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index bafa688cb..afb5eb0e1 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -101,7 +101,7 @@ loop_node::add_loopconst(jlm::rvsdg::output * origin) 
loop_node * loop_node::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { - auto nf = graph()->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph()->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto loop = create(region, false); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 1390dbe5e..94745c79c 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -33,8 +33,8 @@ class cnestat final : public util::Statistics void start_mark_stat(const Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); AddTimer(MarkTimerLabel_).start(); } @@ -53,8 +53,8 @@ class cnestat final : public util::Statistics void end_divert_stat(const Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); GetTimer(DivertTimerLabel_).stop(); } @@ -609,11 +609,11 @@ cne(jlm::llvm::RvsdgModule & rm, util::StatisticsCollector & statisticsCollector auto statistics = cnestat::Create(rm.SourceFileName()); statistics->start_mark_stat(graph); - mark(graph.root(), ctx); + mark(&graph.GetRootRegion(), ctx); statistics->end_mark_stat(); statistics->start_divert_stat(); - divert(graph.root(), ctx); + divert(&graph.GetRootRegion(), ctx); statistics->end_divert_stat(graph); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index 6c9f276c1..c1a7a8561 100644 --- a/jlm/hls/util/view.cpp +++ 
b/jlm/hls/util/view.cpp @@ -414,7 +414,7 @@ void dump_dot(jlm::llvm::RvsdgModule & rvsdgModule, const std::string & file_name) { auto dot_file = fopen(file_name.c_str(), "w"); - jlm::hls::view_dot(rvsdgModule.Rvsdg().root(), dot_file); + jlm::hls::view_dot(&rvsdgModule.Rvsdg().GetRootRegion(), dot_file); fclose(dot_file); } diff --git a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp index 02db63c2b..65db2935e 100644 --- a/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp +++ b/jlm/llvm/backend/rvsdg2jlm/rvsdg2jlm.cpp @@ -33,7 +33,7 @@ class rvsdg_destruction_stat final : public util::Statistics void start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } @@ -540,7 +540,7 @@ convert_node(const rvsdg::Node & node, context & ctx) static void convert_nodes(const rvsdg::Graph & graph, context & ctx) { - for (const auto & node : rvsdg::topdown_traverser(graph.root())) + for (const auto & node : rvsdg::topdown_traverser(&graph.GetRootRegion())) convert_node(*node, ctx); } @@ -549,9 +549,9 @@ convert_imports(const rvsdg::Graph & graph, ipgraph_module & im, context & ctx) { auto & ipg = im.ipgraph(); - for (size_t n = 0; n < graph.root()->narguments(); n++) + for (size_t n = 0; n < graph.GetRootRegion().narguments(); n++) { - auto graphImport = util::AssertedCast(graph.root()->argument(n)); + auto graphImport = util::AssertedCast(graph.GetRootRegion().argument(n)); if (auto ftype = is_function_import(graphImport)) { auto f = function_node::create(ipg, graphImport->Name(), ftype, graphImport->Linkage()); diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index e4b6aee61..d23f15b35 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ 
-306,7 +306,7 @@ class InterProceduralGraphToRvsdgStatistics final : public util::Statistics End(const rvsdg::Graph & graph) noexcept { AddTimer(Label::Timer).stop(); - AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(&graph.GetRootRegion())); } static std::unique_ptr @@ -1124,7 +1124,7 @@ ConvertStronglyConnectedComponent( } phi::builder pb; - pb.begin(graph.root()); + pb.begin(&graph.GetRootRegion()); regionalizedVariableMap.PushRegion(*pb.subregion()); auto & outerVariableMap = @@ -1197,13 +1197,15 @@ ConvertInterProceduralGraphModule( std::move(interProceduralGraphModule.ReleaseStructTypeDeclarations())); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); /* FIXME: we currently cannot handle flattened_binary_op in jlm2llvm pass */ rvsdg::binary_op::normal_form(graph)->set_flatten(false); - RegionalizedVariableMap regionalizedVariableMap(interProceduralGraphModule, *graph->root()); + RegionalizedVariableMap regionalizedVariableMap( + interProceduralGraphModule, + graph->GetRootRegion()); auto stronglyConnectedComponents = interProceduralGraphModule.ipgraph().find_sccs(); for (const auto & stronglyConnectedComponent : stronglyConnectedComponents) diff --git a/jlm/llvm/ir/RvsdgModule.hpp b/jlm/llvm/ir/RvsdgModule.hpp index 1ca264b52..085696289 100644 --- a/jlm/llvm/ir/RvsdgModule.hpp +++ b/jlm/llvm/ir/RvsdgModule.hpp @@ -56,7 +56,7 @@ class GraphImport final : public rvsdg::GraphImport { auto graphImport = new GraphImport(graph, std::move(valueType), std::move(name), std::move(linkage)); - graph.root()->append_argument(graphImport); + graph.GetRootRegion().append_argument(graphImport); return *graphImport; } @@ -84,7 +84,7 @@ class GraphExport final : public rvsdg::GraphExport Create(rvsdg::output & origin, std::string name) { auto graphExport = new 
GraphExport(origin, std::move(name)); - origin.region()->graph()->root()->append_result(graphExport); + origin.region()->graph()->GetRootRegion().append_result(graphExport); return *graphExport; } }; diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index 1d5f9d197..d27554ada 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ b/jlm/llvm/ir/operators/Load.hpp @@ -467,7 +467,7 @@ class LoadNonVolatileOperation final : public LoadOperation GetNormalForm(rvsdg::Graph * graph) noexcept { return jlm::util::AssertedCast( - graph->node_normal_form(typeid(LoadNonVolatileOperation))); + graph->GetNodeNormalForm(typeid(LoadNonVolatileOperation))); } static std::unique_ptr diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 53cea7de9..51e20a036 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -428,7 +428,7 @@ store_normal_form::set_store_mux_reducible(bool enable) enable_store_mux_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -441,7 +441,7 @@ store_normal_form::set_store_store_reducible(bool enable) enable_store_store_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -454,7 +454,7 @@ store_normal_form::set_store_alloca_reducible(bool enable) enable_store_alloca_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -467,7 +467,7 @@ store_normal_form::set_multiple_origin_reducible(bool enable) enable_multiple_origin_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } std::optional> diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index fef0851c7..b48f8d79d 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -171,7 +171,7 @@ class StoreNonVolatileOperation final : public StoreOperation 
GetNormalForm(rvsdg::Graph * graph) noexcept { return util::AssertedCast( - graph->node_normal_form(typeid(StoreNonVolatileOperation))); + graph->GetNodeNormalForm(typeid(StoreNonVolatileOperation))); } static std::unique_ptr diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index cfd934bef..b77882da3 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -260,7 +260,7 @@ CallNode::ClassifyCall(const CallNode & callNode) return CallTypeClassifier::CreateRecursiveDirectCallClassifier(*argument); } - if (argument->region() == argument->region()->graph()->root()) + if (argument->region() == &argument->region()->graph()->GetRootRegion()) { return CallTypeClassifier::CreateExternalCallClassifier(*argument); } diff --git a/jlm/llvm/ir/operators/call.hpp b/jlm/llvm/ir/operators/call.hpp index f3214509b..53c44e180 100644 --- a/jlm/llvm/ir/operators/call.hpp +++ b/jlm/llvm/ir/operators/call.hpp @@ -261,7 +261,7 @@ class CallTypeClassifier final static std::unique_ptr CreateExternalCallClassifier(rvsdg::RegionArgument & argument) { - JLM_ASSERT(argument.region() == argument.region()->graph()->root()); + JLM_ASSERT(argument.region() == &argument.region()->graph()->GetRootRegion()); return std::make_unique(CallType::ExternalCall, argument); } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index 660667c11..d672d69a3 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -105,8 +105,8 @@ class DeadNodeElimination::Statistics final : public util::Statistics void StartMarkStatistics(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); 
AddTimer(MarkTimerLabel_).start(); } @@ -126,8 +126,8 @@ class DeadNodeElimination::Statistics final : public util::Statistics StopSweepStatistics(const rvsdg::Graph & graph) noexcept { GetTimer(SweepTimerLabel_).stop(); - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); } static std::unique_ptr @@ -161,7 +161,7 @@ DeadNodeElimination::run(RvsdgModule & module, jlm::util::StatisticsCollector & auto & rvsdg = module.Rvsdg(); auto statistics = Statistics::Create(module.SourceFileName()); statistics->StartMarkStatistics(rvsdg); - MarkRegion(*rvsdg.root()); + MarkRegion(rvsdg.GetRootRegion()); statistics->StopMarkStatistics(); statistics->StartSweepStatistics(); @@ -300,14 +300,14 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) void DeadNodeElimination::SweepRvsdg(rvsdg::Graph & rvsdg) const { - SweepRegion(*rvsdg.root()); + SweepRegion(rvsdg.GetRootRegion()); // Remove dead imports - for (size_t n = rvsdg.root()->narguments() - 1; n != static_cast(-1); n--) + for (size_t n = rvsdg.GetRootRegion().narguments() - 1; n != static_cast(-1); n--) { - if (!Context_->IsAlive(*rvsdg.root()->argument(n))) + if (!Context_->IsAlive(*rvsdg.GetRootRegion().argument(n))) { - rvsdg.root()->RemoveArgument(n); + rvsdg.GetRootRegion().RemoveArgument(n); } } } diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 85923567b..8b842532f 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -65,7 +65,7 @@ InvariantValueRedirection::RedirectInRootRegion(rvsdg::Graph & rvsdg) // We require a topdown traversal in the root region to ensure that a lambda node is visited // before its call nodes. 
This ensures that all invariant values are redirected in the lambda // subregion before we try to detect invariant call outputs. - for (auto node : rvsdg::topdown_traverser(rvsdg.root())) + for (auto node : rvsdg::topdown_traverser(&rvsdg.GetRootRegion())) { if (auto lambdaNode = dynamic_cast(node)) { diff --git a/jlm/llvm/opt/OptimizationSequence.cpp b/jlm/llvm/opt/OptimizationSequence.cpp index 3179a0ef3..b1ed91153 100644 --- a/jlm/llvm/opt/OptimizationSequence.cpp +++ b/jlm/llvm/opt/OptimizationSequence.cpp @@ -23,7 +23,7 @@ class OptimizationSequence::Statistics final : public util::Statistics void StartMeasuring(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } @@ -31,7 +31,7 @@ class OptimizationSequence::Statistics final : public util::Statistics EndMeasuring(const rvsdg::Graph & graph) noexcept { GetTimer(Label::Timer).stop(); - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); } static std::unique_ptr diff --git a/jlm/llvm/opt/RvsdgTreePrinter.cpp b/jlm/llvm/opt/RvsdgTreePrinter.cpp index 269b50e08..249b95b70 100644 --- a/jlm/llvm/opt/RvsdgTreePrinter.cpp +++ b/jlm/llvm/opt/RvsdgTreePrinter.cpp @@ -51,7 +51,7 @@ RvsdgTreePrinter::run(RvsdgModule & rvsdgModule, util::StatisticsCollector & sta statistics->Start(); auto annotationMap = ComputeAnnotationMap(rvsdgModule.Rvsdg()); - auto tree = rvsdg::Region::ToTree(*rvsdgModule.Rvsdg().root(), annotationMap); + auto tree = rvsdg::Region::ToTree(rvsdgModule.Rvsdg().GetRootRegion(), annotationMap); WriteTreeToFile(rvsdgModule, tree); statistics->Stop(); @@ -117,7 +117,7 @@ RvsdgTreePrinter::AnnotateNumRvsdgNodes( return numNodes; }; - annotateRegion(*rvsdg.root()); + annotateRegion(rvsdg.GetRootRegion()); } void @@ -177,7 +177,7 
@@ RvsdgTreePrinter::AnnotateNumMemoryStateInputsOutputs( } }; - annotateRegion(*rvsdg.root()); + annotateRegion(rvsdg.GetRootRegion()); } void diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index bea16c108..d71a33515 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -274,7 +274,7 @@ class Andersen::Statistics final : public util::Statistics void StartAndersenStatistics(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(AnalysisTimer_).start(); } @@ -1201,7 +1201,7 @@ Andersen::AnalyzeRegion(rvsdg::Region & region) void Andersen::AnalyzeRvsdg(const rvsdg::Graph & graph) { - auto & rootRegion = *graph.root(); + auto & rootRegion = graph.GetRootRegion(); // Iterate over all arguments to the root region - symbols imported from other modules // These symbols can either be global variables or functions diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 1902e8c57..81f02bb8c 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -29,7 +29,7 @@ class EncodingStatistics final : public util::Statistics void Start(const rvsdg::Graph & graph) { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } @@ -471,7 +471,7 @@ MemoryStateEncoder::Encode( auto statistics = EncodingStatistics::Create(rvsdgModule.SourceFileName()); statistics->Start(rvsdgModule.Rvsdg()); - EncodeRegion(*rvsdgModule.Rvsdg().root()); + EncodeRegion(rvsdgModule.Rvsdg().GetRootRegion()); statistics->Stop(); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git 
a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index e76cdc54f..62fa0fba5 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -44,8 +44,10 @@ class RegionAwareMemoryNodeProvider::Statistics final : public util::Statistics if (!IsDemanded()) return; - AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(rvsdgModule.Rvsdg().root())); - AddMeasurement(NumRvsdgRegionsLabel_, rvsdg::Region::NumRegions(*rvsdgModule.Rvsdg().root())); + AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(&rvsdgModule.Rvsdg().GetRootRegion())); + AddMeasurement( + NumRvsdgRegionsLabel_, + rvsdg::Region::NumRegions(rvsdgModule.Rvsdg().GetRootRegion())); AddMeasurement(Label::NumPointsToGraphMemoryNodes, pointsToGraph.NumMemoryNodes()); } @@ -632,7 +634,7 @@ RegionAwareMemoryNodeProvider::ProvisionMemoryNodes( auto statistics = Statistics::Create(statisticsCollector, rvsdgModule, pointsToGraph); statistics->StartAnnotationStatistics(); - AnnotateRegion(*rvsdgModule.Rvsdg().root()); + AnnotateRegion(rvsdgModule.Rvsdg().GetRootRegion()); statistics->StopAnnotationStatistics(); statistics->StartPropagationPass1Statistics(); @@ -856,7 +858,7 @@ RegionAwareMemoryNodeProvider::AnnotateStructuralNode(const rvsdg::StructuralNod void RegionAwareMemoryNodeProvider::Propagate(const RvsdgModule & rvsdgModule) { - rvsdg::topdown_traverser traverser(rvsdgModule.Rvsdg().root()); + rvsdg::topdown_traverser traverser(&rvsdgModule.Rvsdg().GetRootRegion()); for (auto & node : traverser) { if (auto lambdaNode = dynamic_cast(node)) @@ -1064,7 +1066,7 @@ RegionAwareMemoryNodeProvider::ToRegionTree( return subtree; }; - return toRegionTree(rvsdg.root(), 0); + return toRegionTree(&rvsdg.GetRootRegion(), 0); } } diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index c629a6b6d..ebf3ff311 100644 
--- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -919,7 +919,7 @@ class Steensgaard::Statistics final : public util::Statistics void StartSteensgaardStatistics(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(AnalysisTimerLabel_).start(); } @@ -1751,14 +1751,14 @@ void Steensgaard::AnalyzeRvsdg(const rvsdg::Graph & graph) { AnalyzeImports(graph); - AnalyzeRegion(*graph.root()); + AnalyzeRegion(graph.GetRootRegion()); AnalyzeExports(graph); } void Steensgaard::AnalyzeImports(const rvsdg::Graph & graph) { - auto rootRegion = graph.root(); + auto rootRegion = &graph.GetRootRegion(); for (size_t n = 0; n < rootRegion->narguments(); n++) { auto & graphImport = *util::AssertedCast(rootRegion->argument(n)); @@ -1775,7 +1775,7 @@ Steensgaard::AnalyzeImports(const rvsdg::Graph & graph) void Steensgaard::AnalyzeExports(const rvsdg::Graph & graph) { - auto rootRegion = graph.root(); + auto rootRegion = &graph.GetRootRegion(); for (size_t n = 0; n < rootRegion->nresults(); n++) { diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index 6e3fc2f25..d0f4313ea 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -25,7 +25,7 @@ class TopDownMemoryNodeEliminator::Statistics final : public util::Statistics void Start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodes, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } @@ -465,7 +465,7 @@ TopDownMemoryNodeEliminator::EliminateTopDown(const RvsdgModule & rvsdgModule) InitializeLiveNodesOfTailLambdas(rvsdgModule); // Start the processing of the RVSDG 
module - EliminateTopDownRootRegion(*rvsdgModule.Rvsdg().root()); + EliminateTopDownRootRegion(rvsdgModule.Rvsdg().GetRootRegion()); } void @@ -963,7 +963,7 @@ TopDownMemoryNodeEliminator::CheckInvariants( std::vector callNodes; std::vector regions; - collectRegionsAndCalls(*rvsdgModule.Rvsdg().root(), regions, callNodes); + collectRegionsAndCalls(rvsdgModule.Rvsdg().GetRootRegion(), regions, callNodes); for (auto region : regions) { diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index 23d9e24d7..def2530aa 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -30,8 +30,8 @@ class cnestat final : public util::Statistics void start_mark_stat(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); AddTimer(MarkTimerLabel_).start(); } @@ -50,8 +50,8 @@ class cnestat final : public util::Statistics void end_divert_stat(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); GetTimer(DivertTimerLabel_).stop(); } @@ -561,11 +561,11 @@ cne(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) auto statistics = cnestat::Create(rm.SourceFileName()); statistics->start_mark_stat(graph); - mark(graph.root(), ctx); + mark(&graph.GetRootRegion(), ctx); statistics->end_mark_stat(); statistics->start_divert_stat(); - divert(graph.root(), ctx); + divert(&graph.GetRootRegion(), ctx); statistics->end_divert_stat(graph); 
statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index 5f9bcb7cc..e24406333 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -27,14 +27,14 @@ class ilnstat final : public util::Statistics void start(const rvsdg::Graph & graph) { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } void stop(const rvsdg::Graph & graph) { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); GetTimer(Label::Timer).stop(); } @@ -54,7 +54,7 @@ find_producer(jlm::rvsdg::input * input) if (argument == nullptr) return input->origin(); - if (argument->region() == graph->root()) + if (argument->region() == &graph->GetRootRegion()) return argument; JLM_ASSERT(argument->input() != nullptr); @@ -146,7 +146,7 @@ inlineCall(jlm::rvsdg::SimpleNode * call, const lambda::node * lambda) static void inlining(rvsdg::Graph & rvsdg) { - for (auto node : rvsdg::topdown_traverser(rvsdg.root())) + for (auto node : rvsdg::topdown_traverser(&rvsdg.GetRootRegion())) { if (auto lambda = dynamic_cast(node)) { diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index 6e319c2ab..e75d0cb7a 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -28,16 +28,16 @@ class ivtstat final : public util::Statistics void start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } void end(const 
rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); GetTimer(Label::Timer).stop(); } @@ -306,7 +306,7 @@ invert(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) auto statistics = ivtstat::Create(rm.SourceFileName()); statistics->start(rm.Rvsdg()); - invert(rm.Rvsdg().root()); + invert(&rm.Rvsdg().GetRootRegion()); statistics->end(rm.Rvsdg()); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/llvm/opt/pull.cpp b/jlm/llvm/opt/pull.cpp index edff19aa8..cb33512a8 100644 --- a/jlm/llvm/opt/pull.cpp +++ b/jlm/llvm/opt/pull.cpp @@ -26,14 +26,14 @@ class pullstat final : public util::Statistics void start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } void end(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); GetTimer(Label::Timer).stop(); } @@ -310,7 +310,7 @@ pull(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) auto statistics = pullstat::Create(rm.SourceFileName()); statistics->start(rm.Rvsdg()); - pull(rm.Rvsdg().root()); + pull(&rm.Rvsdg().GetRootRegion()); statistics->end(rm.Rvsdg()); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index 74436f853..aedb52ac8 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -29,14 +29,14 @@ class pushstat final : public util::Statistics 
void start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgInputsBefore, jlm::rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgInputsBefore, jlm::rvsdg::ninputs(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } void end(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgInputsAfter, jlm::rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgInputsAfter, jlm::rvsdg::ninputs(&graph.GetRootRegion())); GetTimer(Label::Timer).stop(); } @@ -419,7 +419,7 @@ push(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) auto statistics = pushstat::Create(rm.SourceFileName()); statistics->start(rm.Rvsdg()); - push(rm.Rvsdg().root()); + push(&rm.Rvsdg().GetRootRegion()); statistics->end(rm.Rvsdg()); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/llvm/opt/reduction.cpp b/jlm/llvm/opt/reduction.cpp index d2336c6bb..0b7d621c4 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -25,16 +25,16 @@ class redstat final : public util::Statistics void start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } void end(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); GetTimer(Label::Timer).stop(); } @@ -115,7 +115,7 @@ reduce(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) 
enable_unary_reductions(graph); enable_binary_reductions(graph); - graph.normalize(); + graph.Normalize(); statistics->end(graph); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index 197c34f60..f5a7b62c9 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -26,14 +26,14 @@ class unrollstat final : public util::Statistics void start(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); AddTimer(Label::Timer).start(); } void end(const rvsdg::Graph & graph) noexcept { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(graph.root())); + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); GetTimer(Label::Timer).stop(); } @@ -470,7 +470,7 @@ unroll(rvsdg::ThetaNode * otheta, size_t factor) if (!ui) return; - auto nf = otheta->graph()->node_normal_form(typeid(rvsdg::Operation)); + auto nf = otheta->graph()->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); if (ui->is_known() && ui->niterations()) @@ -521,7 +521,7 @@ loopunroll::run(RvsdgModule & module, util::StatisticsCollector & statisticsColl auto statistics = unrollstat::Create(module.SourceFileName()); statistics->start(module.Rvsdg()); - unroll(graph.root(), factor_); + unroll(&graph.GetRootRegion(), factor_); statistics->end(module.Rvsdg()); statisticsCollector.CollectDemandedStatistics(std::move(statistics)); diff --git a/jlm/mlir/backend/JlmToMlirConverter.cpp b/jlm/mlir/backend/JlmToMlirConverter.cpp index c15163ed5..0ee788f7e 100644 --- a/jlm/mlir/backend/JlmToMlirConverter.cpp +++ b/jlm/mlir/backend/JlmToMlirConverter.cpp @@ -52,7 +52,8 @@ JlmToMlirConverter::ConvertOmega(const rvsdg::Graph & graph) auto omega = Builder_->create<::mlir::rvsdg::OmegaNode>(Builder_->getUnknownLoc()); auto & omegaBlock = 
omega.getRegion().emplaceBlock(); - ::llvm::SmallVector<::mlir::Value> regionResults = ConvertRegion(*graph.root(), omegaBlock); + ::llvm::SmallVector<::mlir::Value> regionResults = + ConvertRegion(graph.GetRootRegion(), omegaBlock); auto omegaResult = Builder_->create<::mlir::rvsdg::OmegaResult>(Builder_->getUnknownLoc(), regionResults); diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index 63bc82e80..a7005e0f2 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -36,7 +36,7 @@ std::unique_ptr MlirToJlmConverter::ConvertMlir(std::unique_ptr<::mlir::Block> & block) { auto rvsdgModule = llvm::RvsdgModule::Create(util::filepath(""), std::string(), std::string()); - ConvertBlock(*block, *rvsdgModule->Rvsdg().root()); + ConvertBlock(*block, rvsdgModule->Rvsdg().GetRootRegion()); return rvsdgModule; } diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index db35d1360..a576715e5 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -210,7 +210,7 @@ binary_normal_form::set_reducible(bool enable) enable_reducible_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -225,7 +225,7 @@ binary_normal_form::set_flatten(bool enable) enable_flatten_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -240,7 +240,7 @@ binary_normal_form::set_reorder(bool enable) enable_reorder_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -255,7 +255,7 @@ binary_normal_form::set_distribute(bool enable) enable_distribute_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -270,7 +270,7 @@ binary_normal_form::set_factorize(bool enable) enable_factorize_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + 
graph()->MarkDenormalized(); } /* flattened binary normal form */ @@ -290,7 +290,7 @@ flattened_binary_normal_form::normalize_node(Node * node) const { const auto & op = static_cast(node->GetOperation()); const auto & bin_op = op.bin_operation(); - auto nf = graph()->node_normal_form(typeid(bin_op)); + auto nf = graph()->GetNodeNormalForm(typeid(bin_op)); return static_cast(nf)->normalize_node(node, bin_op); } @@ -304,7 +304,7 @@ flattened_binary_normal_form::normalized_create( const auto & op = static_cast(base_op); const auto & bin_op = op.bin_operation(); - auto nf = static_cast(graph()->node_normal_form(typeid(bin_op))); + auto nf = static_cast(graph()->GetNodeNormalForm(typeid(bin_op))); return nf->normalized_create(region, bin_op, arguments); } diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index 07f0fbe74..c75f29668 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -165,7 +165,7 @@ class binary_op : public SimpleOperation normal_form(Graph * graph) noexcept { return static_cast( - graph->node_normal_form(typeid(binary_op))); + graph->GetNodeNormalForm(typeid(binary_op))); } }; @@ -250,7 +250,7 @@ class flattened_binary_op final : public SimpleOperation normal_form(Graph * graph) noexcept { return static_cast( - graph->node_normal_form(typeid(flattened_binary_op))); + graph->GetNodeNormalForm(typeid(flattened_binary_op))); } jlm::rvsdg::output * @@ -264,7 +264,7 @@ class flattened_binary_op final : public SimpleOperation static inline void reduce(Graph * graph, const flattened_binary_op::reduction & reduction) { - reduce(graph->root(), reduction); + reduce(&graph->GetRootRegion(), reduction); } private: diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 6b0555f4d..4d534d022 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -189,7 +189,7 @@ class concat_normal_form final : public simple_normal_form enable_reducible_ = enable; if (get_mutable() && enable) - 
graph()->mark_denormalized(); + graph()->MarkDenormalized(); } inline bool @@ -210,7 +210,7 @@ class concat_normal_form final : public simple_normal_form enable_flatten_ = enable; if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } inline bool diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 6554b020c..933d02893 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -210,7 +210,7 @@ gamma_normal_form::set_predicate_reduction(bool enable) enable_predicate_reduction_ = enable; if (enable && get_mutable()) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -226,7 +226,7 @@ gamma_normal_form::set_invariant_reduction(bool enable) enable_invariant_reduction_ = enable; if (enable && get_mutable()) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } void @@ -239,7 +239,7 @@ gamma_normal_form::set_control_constant_reduction(bool enable) enable_control_constant_reduction_ = enable; if (enable && get_mutable()) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } bool diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 63adc809c..5bb452388 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -99,7 +99,7 @@ class GammaOperation final : public StructuralOperation normal_form(Graph * graph) noexcept { return static_cast( - graph->node_normal_form(typeid(GammaOperation))); + graph->GetNodeNormalForm(typeid(GammaOperation))); } private: diff --git a/jlm/rvsdg/graph.cpp b/jlm/rvsdg/graph.cpp index c7286cf1f..921a9e61f 100644 --- a/jlm/rvsdg/graph.cpp +++ b/jlm/rvsdg/graph.cpp @@ -4,62 +4,59 @@ * See COPYING for terms of redistribution. 
*/ -#include - #include #include +#include #include +#include namespace jlm::rvsdg { GraphImport::GraphImport(Graph & graph, std::shared_ptr type, std::string name) - : RegionArgument(graph.root(), nullptr, std::move(type)), + : RegionArgument(&graph.GetRootRegion(), nullptr, std::move(type)), Name_(std::move(name)) {} GraphExport::GraphExport(rvsdg::output & origin, std::string name) - : RegionResult(origin.region()->graph()->root(), &origin, nullptr, origin.Type()), + : RegionResult(&origin.region()->graph()->GetRootRegion(), &origin, nullptr, origin.Type()), Name_(std::move(name)) {} Graph::~Graph() { JLM_ASSERT(!has_active_trackers(this)); - - delete root_; } Graph::Graph() - : normalized_(false), - root_(new rvsdg::Region(nullptr, this)) + : Normalized_(false), + RootRegion_(new Region(nullptr, this)) {} std::unique_ptr -Graph::copy() const +Graph::Copy() const { - SubstitutionMap smap; - std::unique_ptr graph(new jlm::rvsdg::Graph()); - root()->copy(graph->root(), smap, true, true); + SubstitutionMap substitutionMap; + auto graph = std::make_unique(); + GetRootRegion().copy(&graph->GetRootRegion(), substitutionMap, true, true); return graph; } -jlm::rvsdg::node_normal_form * -Graph::node_normal_form(const std::type_info & type) noexcept +node_normal_form * +Graph::GetNodeNormalForm(const std::type_info & type) noexcept { - auto i = node_normal_forms_.find(std::type_index(type)); - if (i != node_normal_forms_.end()) + auto i = NodeNormalForms_.find(std::type_index(type)); + if (i != NodeNormalForms_.end()) return i.ptr(); const auto cinfo = dynamic_cast(&type); - auto parent_normal_form = cinfo ? node_normal_form(*cinfo->__base_type) : nullptr; + auto parent_normal_form = cinfo ? 
GetNodeNormalForm(*cinfo->__base_type) : nullptr; - std::unique_ptr nf( - jlm::rvsdg::node_normal_form::create(type, parent_normal_form, this)); + std::unique_ptr nf(node_normal_form::create(type, parent_normal_form, this)); - jlm::rvsdg::node_normal_form * result = nf.get(); - node_normal_forms_.insert(std::move(nf)); + node_normal_form * result = nf.get(); + NodeNormalForms_.insert(std::move(nf)); return result; } @@ -76,7 +73,7 @@ Graph::ExtractTailNodes(const Graph & rvsdg) return false; } - if (rvsdg::input::GetNode(*input)) + if (input::GetNode(*input)) { return false; } @@ -87,7 +84,7 @@ Graph::ExtractTailNodes(const Graph & rvsdg) return std::all_of(output.begin(), output.end(), IsRootRegionExport); }; - auto & rootRegion = *rvsdg.root(); + auto & rootRegion = rvsdg.GetRootRegion(); std::vector nodes; for (auto & bottomNode : rootRegion.BottomNodes()) @@ -100,7 +97,7 @@ Graph::ExtractTailNodes(const Graph & rvsdg) auto output = rootRegion.result(n)->origin(); if (IsOnlyExported(*output)) { - nodes.push_back(rvsdg::output::GetNode(*output)); + nodes.push_back(output::GetNode(*output)); } } diff --git a/jlm/rvsdg/graph.hpp b/jlm/rvsdg/graph.hpp index 0cd8378d8..0addab5e3 100644 --- a/jlm/rvsdg/graph.hpp +++ b/jlm/rvsdg/graph.hpp @@ -10,10 +10,6 @@ #include #include #include -#include -#include - -#include namespace jlm::rvsdg { @@ -56,42 +52,56 @@ class GraphExport : public RegionResult std::string Name_; }; -class Graph +/** + * Represents a Regionalized Value State Dependence Graph (RVSDG) + */ +class Graph final { public: ~Graph(); Graph(); - [[nodiscard]] rvsdg::Region * - root() const noexcept + /** + * @return The root region of the graph. 
+ */ + [[nodiscard]] Region & + GetRootRegion() const noexcept { - return root_; + return *RootRegion_; } - inline void - mark_denormalized() noexcept + void + MarkDenormalized() noexcept { - normalized_ = false; + Normalized_ = false; } - inline void - normalize() + void + Normalize() { - root()->normalize(true); - normalized_ = true; + GetRootRegion().normalize(true); + Normalized_ = true; } + /** + * @return A copy of the RVSDG. + */ [[nodiscard]] std::unique_ptr - copy() const; + Copy() const; - jlm::rvsdg::node_normal_form * - node_normal_form(const std::type_info & type) noexcept; + node_normal_form * + GetNodeNormalForm(const std::type_info & type) noexcept; - inline void - prune() + /** + * Remove all dead nodes in the graph. + * + * @see Node::IsDead() + */ + void + PruneNodes() { - root()->prune(true); + GetRootRegion().prune(true); } /** @@ -107,9 +117,9 @@ class Graph ExtractTailNodes(const Graph & rvsdg); private: - bool normalized_; - rvsdg::Region * root_; - jlm::rvsdg::node_normal_form_hash node_normal_forms_; + bool Normalized_; + std::unique_ptr RootRegion_; + node_normal_form_hash NodeNormalForms_{}; }; } diff --git a/jlm/rvsdg/node-normal-form.cpp b/jlm/rvsdg/node-normal-form.cpp index 3864db7d4..fe94174b3 100644 --- a/jlm/rvsdg/node-normal-form.cpp +++ b/jlm/rvsdg/node-normal-form.cpp @@ -33,7 +33,7 @@ node_normal_form::set_mutable(bool enable) enable_mutable_ = enable; if (enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } namespace diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 46e4b2a7c..05e43c70a 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -65,7 +65,7 @@ input::divert_to(jlm::rvsdg::output * new_origin) if (is(*this)) static_cast(this)->node()->recompute_depth(); - region()->graph()->mark_denormalized(); + region()->graph()->MarkDenormalized(); on_input_change(this, old_origin, new_origin); } @@ -357,7 +357,7 @@ bool normalize(Node * node) { const auto & op = node->GetOperation(); - auto nf 
= node->graph()->node_normal_form(typeid(op)); + auto nf = node->graph()->GetNodeNormalForm(typeid(op)); return nf->normalize_node(node); } diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index 957b1fc81..e22da1ee2 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -16,7 +16,7 @@ Operation::~Operation() noexcept = default; jlm::rvsdg::node_normal_form * Operation::normal_form(Graph * graph) noexcept { - return graph->node_normal_form(typeid(Operation)); + return graph->GetNodeNormalForm(typeid(Operation)); } SimpleOperation::~SimpleOperation() noexcept = default; @@ -50,7 +50,7 @@ SimpleOperation::result(size_t index) const noexcept jlm::rvsdg::simple_normal_form * SimpleOperation::normal_form(Graph * graph) noexcept { - return static_cast(graph->node_normal_form(typeid(SimpleOperation))); + return static_cast(graph->GetNodeNormalForm(typeid(SimpleOperation))); } bool @@ -63,7 +63,7 @@ jlm::rvsdg::structural_normal_form * StructuralOperation::normal_form(Graph * graph) noexcept { return static_cast( - graph->node_normal_form(typeid(StructuralOperation))); + graph->GetNodeNormalForm(typeid(StructuralOperation))); } } diff --git a/jlm/rvsdg/region.cpp b/jlm/rvsdg/region.cpp index 815949db3..150675e79 100644 --- a/jlm/rvsdg/region.cpp +++ b/jlm/rvsdg/region.cpp @@ -368,14 +368,14 @@ Region::normalize(bool recursive) } const auto & op = node->GetOperation(); - graph()->node_normal_form(typeid(op))->normalize_node(node); + graph()->GetNodeNormalForm(typeid(op))->normalize_node(node); } } bool Region::IsRootRegion() const noexcept { - return this->graph()->root() == this; + return &this->graph()->GetRootRegion() == this; } size_t diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 633b655b6..457d91689 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -77,7 +77,7 @@ Node * SimpleNode::copy(rvsdg::Region * region, const std::vector & operands) const { auto node = create(region, GetOperation(), 
operands); - graph()->mark_denormalized(); + graph()->MarkDenormalized(); return node; } diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index eb20c0c6d..606e3e1c2 100644 --- a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -60,7 +60,7 @@ class SimpleNode : public Node const SimpleOperation & op, const std::vector & operands) { - auto nf = static_cast(region->graph()->node_normal_form(typeid(op))); + auto nf = static_cast(region->graph()->GetNodeNormalForm(typeid(op))); return nf->normalized_create(region, op, operands); } }; diff --git a/jlm/rvsdg/simple-normal-form.cpp b/jlm/rvsdg/simple-normal-form.cpp index d2eeee55b..0f79c8378 100644 --- a/jlm/rvsdg/simple-normal-form.cpp +++ b/jlm/rvsdg/simple-normal-form.cpp @@ -104,7 +104,7 @@ simple_normal_form::set_cse(bool enable) children_set(enable); if (get_mutable() && enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } } diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp new file mode 100644 index 000000000..e69de29bb diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index 7cc13f345..d9001654c 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -114,7 +114,7 @@ ThetaNode::add_loopvar(jlm::rvsdg::output * origin) ThetaNode * ThetaNode::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { - auto nf = graph()->node_normal_form(typeid(Operation)); + auto nf = graph()->GetNodeNormalForm(typeid(Operation)); nf->set_mutable(false); rvsdg::SubstitutionMap rmap; diff --git a/jlm/rvsdg/tracker.cpp b/jlm/rvsdg/tracker.cpp index aaed8ec17..cef52a284 100644 --- a/jlm/rvsdg/tracker.cpp +++ b/jlm/rvsdg/tracker.cpp @@ -6,6 +6,7 @@ #include #include +#include using namespace std::placeholders; diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index 45f5eb7d6..dd1bbf6a3 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -87,7 +87,7 @@ unary_normal_form::set_reducible(bool enable) enable_reducible_ = enable; if (get_mutable() 
&& enable) - graph()->mark_denormalized(); + graph()->MarkDenormalized(); } /* unary operator */ diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index f26e61dd3..43be3af9d 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -74,7 +74,7 @@ class unary_op : public SimpleOperation static jlm::rvsdg::unary_normal_form * normal_form(Graph * graph) noexcept { - return static_cast(graph->node_normal_form(typeid(unary_op))); + return static_cast(graph->GetNodeNormalForm(typeid(unary_op))); } }; diff --git a/jlm/rvsdg/view.hpp b/jlm/rvsdg/view.hpp index a59a8de5a..ade63b2d7 100644 --- a/jlm/rvsdg/view.hpp +++ b/jlm/rvsdg/view.hpp @@ -53,7 +53,7 @@ view(const rvsdg::Region * region, FILE * out); inline void view(const Graph & graph, FILE * out) { - return view(graph.root(), out); + return view(&graph.GetRootRegion(), out); } std::string diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index e72f92d1a..060d1ef90 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -485,7 +485,7 @@ JlmOptCommand::PrintAsAscii( const util::filepath & outputFile, util::StatisticsCollector &) { - auto ascii = rvsdg::view(rvsdgModule.Rvsdg().root()); + auto ascii = view(&rvsdgModule.Rvsdg().GetRootRegion()); if (outputFile == "") { @@ -507,7 +507,7 @@ JlmOptCommand::PrintAsXml( { auto fd = outputFile == "" ? 
stdout : fopen(outputFile.to_str().c_str(), "w"); - jlm::rvsdg::view_xml(rvsdgModule.Rvsdg().root(), fd); + view_xml(&rvsdgModule.Rvsdg().GetRootRegion(), fd); if (fd != stdout) fclose(fd); @@ -559,7 +559,7 @@ JlmOptCommand::PrintAsRvsdgTree( const util::filepath & outputFile, util::StatisticsCollector &) { - auto & rootRegion = *rvsdgModule.Rvsdg().root(); + auto & rootRegion = rvsdgModule.Rvsdg().GetRootRegion(); auto tree = rvsdg::Region::ToTree(rootRegion); if (outputFile == "") @@ -581,7 +581,7 @@ JlmOptCommand::PrintAsDot( const util::filepath & outputFile, util::StatisticsCollector &) { - auto & rootRegion = *rvsdgModule.Rvsdg().root(); + auto & rootRegion = rvsdgModule.Rvsdg().GetRootRegion(); util::GraphWriter writer; jlm::llvm::dot::WriteGraphs(writer, rootRegion, true); diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index e04e547fa..7a94a119d 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -19,10 +19,10 @@ StoreTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto csize = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 4); @@ -73,10 +73,10 @@ StoreTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto csize = 
jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 4); @@ -135,10 +135,10 @@ LoadTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath("LoadTest1.c"), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto ld1 = LoadNonVolatileNode::Create( fct->GetFunctionArguments()[0], @@ -173,10 +173,10 @@ LoadTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto csize = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 4); @@ -241,10 +241,11 @@ LoadFromUndefTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - Lambda_ = lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); + Lambda_ = + lambda::node::create(&rvsdg.GetRootRegion(), functionType, "f", linkage::external_linkage); auto undefValue = UndefValueOperation::Create(*Lambda_->subregion(), pointerType); auto loadResults = LoadNonVolatileNode::Create( @@ -272,7 +273,7 @@ GetElementPtrTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = 
graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto & declaration = module->AddStructTypeDeclaration(StructType::Declaration::Create( @@ -285,7 +286,7 @@ GetElementPtrTest::SetupRvsdg() { PointerType::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), MemoryStateType::Create() }); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto zero = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 0); auto one = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 1); @@ -336,10 +337,10 @@ BitCastTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto cast = bitcast_op::create(fct->GetFunctionArguments()[0], pointerType); @@ -364,7 +365,7 @@ Bits2PtrTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto setupBit2PtrFunction = [&]() @@ -376,8 +377,11 @@ Bits2PtrTest::SetupRvsdg() { jlm::rvsdg::bittype::Create(64), iostatetype::Create(), MemoryStateType::Create() }, { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "bit2ptr", linkage::external_linkage); + auto lambda = lambda::node::create( + 
&graph->GetRootRegion(), + functionType, + "bit2ptr", + linkage::external_linkage); auto valueArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -397,8 +401,11 @@ Bits2PtrTest::SetupRvsdg() { jlm::rvsdg::bittype::Create(64), iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto valueArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -444,10 +451,10 @@ ConstantPointerNullTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto constantPointerNullResult = ConstantPointerNullOperation::Create(fct->subregion(), pointerType); @@ -478,7 +485,7 @@ CallTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupF = [&]() @@ -493,7 +500,8 @@ CallTest1::SetupRvsdg() MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, 
"f", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "f", linkage::external_linkage); auto pointerArgument1 = lambda->GetFunctionArguments()[0]; auto pointerArgument2 = lambda->GetFunctionArguments()[1]; auto iOStateArgument = lambda->GetFunctionArguments()[2]; @@ -529,7 +537,8 @@ CallTest1::SetupRvsdg() MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "g", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "g", linkage::external_linkage); auto pointerArgument1 = lambda->GetFunctionArguments()[0]; auto pointerArgument2 = lambda->GetFunctionArguments()[1]; auto iOStateArgument = lambda->GetFunctionArguments()[2]; @@ -561,7 +570,8 @@ CallTest1::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "h", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "h", linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -634,7 +644,7 @@ CallTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupCreate = [&]() @@ -646,8 +656,11 @@ CallTest2::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }, { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "create", 
linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "create", + linkage::external_linkage); auto valueArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -675,8 +688,11 @@ CallTest2::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "destroy", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "destroy", + linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -698,8 +714,11 @@ CallTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -772,13 +791,16 @@ IndirectCallTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupConstantFunction = [&](ssize_t n, const std::string & name) { - auto lambda = - lambda::node::create(graph->root(), constantFunctionType, name, linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + constantFunctionType, + name, + 
linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -795,8 +817,11 @@ IndirectCallTest1::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "indcall", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "indcall", + linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -818,8 +843,11 @@ IndirectCallTest1::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -881,13 +909,13 @@ IndirectCallTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupG1 = [&]() { auto delta = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "g1", linkage::external_linkage, @@ -902,7 +930,7 @@ IndirectCallTest2::SetupRvsdg() auto SetupG2 = [&]() { auto delta = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "g2", 
linkage::external_linkage, @@ -916,8 +944,11 @@ IndirectCallTest2::SetupRvsdg() auto SetupConstantFunction = [&](ssize_t n, const std::string & name) { - auto lambda = - lambda::node::create(graph->root(), constantFunctionType, name, linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + constantFunctionType, + name, + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -934,7 +965,8 @@ IndirectCallTest2::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "i", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "i", linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -960,8 +992,11 @@ IndirectCallTest2::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, name, linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + name, + linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -992,8 +1027,11 @@ IndirectCallTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, 
"test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1052,8 +1090,11 @@ IndirectCallTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test2", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test2", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1125,7 +1166,7 @@ ExternalCallTest1::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -1155,7 +1196,8 @@ ExternalCallTest1::SetupRvsdg() MemoryStateType::Create() }, { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "f", linkage::external_linkage); auto pathArgument = lambda->GetFunctionArguments()[0]; auto modeArgument = lambda->GetFunctionArguments()[1]; auto iOStateArgument = lambda->GetFunctionArguments()[2]; @@ -1207,7 +1249,7 @@ ExternalCallTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); + auto nf = 
rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -1246,7 +1288,8 @@ ExternalCallTest2::SetupRvsdg() ExternalFArgument_ = &GraphImport::Create(rvsdg, pointerType, "f", linkage::external_linkage); // Setup function g() - LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); + LambdaG_ = + lambda::node::create(&rvsdg.GetRootRegion(), lambdaGType, "g", linkage::external_linkage); auto iOStateArgument = LambdaG_->GetFunctionArguments()[0]; auto memoryStateArgument = LambdaG_->GetFunctionArguments()[1]; auto llvmLifetimeStartArgument = LambdaG_->AddContextVar(*llvmLifetimeStart).inner; @@ -1324,10 +1367,10 @@ GammaTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto zero = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 0); auto biteq = jlm::rvsdg::biteq_op::create(32, fct->GetFunctionArguments()[0], zero); @@ -1372,7 +1415,7 @@ GammaTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupLambdaF = [&]() @@ -1431,7 +1474,8 @@ GammaTest2::SetupRvsdg() MemoryStateType::Create() }, { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rvsdg->GetRootRegion(), 
functionType, "f", linkage::external_linkage); auto cArgument = lambda->GetFunctionArguments()[0]; auto xArgument = lambda->GetFunctionArguments()[1]; auto yArgument = lambda->GetFunctionArguments()[2]; @@ -1485,8 +1529,11 @@ GammaTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(rvsdg->root(), functionType, functionName, linkage::external_linkage); + auto lambda = lambda::node::create( + &rvsdg->GetRootRegion(), + functionType, + functionName, + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto lambdaFArgument = lambda->AddContextVar(lambdaF).inner; @@ -1566,10 +1613,10 @@ ThetaTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto fct = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + auto fct = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto zero = jlm::rvsdg::create_bitconstant(fct->subregion(), 32, 0); @@ -1618,13 +1665,13 @@ DeltaTest1::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupGlobalF = [&]() { auto dfNode = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "f", linkage::external_linkage, @@ -1645,7 +1692,8 @@ DeltaTest1::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), 
iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "g", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "g", linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -1667,7 +1715,8 @@ DeltaTest1::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "h", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "h", linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1713,13 +1762,13 @@ DeltaTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupD1 = [&]() { auto delta = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "d1", linkage::external_linkage, @@ -1734,7 +1783,7 @@ DeltaTest2::SetupRvsdg() auto SetupD2 = [&]() { auto delta = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "d2", linkage::external_linkage, @@ -1754,8 +1803,11 @@ DeltaTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "f1", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "f1", + 
linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1774,8 +1826,11 @@ DeltaTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "f2", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "f2", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1823,13 +1878,13 @@ DeltaTest3::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupG1 = [&]() { auto delta = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "g1", linkage::external_linkage, @@ -1845,8 +1900,13 @@ DeltaTest3::SetupRvsdg() { auto pointerType = PointerType::Create(); - auto delta = - delta::node::Create(graph->root(), pointerType, "g2", linkage::external_linkage, "", false); + auto delta = delta::node::Create( + &graph->GetRootRegion(), + pointerType, + "g2", + linkage::external_linkage, + "", + false); auto g1Argument = delta->add_ctxvar(&g1); @@ -1861,7 +1921,8 @@ DeltaTest3::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(16), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "f", linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = 
lambda->GetFunctionArguments()[1]; auto g1CtxVar = lambda->AddContextVar(g1).inner; @@ -1887,8 +1948,11 @@ DeltaTest3::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1932,7 +1996,7 @@ ImportTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupF1 = [&](jlm::rvsdg::output * d1) @@ -1943,8 +2007,11 @@ ImportTest::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "f1", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "f1", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -1964,8 +2031,11 @@ ImportTest::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "f2", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "f2", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -2021,7 +2091,7 @@ PhiTest1::SetupRvsdg() auto module = 
RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pbit64 = PointerType::Create(); @@ -2039,7 +2109,7 @@ PhiTest1::SetupRvsdg() auto pt = PointerType::Create(); jlm::llvm::phi::builder pb; - pb.begin(graph->root()); + pb.begin(&graph->GetRootRegion()); auto fibrv = pb.add_recvar(pt); auto lambda = @@ -2136,8 +2206,11 @@ PhiTest1::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto fibcv = lambda->AddContextVar(*phiNode->output(0)).inner; @@ -2208,13 +2281,13 @@ PhiTest2::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupEight = [&]() { auto lambda = lambda::node::create( - graph->root(), + &graph->GetRootRegion(), constantFunctionType, "eight", linkage::external_linkage); @@ -2228,8 +2301,11 @@ PhiTest2::SetupRvsdg() auto SetupI = [&]() { - auto lambda = - lambda::node::create(graph->root(), functionIType, "i", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionIType, + "i", + linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -2399,7 +2475,7 
@@ PhiTest2::SetupRvsdg() auto SetupPhi = [&](rvsdg::output & lambdaEight, rvsdg::output & lambdaI) { jlm::llvm::phi::builder phiBuilder; - phiBuilder.begin(graph->root()); + phiBuilder.begin(&graph->GetRootRegion()); auto lambdaARv = phiBuilder.add_recvar(pointerType); auto lambdaBRv = phiBuilder.add_recvar(pointerType); auto lambdaCRv = phiBuilder.add_recvar(pointerType); @@ -2451,8 +2527,11 @@ PhiTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -2536,7 +2615,7 @@ PhiWithDeltaTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -2546,7 +2625,7 @@ PhiWithDeltaTest::SetupRvsdg() auto arrayType = arraytype::Create(structType, 2); jlm::llvm::phi::builder pb; - pb.begin(rvsdg.root()); + pb.begin(&rvsdg.GetRootRegion()); auto myArrayRecVar = pb.add_recvar(pointerType); auto delta = delta::node::Create( @@ -2587,13 +2666,13 @@ ExternalMemoryTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); /** * Setup function f. 
*/ - LambdaF = lambda::node::create(graph->root(), ft, "f", linkage::external_linkage); + LambdaF = lambda::node::create(&graph->GetRootRegion(), ft, "f", linkage::external_linkage); auto x = LambdaF->GetFunctionArguments()[0]; auto y = LambdaF->GetFunctionArguments()[1]; auto state = LambdaF->GetFunctionArguments()[2]; @@ -2618,13 +2697,13 @@ EscapedMemoryTest1::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto SetupDeltaA = [&]() { auto deltaNode = delta::node::Create( - rvsdg->root(), + &rvsdg->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "a", linkage::external_linkage, @@ -2639,7 +2718,7 @@ EscapedMemoryTest1::SetupRvsdg() auto SetupDeltaB = [&]() { auto deltaNode = delta::node::Create( - rvsdg->root(), + &rvsdg->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "b", linkage::external_linkage, @@ -2655,8 +2734,13 @@ EscapedMemoryTest1::SetupRvsdg() { auto pointerType = PointerType::Create(); - auto deltaNode = - delta::node::Create(rvsdg->root(), pointerType, "x", linkage::external_linkage, "", false); + auto deltaNode = delta::node::Create( + &rvsdg->GetRootRegion(), + pointerType, + "x", + linkage::external_linkage, + "", + false); auto contextVariableA = deltaNode->add_ctxvar(&deltaA); @@ -2667,8 +2751,13 @@ EscapedMemoryTest1::SetupRvsdg() { auto pointerType = PointerType::Create(); - auto deltaNode = - delta::node::Create(rvsdg->root(), pointerType, "y", linkage::external_linkage, "", false); + auto deltaNode = delta::node::Create( + &rvsdg->GetRootRegion(), + pointerType, + "y", + linkage::external_linkage, + "", + false); auto contextVariableX = deltaNode->add_ctxvar(&deltaX); @@ -2687,8 +2776,11 @@ EscapedMemoryTest1::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { 
jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(rvsdg->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &rvsdg->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto pointerArgument = lambda->GetFunctionArguments()[0]; auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -2746,7 +2838,7 @@ EscapedMemoryTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -2789,7 +2881,7 @@ EscapedMemoryTest2::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdg->root(), + &rvsdg->GetRootRegion(), functionType, "ReturnAddress", linkage::external_linkage); @@ -2818,7 +2910,7 @@ EscapedMemoryTest2::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdg->root(), + &rvsdg->GetRootRegion(), functionType, "CallExternalFunction1", linkage::external_linkage); @@ -2854,7 +2946,7 @@ EscapedMemoryTest2::SetupRvsdg() { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdg->root(), + &rvsdg->GetRootRegion(), functionType, "CallExternalFunction2", linkage::external_linkage); @@ -2923,7 +3015,7 @@ EscapedMemoryTest3::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = 
PointerType::Create(); @@ -2945,7 +3037,7 @@ EscapedMemoryTest3::SetupRvsdg() auto SetupGlobal = [&]() { auto delta = delta::node::Create( - rvsdg->root(), + &rvsdg->GetRootRegion(), jlm::rvsdg::bittype::Create(32), "global", linkage::external_linkage, @@ -2969,8 +3061,11 @@ EscapedMemoryTest3::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(rvsdg->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &rvsdg->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -3021,7 +3116,7 @@ MemcpyTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto arrayType = arraytype::Create(jlm::rvsdg::bittype::Create(32), 5); @@ -3029,7 +3124,7 @@ MemcpyTest::SetupRvsdg() auto SetupLocalArray = [&]() { auto delta = delta::node::Create( - rvsdg->root(), + &rvsdg->GetRootRegion(), arrayType, "localArray", linkage::external_linkage, @@ -3054,7 +3149,7 @@ MemcpyTest::SetupRvsdg() auto SetupGlobalArray = [&]() { auto delta = delta::node::Create( - rvsdg->root(), + &rvsdg->GetRootRegion(), arrayType, "globalArray", linkage::external_linkage, @@ -3078,7 +3173,8 @@ MemcpyTest::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "f", linkage::external_linkage); auto 
iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -3115,7 +3211,8 @@ MemcpyTest::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg->root(), functionType, "g", linkage::external_linkage); + auto lambda = + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "g", linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -3172,7 +3269,7 @@ MemcpyTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3192,7 +3289,8 @@ MemcpyTest2::SetupRvsdg() MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg->root(), functionType, "g", linkage::internal_linkage); + auto lambda = + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "g", linkage::internal_linkage); auto s1Argument = lambda->GetFunctionArguments()[0]; auto s2Argument = lambda->GetFunctionArguments()[1]; auto iOStateArgument = lambda->GetFunctionArguments()[2]; @@ -3227,7 +3325,8 @@ MemcpyTest2::SetupRvsdg() MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "f", linkage::external_linkage); auto s1Argument = lambda->GetFunctionArguments()[0]; auto s2Argument = lambda->GetFunctionArguments()[1]; auto iOStateArgument = lambda->GetFunctionArguments()[2]; @@ 
-3274,7 +3373,7 @@ MemcpyTest3::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3288,7 +3387,8 @@ MemcpyTest3::SetupRvsdg() { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - Lambda_ = lambda::node::create(rvsdg->root(), functionType, "f", linkage::internal_linkage); + Lambda_ = + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "f", linkage::internal_linkage); auto pArgument = Lambda_->GetFunctionArguments()[0]; auto iOStateArgument = Lambda_->GetFunctionArguments()[1]; auto memoryStateArgument = Lambda_->GetFunctionArguments()[2]; @@ -3332,7 +3432,7 @@ LinkedListTest::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3343,7 +3443,7 @@ LinkedListTest::SetupRvsdg() auto SetupDeltaMyList = [&]() { auto delta = delta::node::Create( - rvsdg.root(), + &rvsdg.GetRootRegion(), pointerType, "MyList", linkage::external_linkage, @@ -3367,8 +3467,11 @@ LinkedListTest::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { PointerType::Create(), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(rvsdg.root(), functionType, "next", linkage::external_linkage); + auto lambda = lambda::node::create( + &rvsdg.GetRootRegion(), + functionType, + "next", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ 
-3423,7 +3526,7 @@ AllMemoryNodesTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); // Create imported symbol "imported" @@ -3435,7 +3538,7 @@ AllMemoryNodesTest::SetupRvsdg() // Create global variable "global" Delta_ = delta::node::Create( - graph->root(), + &graph->GetRootRegion(), pointerType, "global", linkage::external_linkage, @@ -3446,7 +3549,7 @@ AllMemoryNodesTest::SetupRvsdg() Delta_->finalize(constantPointerNullResult); // Start of function "f" - Lambda_ = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + Lambda_ = lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto entryMemoryState = Lambda_->GetFunctionArguments()[0]; auto deltaContextVar = Lambda_->AddContextVar(*Delta_->output()).inner; auto importContextVar = Lambda_->AddContextVar(*Import_).inner; @@ -3520,10 +3623,11 @@ NAllocaNodesTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - Function_ = lambda::node::create(graph->root(), fcttype, "f", linkage::external_linkage); + Function_ = + lambda::node::create(&graph->GetRootRegion(), fcttype, "f", linkage::external_linkage); auto allocaSize = jlm::rvsdg::create_bitconstant(Function_->subregion(), 32, 1); @@ -3565,10 +3669,10 @@ EscapingLocalFunctionTest::SetupRvsdg() auto module = RvsdgModule::Create(util::filepath(""), "", ""); const auto graph = &module->Rvsdg(); - graph->node_normal_form(typeid(rvsdg::Operation))->set_mutable(false); + graph->GetNodeNormalForm(typeid(rvsdg::Operation))->set_mutable(false); Global_ = delta::node::Create( - 
graph->root(), + &graph->GetRootRegion(), uint32Type, "global", linkage::internal_linkage, @@ -3578,7 +3682,7 @@ EscapingLocalFunctionTest::SetupRvsdg() const auto deltaOutput = Global_->finalize(constantZero); LocalFunc_ = lambda::node::create( - graph->root(), + &graph->GetRootRegion(), localFuncType, "localFunction", linkage::internal_linkage); @@ -3606,7 +3710,7 @@ EscapingLocalFunctionTest::SetupRvsdg() LocalFuncRegister_ = LocalFunc_->output(); ExportedFunc_ = lambda::node::create( - graph->root(), + &graph->GetRootRegion(), exportedFuncType, "exportedFunc", linkage::external_linkage); @@ -3633,11 +3737,14 @@ FreeNullTest::SetupRvsdg() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - LambdaMain_ = - lambda::node::create(graph->root(), functionType, "main", linkage::external_linkage); + LambdaMain_ = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "main", + linkage::external_linkage); auto iOStateArgument = LambdaMain_->GetFunctionArguments()[0]; auto memoryStateArgument = LambdaMain_->GetFunctionArguments()[1]; @@ -3661,7 +3768,7 @@ LambdaCallArgumentMismatch::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - rvsdg.node_normal_form(typeid(rvsdg::Operation))->set_mutable(false); + rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation))->set_mutable(false); auto setupLambdaG = [&]() { @@ -3671,7 +3778,8 @@ LambdaCallArgumentMismatch::SetupRvsdg() { iostatetype::Create(), MemoryStateType::Create() }, { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(rvsdg.root(), functionType, "g", linkage::internal_linkage); + auto lambda = + lambda::node::create(&rvsdg.GetRootRegion(), functionType, "g", 
linkage::internal_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -3696,8 +3804,11 @@ LambdaCallArgumentMismatch::SetupRvsdg() MemoryStateType::Create() }, { rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(rvsdg.root(), functionTypeMain, "main", linkage::external_linkage); + auto lambda = lambda::node::create( + &rvsdg.GetRootRegion(), + functionTypeMain, + "main", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; auto lambdaGArgument = lambda->AddContextVar(lambdaG).inner; @@ -3744,7 +3855,7 @@ VariadicFunctionTest1::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - rvsdg.node_normal_form(typeid(rvsdg::Operation))->set_mutable(false); + rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation))->set_mutable(false); auto pointerType = PointerType::Create(); auto iOStateType = iostatetype::Create(); @@ -3768,7 +3879,8 @@ VariadicFunctionTest1::SetupRvsdg() // Setup f() { - LambdaF_ = lambda::node::create(rvsdg.root(), lambdaFType, "f", linkage::internal_linkage); + LambdaF_ = + lambda::node::create(&rvsdg.GetRootRegion(), lambdaFType, "f", linkage::internal_linkage); auto iArgument = LambdaF_->GetFunctionArguments()[0]; auto iOStateArgument = LambdaF_->GetFunctionArguments()[1]; auto memoryStateArgument = LambdaF_->GetFunctionArguments()[2]; @@ -3795,7 +3907,8 @@ VariadicFunctionTest1::SetupRvsdg() // Setup g() { - LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); + LambdaG_ = + lambda::node::create(&rvsdg.GetRootRegion(), lambdaGType, "g", linkage::external_linkage); auto iOStateArgument = LambdaG_->GetFunctionArguments()[0]; auto memoryStateArgument = LambdaG_->GetFunctionArguments()[1]; auto lambdaFArgument = 
LambdaG_->AddContextVar(*LambdaF_->output()).inner; @@ -3829,7 +3942,7 @@ VariadicFunctionTest2::SetupRvsdg() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto & rvsdg = rvsdgModule->Rvsdg(); - auto nf = rvsdg.node_normal_form(typeid(rvsdg::Operation)); + auto nf = rvsdg.GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -3879,8 +3992,11 @@ VariadicFunctionTest2::SetupRvsdg() // Setup function fst() { - LambdaFst_ = - lambda::node::create(rvsdg.root(), lambdaFstType, "fst", linkage::internal_linkage); + LambdaFst_ = lambda::node::create( + &rvsdg.GetRootRegion(), + lambdaFstType, + "fst", + linkage::internal_linkage); auto iOStateArgument = LambdaFst_->GetFunctionArguments()[2]; auto memoryStateArgument = LambdaFst_->GetFunctionArguments()[3]; auto llvmLifetimeStartArgument = LambdaFst_->AddContextVar(*llvmLifetimeStart).inner; @@ -3999,7 +4115,8 @@ VariadicFunctionTest2::SetupRvsdg() // Setup function g() { - LambdaG_ = lambda::node::create(rvsdg.root(), lambdaGType, "g", linkage::external_linkage); + LambdaG_ = + lambda::node::create(&rvsdg.GetRootRegion(), lambdaGType, "g", linkage::external_linkage); auto iOStateArgument = LambdaG_->GetFunctionArguments()[0]; auto memoryStateArgument = LambdaG_->GetFunctionArguments()[1]; auto lambdaFstArgument = LambdaG_->AddContextVar(*LambdaFst_->output()).inner; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp index 94cbbc141..67cdcb9f8 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp @@ -24,7 +24,7 @@ TestDeadLoopNode() auto & rvsdg = rvsdgModule.Rvsdg(); auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "f", jlm::llvm::linkage::external_linkage); @@ -55,7 +55,7 @@ TestDeadLoopNodeOutput() auto & 
rvsdg = rvsdgModule.Rvsdg(); auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "f", jlm::llvm::linkage::external_linkage); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index 37a4b3ad9..24bc435bb 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -22,7 +22,7 @@ TestTraceArgument() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -35,7 +35,7 @@ TestTraceArgument() { MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", linkage::external_linkage); @@ -82,7 +82,7 @@ TestLoad() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -92,7 +92,7 @@ TestLoad() { jlm::rvsdg::bittype::Create(32), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", linkage::external_linkage); @@ -115,7 +115,7 @@ TestLoad() jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); // Memory Converter replaces the lambda so we start from the root of the graph - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); assert(region->nnodes() == 1); lambda = jlm::util::AssertedCast(region->Nodes().begin().ptr()); @@ 
-164,7 +164,7 @@ TestLoadStore() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -176,7 +176,7 @@ TestLoadStore() { MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", linkage::external_linkage); @@ -201,7 +201,7 @@ TestLoadStore() jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); // Memory Converter replaces the lambda so we start from the root of the graph - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); assert(region->nnodes() == 1); lambda = jlm::util::AssertedCast(region->Nodes().begin().ptr()); @@ -251,7 +251,7 @@ TestThetaLoad() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -265,7 +265,7 @@ TestThetaLoad() { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", linkage::external_linkage); @@ -341,7 +341,7 @@ TestThetaLoad() jlm::rvsdg::view(rvsdgModule->Rvsdg(), stdout); // Memory Converter replaces the lambda so we start from the root of the graph - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); assert(region->nnodes() == 1); lambda = jlm::util::AssertedCast(region->Nodes().begin().ptr()); lambdaRegion = lambda->subregion(); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp 
b/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp index c6f78f45f..6a522ea7d 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp @@ -21,7 +21,7 @@ TestSingleLoad() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -31,7 +31,7 @@ TestSingleLoad() { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", linkage::external_linkage); @@ -97,7 +97,7 @@ TestLoadStore() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -109,7 +109,7 @@ TestLoadStore() { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", linkage::external_linkage); @@ -182,7 +182,7 @@ TestAddrQueue() using namespace jlm::hls; auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); - auto nf = rvsdgModule->Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule->Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); // Setup the function @@ -192,7 +192,7 @@ TestAddrQueue() { jlm::llvm::PointerType::Create(), MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "test", 
linkage::external_linkage); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 285647ca8..6c53a3f76 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -22,10 +22,11 @@ TestFork() auto ft = FunctionType::Create({ b32, b32, b32 }, { b32, b32, b32 }); RvsdgModule rm(util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::Operation)); + auto nf = rm.Rvsdg().GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rm.Rvsdg().GetRootRegion(), ft, "f", linkage::external_linkage); rvsdg::bitult_op ult(32); rvsdg::bitadd_op add(32); @@ -56,7 +57,7 @@ TestFork() // Assert { - auto omegaRegion = rm.Rvsdg().root(); + auto omegaRegion = &rm.Rvsdg().GetRootRegion(); assert(omegaRegion->nnodes() == 1); auto lambda = util::AssertedCast(omegaRegion->Nodes().begin().ptr()); assert(is(lambda)); @@ -90,10 +91,11 @@ TestConstantFork() auto ft = FunctionType::Create({ b32 }, { b32 }); RvsdgModule rm(util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::Operation)); + auto nf = rm.Rvsdg().GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rm.Rvsdg().GetRootRegion(), ft, "f", linkage::external_linkage); auto lambdaRegion = lambda->subregion(); rvsdg::bitult_op ult(32); @@ -122,7 +124,7 @@ TestConstantFork() // Assert { - auto omegaRegion = rm.Rvsdg().root(); + auto omegaRegion = &rm.Rvsdg().GetRootRegion(); assert(omegaRegion->nnodes() == 1); auto lambda = util::AssertedCast(omegaRegion->Nodes().begin().ptr()); assert(is(lambda)); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp 
b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp index 993b99433..d0bf1a765 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestGamma.cpp @@ -21,12 +21,13 @@ TestWithMatch() auto ft = FunctionType::Create({ jlm::rvsdg::bittype::Create(1), vt, vt }, { vt }); RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rm.Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); /* Setup graph */ - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rm.Rvsdg().GetRootRegion(), ft, "f", linkage::external_linkage); auto match = jlm::rvsdg::match(1, { { 0, 0 } }, 1, 2, lambda->GetFunctionArguments()[0]); auto gamma = jlm::rvsdg::GammaNode::create(match, 2); @@ -58,12 +59,13 @@ TestWithoutMatch() auto ft = FunctionType::Create({ jlm::rvsdg::ControlType::Create(2), vt, vt }, { vt }); RvsdgModule rm(jlm::util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rm.Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); /* Setup graph */ - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rm.Rvsdg().GetRootRegion(), ft, "f", linkage::external_linkage); auto gamma = jlm::rvsdg::GammaNode::create(lambda->GetFunctionArguments()[0], 2); auto ev1 = gamma->AddEntryVar(lambda->GetFunctionArguments()[1]); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index 18575ede2..1b1191645 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -21,10 +21,11 @@ TestUnknownBoundaries() auto ft = FunctionType::Create({ b32, b32, b32 }, { b32, b32, b32 }); RvsdgModule rm(jlm::util::filepath(""), 
"", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rm.Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto lambda = lambda::node::create(rm.Rvsdg().root(), ft, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&rm.Rvsdg().GetRootRegion(), ft, "f", linkage::external_linkage); jlm::rvsdg::bitult_op ult(32); jlm::rvsdg::bitsgt_op sgt(32); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 7fa36f34e..8ca61d7ba 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -88,7 +88,7 @@ TestTheta() auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); - auto thetaNode = jlm::rvsdg::ThetaNode::create(rvsdg.root()); + auto thetaNode = jlm::rvsdg::ThetaNode::create(&rvsdg.GetRootRegion()); auto thetaOutput0 = thetaNode->add_loopvar(p); auto thetaOutput1 = thetaNode->add_loopvar(x); @@ -100,7 +100,7 @@ TestTheta() thetaNode->set_predicate(thetaOutput0->argument()); auto result = jlm::tests::SimpleNode::Create( - *rvsdg.root(), + rvsdg.GetRootRegion(), { thetaOutput0, thetaOutput1, thetaOutput2, thetaOutput3 }, { valueType }) .output(0); @@ -136,7 +136,7 @@ TestLambda() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); + lambda::node::create(&rvsdg.GetRootRegion(), functionType, "f", linkage::external_linkage); auto argument0 = lambdaNode->GetFunctionArguments()[0]; auto argument1 = lambdaNode->GetFunctionArguments()[1]; auto argument2 = lambdaNode->AddContextVar(*x).inner; @@ -157,8 +157,8 @@ TestLambda() jlm::hls::RemoveUnusedStates(*rvsdgModule); // Assert - assert(rvsdg.root()->nnodes() == 1); - auto & 
newLambdaNode = dynamic_cast(*rvsdg.root()->Nodes().begin()); + assert(rvsdg.GetRootRegion().nnodes() == 1); + auto & newLambdaNode = dynamic_cast(*rvsdg.GetRootRegion().Nodes().begin()); assert(newLambdaNode.ninputs() == 2); assert(newLambdaNode.subregion()->narguments() == 3); assert(newLambdaNode.subregion()->nresults() == 2); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp index 36122faca..d3b2011f4 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp @@ -39,13 +39,13 @@ test() { rvsdg::bittype::Create(8) }); jlm::llvm::RvsdgModule rm(util::filepath(""), "", ""); - auto nf = rm.Rvsdg().node_normal_form(typeid(rvsdg::Operation)); + auto nf = rm.Rvsdg().GetNodeNormalForm(typeid(rvsdg::Operation)); nf->set_mutable(false); /* setup graph */ auto lambda = jlm::llvm::lambda::node::create( - rm.Rvsdg().root(), + &rm.Rvsdg().GetRootRegion(), ft, "f", jlm::llvm::linkage::external_linkage); diff --git a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp index 4d8badbd9..b931412aa 100644 --- a/tests/jlm/llvm/backend/dot/DotWriterTests.cpp +++ b/tests/jlm/llvm/backend/dot/DotWriterTests.cpp @@ -23,13 +23,15 @@ TestWriteGraphs() // Act GraphWriter writer; - dot::WriteGraphs(writer, *gammaTest.graph().root(), false); + dot::WriteGraphs(writer, gammaTest.graph().GetRootRegion(), false); writer.OutputAllGraphs(std::cout, GraphOutputFormat::Dot); // Assert auto & rootGraph = writer.GetGraph(0); - assert(rootGraph.GetProgramObject() == reinterpret_cast(gammaTest.graph().root())); + assert( + rootGraph.GetProgramObject() + == reinterpret_cast(&gammaTest.graph().GetRootRegion())); assert(rootGraph.NumNodes() == 1); // Only the lambda node for "f" assert(rootGraph.NumResultNodes() == 1); // Exporting the function "f" auto & lambdaNode = *AssertedCast(&rootGraph.GetNode(0)); 
@@ -85,7 +87,7 @@ TestTypeGraph() // Act GraphWriter writer; - dot::WriteGraphs(writer, *gammaTest.graph().root(), true); + dot::WriteGraphs(writer, gammaTest.graph().GetRootRegion(), true); writer.Finalize(); writer.OutputAllGraphs(std::cout, GraphOutputFormat::Dot); diff --git a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp index 0bb4425dd..90b37e5ce 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/GammaTests.cpp @@ -29,11 +29,11 @@ GammaWithMatch() FunctionType::Create({ jlm::rvsdg::bittype::Create(1), valueType, valueType }, { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); - auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule.Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambdaNode = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "lambdaOutput", linkage::external_linkage); @@ -84,11 +84,11 @@ GammaWithoutMatch() { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); - auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule.Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambdaNode = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "lambdaOutput", linkage::external_linkage); @@ -139,11 +139,11 @@ EmptyGammaWithThreeSubregions() { valueType }); RvsdgModule rvsdgModule(filepath(""), "", ""); - auto nf = rvsdgModule.Rvsdg().node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdgModule.Rvsdg().GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto lambdaNode = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "lambdaOutput", linkage::external_linkage); @@ -197,7 +197,7 
@@ PartialEmptyGamma() RvsdgModule rvsdgModule(filepath(""), "", ""); auto lambdaNode = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "lambdaOutput", linkage::external_linkage); diff --git a/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp b/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp index 63fc9f232..f0641a76e 100644 --- a/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp +++ b/tests/jlm/llvm/backend/llvm/r2j/test-recursive-data.cpp @@ -30,7 +30,7 @@ test() auto imp = &GraphImport::Create(rm.Rvsdg(), vt, "", linkage::external_linkage); phi::builder pb; - pb.begin(rm.Rvsdg().root()); + pb.begin(&rm.Rvsdg().GetRootRegion()); auto region = pb.subregion(); auto r1 = pb.add_recvar(pt); auto r2 = pb.add_recvar(pt); diff --git a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp index 14fbf1e05..4839235b7 100644 --- a/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp +++ b/tests/jlm/llvm/frontend/llvm/ThreeAddressCodeConversionTests.cpp @@ -93,10 +93,10 @@ LoadVolatileConversion() // Act jlm::util::StatisticsCollector statisticsCollector; auto rvsdgModule = ConvertInterProceduralGraphModule(*ipgModule, statisticsCollector); - std::cout << jlm::rvsdg::view(rvsdgModule->Rvsdg().root()) << std::flush; + std::cout << jlm::rvsdg::view(&rvsdgModule->Rvsdg().GetRootRegion()) << std::flush; // Assert - auto lambdaOutput = rvsdgModule->Rvsdg().root()->result(0)->origin(); + auto lambdaOutput = rvsdgModule->Rvsdg().GetRootRegion().result(0)->origin(); auto lambda = dynamic_cast(jlm::rvsdg::output::GetNode(*lambdaOutput)); auto loadVolatileNode = lambda->subregion()->Nodes().begin().ptr(); @@ -122,10 +122,10 @@ StoreVolatileConversion() // Act jlm::util::StatisticsCollector statisticsCollector; auto rvsdgModule = ConvertInterProceduralGraphModule(*ipgModule, statisticsCollector); - std::cout << 
jlm::rvsdg::view(rvsdgModule->Rvsdg().root()) << std::flush; + std::cout << jlm::rvsdg::view(&rvsdgModule->Rvsdg().GetRootRegion()) << std::flush; // Assert - auto lambdaOutput = rvsdgModule->Rvsdg().root()->result(0)->origin(); + auto lambdaOutput = rvsdgModule->Rvsdg().GetRootRegion().result(0)->origin(); auto lambda = dynamic_cast(jlm::rvsdg::output::GetNode(*lambdaOutput)); auto storeVolatileNode = lambda->subregion()->Nodes().begin().ptr(); diff --git a/tests/jlm/llvm/frontend/llvm/test-export.cpp b/tests/jlm/llvm/frontend/llvm/test-export.cpp index 13c610946..92c0c50d8 100644 --- a/tests/jlm/llvm/frontend/llvm/test-export.cpp +++ b/tests/jlm/llvm/frontend/llvm/test-export.cpp @@ -39,7 +39,7 @@ test() node should be converted to RVSDG imports as they do not have a body, i.e., either a CFG or a initialization. */ - assert(rvsdgModule->Rvsdg().root()->nresults() == 0); + assert(rvsdgModule->Rvsdg().GetRootRegion().nresults() == 0); return 0; } diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index b1265be2e..06ef6e7d4 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -68,7 +68,7 @@ TestCopy() // Act auto node = jlm::rvsdg::output::GetNode(*loadResults[0]); auto loadNode = jlm::util::AssertedCast(node); - auto copiedNode = loadNode->copy(graph.root(), { address2, memoryState2 }); + auto copiedNode = loadNode->copy(&graph.GetRootRegion(), { address2, memoryState2 }); // Assert auto copiedLoadNode = dynamic_cast(copiedNode); @@ -104,13 +104,13 @@ TestLoadAllocaReduction() auto & ex = GraphExport::Create(*loadNode.output(0), "l"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act jlm::rvsdg::ReduceNode(NormalizeLoadAlloca, loadNode); - graph.prune(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert auto node = 
jlm::rvsdg::output::GetNode(*ex.origin()); @@ -151,13 +151,13 @@ TestLoadMuxReduction() auto & ex = GraphExport::Create(*loadNode.output(0), "l"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeLoadMux, loadNode); - graph.prune(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert assert(success); @@ -205,13 +205,13 @@ TestDuplicateStateReduction() auto & exS4 = GraphExport::Create(*loadNode.output(4), "exS4"); auto & exS5 = GraphExport::Create(*loadNode.output(5), "exS5"); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeLoadDuplicateState, loadNode); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Assert assert(success); @@ -260,16 +260,16 @@ TestLoadStoreStateReduction() auto & ex1 = GraphExport::Create(*loadNode1.output(0), "l1"); auto & ex2 = GraphExport::Create(*loadNode2.output(0), "l2"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto success1 = jlm::rvsdg::ReduceNode(NormalizeLoadStoreState, loadNode1); auto success2 = jlm::rvsdg::ReduceNode(NormalizeLoadStoreState, loadNode2); - graph.prune(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert assert(success1); @@ -314,17 +314,17 @@ TestLoadStoreReduction() auto & x1 = GraphExport::Create(*loadNode.output(0), "value"); auto & x2 = GraphExport::Create(*loadNode.output(1), "state"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeLoadStore, loadNode); - graph.normalize(); + graph.Normalize(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert 
assert(success); - assert(graph.root()->nnodes() == 1); + assert(graph.GetRootRegion().nnodes() == 1); assert(x1.origin() == v); assert(x2.origin() == s1); @@ -367,17 +367,17 @@ TestLoadLoadReduction() auto & x2 = GraphExport::Create(*loadNode.output(2), "s"); auto & x3 = GraphExport::Create(*loadNode.output(3), "s"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeLoadLoadState, loadNode); - graph.prune(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert assert(success); - assert(graph.root()->nnodes() == 6); + assert(graph.GetRootRegion().nnodes() == 6); auto ld = jlm::rvsdg::output::GetNode(*x1.origin()); assert(is(ld)); @@ -504,7 +504,7 @@ NodeCopy() LoadVolatileNode::CreateNode(address1, iOState1, { &memoryState1 }, valueType, 4); // Act - auto copiedNode = loadNode.copy(graph.root(), { &address2, &iOState2, &memoryState2 }); + auto copiedNode = loadNode.copy(&graph.GetRootRegion(), { &address2, &iOState2, &memoryState2 }); // Assert auto copiedLoadNode = dynamic_cast(copiedNode); diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index ed75cd70a..4d23e525f 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -156,7 +156,8 @@ StoreVolatileNodeCopy() StoreVolatileNode::CreateNode(address1, value1, ioState1, { &memoryState1 }, 4); // Act - auto copiedNode = storeNode.copy(graph.root(), { &address2, &value2, &ioState2, &memoryState2 }); + auto copiedNode = + storeNode.copy(&graph.GetRootRegion(), { &address2, &value2, &ioState2, &memoryState2 }); // Assert auto copiedStoreNode = dynamic_cast(copiedNode); @@ -196,7 +197,7 @@ TestCopy() // Act auto node = jlm::rvsdg::output::GetNode(*storeResults[0]); auto storeNode = jlm::util::AssertedCast(node); - auto copiedNode = 
storeNode->copy(graph.root(), { address2, value2, memoryState2 }); + auto copiedNode = storeNode->copy(&graph.GetRootRegion(), { address2, value2, memoryState2 }); // Assert auto copiedStoreNode = dynamic_cast(copiedNode); @@ -234,13 +235,13 @@ TestStoreMuxNormalization() auto & ex = GraphExport::Create(*storeNode.output(0), "s"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeStoreMux, storeNode); - graph.prune(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert assert(success); @@ -290,14 +291,14 @@ TestDuplicateStateReduction() auto & exS4 = GraphExport::Create(*storeNode.output(3), "exS4"); auto & exS5 = GraphExport::Create(*storeNode.output(4), "exS5"); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeStoreDuplicateState, storeNode); - graph.prune(); + graph.PruneNodes(); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Assert assert(success); @@ -348,23 +349,23 @@ TestStoreAllocaReduction() GraphExport::Create(*storeNode2.output(1), "s2"); GraphExport::Create(*storeNode2.output(2), "s3"); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Act auto success1 = jlm::rvsdg::ReduceNode(NormalizeStoreAlloca, storeNode1); auto success2 = jlm::rvsdg::ReduceNode(NormalizeStoreAlloca, storeNode2); - graph.prune(); + graph.PruneNodes(); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Assert assert(success1 && success2); bool has_add_import = false; - for (size_t n = 0; n < graph.root()->nresults(); n++) + for (size_t n = 0; n < graph.GetRootRegion().nresults(); n++) { - if (graph.root()->result(n)->origin() == s) + if (graph.GetRootRegion().result(n)->origin() == s) has_add_import = true; } assert(has_add_import); @@ -397,17 +398,17 @@ 
TestStoreStoreReduction() auto & ex = GraphExport::Create(*storeNode2.output(0), "state"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto success = jlm::rvsdg::ReduceNode(NormalizeStoreStore, storeNode2); - graph.prune(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert assert(success); - assert(graph.root()->nnodes() == 1); + assert(graph.GetRootRegion().nnodes() == 1); assert(jlm::rvsdg::output::GetNode(*ex.origin())->input(1)->origin() == v2); return 0; diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 6b1fa3a1a..8308196e0 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -42,7 +42,8 @@ TestCopy() // Act auto node = jlm::rvsdg::output::GetNode(*callResults[0]); auto callNode = jlm::util::AssertedCast(node); - auto copiedNode = callNode->copy(rvsdg.root(), { function2, value2, iOState2, memoryState2 }); + auto copiedNode = + callNode->copy(&rvsdg.GetRootRegion(), { function2, value2, iOState2, memoryState2 }); // Assert auto copiedCallNode = dynamic_cast(copiedNode); @@ -112,12 +113,13 @@ TestCallTypeClassifierIndirectCall() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto SetupFunction = [&]() { - auto lambda = lambda::node::create(graph->root(), fcttype2, "fct", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), fcttype2, "fct", linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[1]; auto memoryStateArgument = lambda->GetFunctionArguments()[2]; @@ -164,7 +166,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto module = 
RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto vt = jlm::tests::valuetype::Create(); @@ -177,8 +179,11 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto SetupFunctionG = [&]() { - auto lambda = - lambda::node::create(graph->root(), functionTypeG, "g", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionTypeG, + "g", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -219,7 +224,8 @@ TestCallTypeClassifierNonRecursiveDirectCall() { iostatetype::Create(), MemoryStateType::Create() }, { vt, iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "f", linkage::external_linkage); auto functionGArgument = lambda->AddContextVar(*g).inner; auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -241,7 +247,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() GraphExport::Create(*f->output(), "f"); - // jlm::rvsdg::view(graph->root(), stdout); + // jlm::rvsdg::view(&graph->GetRootRegion(), stdout); // Act auto callTypeClassifier = CallNode::ClassifyCall(*callNode); @@ -260,7 +266,7 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto vt = jlm::tests::valuetype::Create(); @@ -273,8 +279,11 @@ 
TestCallTypeClassifierNonRecursiveDirectCallTheta() auto SetupFunctionG = [&]() { - auto lambda = - lambda::node::create(graph->root(), functionTypeG, "g", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionTypeG, + "g", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -332,7 +341,8 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() { iostatetype::Create(), MemoryStateType::Create() }, { vt, iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph->root(), functionType, "f", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph->GetRootRegion(), functionType, "f", linkage::external_linkage); auto functionG = lambda->AddContextVar(*g).inner; auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -355,7 +365,7 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() auto [f, callNode] = SetupFunctionF(g); GraphExport::Create(*f, "f"); - jlm::rvsdg::view(graph->root(), stdout); + jlm::rvsdg::view(&graph->GetRootRegion(), stdout); // Act auto callTypeClassifier = CallNode::ClassifyCall(*callNode); @@ -374,7 +384,7 @@ TestCallTypeClassifierRecursiveDirectCall() auto module = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &module->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto SetupFib = [&]() @@ -391,7 +401,7 @@ TestCallTypeClassifierRecursiveDirectCall() auto pt = PointerType::Create(); jlm::llvm::phi::builder pb; - pb.begin(graph->root()); + pb.begin(&graph->GetRootRegion()); auto fibrv = pb.add_recvar(pt); auto lambda = diff --git a/tests/jlm/llvm/ir/operators/TestLambda.cpp b/tests/jlm/llvm/ir/operators/TestLambda.cpp index 
6a2195bc0..ed1370840 100644 --- a/tests/jlm/llvm/ir/operators/TestLambda.cpp +++ b/tests/jlm/llvm/ir/operators/TestLambda.cpp @@ -23,7 +23,7 @@ TestArgumentIterators() auto functionType = FunctionType::Create({ vt }, { vt }); auto lambda = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "f", linkage::external_linkage); @@ -41,7 +41,7 @@ TestArgumentIterators() auto functionType = FunctionType::Create({}, { vt }); auto lambda = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "f", linkage::external_linkage); @@ -59,7 +59,7 @@ TestArgumentIterators() auto functionType = FunctionType::Create({ vt, vt, vt }, { vt, vt }); auto lambda = lambda::node::create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), functionType, "f", linkage::external_linkage); @@ -91,8 +91,8 @@ TestInvalidOperandRegion() auto rvsdg = &rvsdgModule->Rvsdg(); auto lambdaNode = - lambda::node::create(rvsdg->root(), functionType, "f", linkage::external_linkage); - auto result = jlm::tests::create_testop(rvsdg->root(), {}, { vt })[0]; + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "f", linkage::external_linkage); + auto result = jlm::tests::create_testop(&rvsdg->GetRootRegion(), {}, { vt })[0]; bool invalidRegionErrorCaught = false; try @@ -125,7 +125,7 @@ TestRemoveLambdaInputsWhere() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); + lambda::node::create(&rvsdg.GetRootRegion(), functionType, "f", linkage::external_linkage); auto lambdaBinder0 = lambdaNode->AddContextVar(*x); auto lambdaBinder1 = lambdaNode->AddContextVar(*x); @@ -194,7 +194,7 @@ TestPruneLambdaInputs() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "f", linkage::external_linkage); 
+ lambda::node::create(&rvsdg.GetRootRegion(), functionType, "f", linkage::external_linkage); lambdaNode->AddContextVar(*x); auto lambdaInput1 = lambdaNode->AddContextVar(*x); @@ -234,7 +234,7 @@ TestCallSummaryComputationDead() auto & rvsdg = rvsdgModule->Rvsdg(); auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "f", jlm::llvm::linkage::external_linkage); @@ -268,7 +268,7 @@ TestCallSummaryComputationExport() auto & rvsdg = rvsdgModule->Rvsdg(); auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "f", jlm::llvm::linkage::external_linkage); @@ -307,7 +307,7 @@ TestCallSummaryComputationDirectCalls() auto SetupLambdaX = [&]() { auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "x", jlm::llvm::linkage::external_linkage); @@ -322,7 +322,7 @@ TestCallSummaryComputationDirectCalls() auto SetupLambdaY = [&](rvsdg::output & lambdaX) { auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "y", jlm::llvm::linkage::external_linkage); @@ -344,7 +344,7 @@ TestCallSummaryComputationDirectCalls() auto SetupLambdaZ = [&](rvsdg::output & lambdaX, rvsdg::output & lambdaY) { auto lambdaNode = jlm::llvm::lambda::node::create( - rvsdg.root(), + &rvsdg.GetRootRegion(), functionType, "y", jlm::llvm::linkage::external_linkage); @@ -455,18 +455,18 @@ TestCallSummaryComputationFunctionPointerInDelta() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto rvsdg = &rvsdgModule->Rvsdg(); - auto nf = rvsdg->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdg->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto valueType = jlm::tests::valuetype::Create(); auto functionType = FunctionType::Create({ valueType }, { valueType }); auto lambdaNode = - lambda::node::create(rvsdg->root(), functionType, "f", 
linkage::external_linkage); + lambda::node::create(&rvsdg->GetRootRegion(), functionType, "f", linkage::external_linkage); lambdaNode->finalize({ lambdaNode->GetFunctionArguments()[0] }); auto deltaNode = delta::node::Create( - rvsdg->root(), + &rvsdg->GetRootRegion(), PointerType::Create(), "fp", linkage::external_linkage, @@ -493,7 +493,7 @@ TestCallSummaryComputationLambdaResult() // Arrange jlm::rvsdg::Graph rvsdg; - auto nf = rvsdg.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = rvsdg.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto pointerType = PointerType::Create(); @@ -502,11 +502,11 @@ TestCallSummaryComputationLambdaResult() auto functionTypeF = FunctionType::Create({ valueType }, { PointerType::Create() }); auto lambdaNodeG = - lambda::node::create(rvsdg.root(), functionTypeG, "g", linkage::external_linkage); + lambda::node::create(&rvsdg.GetRootRegion(), functionTypeG, "g", linkage::external_linkage); auto lambdaOutputG = lambdaNodeG->finalize({ lambdaNodeG->GetFunctionArguments()[0] }); auto lambdaNodeF = - lambda::node::create(rvsdg.root(), functionTypeF, "f", linkage::external_linkage); + lambda::node::create(&rvsdg.GetRootRegion(), functionTypeF, "f", linkage::external_linkage); auto lambdaGArgument = lambdaNodeF->AddContextVar(*lambdaOutputG).inner; auto lambdaOutputF = lambdaNodeF->finalize({ lambdaGArgument }); diff --git a/tests/jlm/llvm/ir/operators/TestPhi.cpp b/tests/jlm/llvm/ir/operators/TestPhi.cpp index 5e2b7a194..087de0fd0 100644 --- a/tests/jlm/llvm/ir/operators/TestPhi.cpp +++ b/tests/jlm/llvm/ir/operators/TestPhi.cpp @@ -53,7 +53,7 @@ TestPhiCreation() }; phi::builder pb; - pb.begin(graph.root()); + pb.begin(&graph.GetRootRegion()); auto rv1 = pb.add_recvar(PointerType::Create()); auto rv2 = pb.add_recvar(PointerType::Create()); auto rv3 = pb.add_recvar(PointerType::Create()); @@ -69,10 +69,10 @@ TestPhiCreation() auto phi = pb.end(); GraphExport::Create(*phi->output(0), "dummy"); - 
graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stderr); + jlm::rvsdg::view(&graph.GetRootRegion(), stderr); } static void @@ -88,7 +88,7 @@ TestRemovePhiArgumentsWhere() auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); phi::builder phiBuilder; - phiBuilder.begin(rvsdgModule.Rvsdg().root()); + phiBuilder.begin(&rvsdgModule.Rvsdg().GetRootRegion()); auto phiOutput0 = phiBuilder.add_recvar(valueType); auto phiOutput1 = phiBuilder.add_recvar(valueType); @@ -171,7 +171,7 @@ TestPrunePhiArguments() auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); phi::builder phiBuilder; - phiBuilder.begin(rvsdgModule.Rvsdg().root()); + phiBuilder.begin(&rvsdgModule.Rvsdg().GetRootRegion()); auto phiOutput0 = phiBuilder.add_recvar(valueType); auto phiOutput1 = phiBuilder.add_recvar(valueType); @@ -215,7 +215,7 @@ TestRemovePhiOutputsWhere() RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); phi::builder phiBuilder; - phiBuilder.begin(rvsdgModule.Rvsdg().root()); + phiBuilder.begin(&rvsdgModule.Rvsdg().GetRootRegion()); auto phiOutput0 = phiBuilder.add_recvar(valueType); auto phiOutput1 = phiBuilder.add_recvar(valueType); @@ -264,7 +264,7 @@ TestPrunePhiOutputs() RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); phi::builder phiBuilder; - phiBuilder.begin(rvsdgModule.Rvsdg().root()); + phiBuilder.begin(&rvsdgModule.Rvsdg().GetRootRegion()); auto phiOutput0 = phiBuilder.add_recvar(valueType); auto phiOutput1 = phiBuilder.add_recvar(valueType); diff --git a/tests/jlm/llvm/ir/operators/test-delta.cpp b/tests/jlm/llvm/ir/operators/test-delta.cpp index cea0ac86f..1fd6fd73e 100644 --- a/tests/jlm/llvm/ir/operators/test-delta.cpp +++ b/tests/jlm/llvm/ir/operators/test-delta.cpp @@ -25,7 +25,7 @@ TestDeltaCreation() auto imp = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); auto delta1 = delta::node::Create( - rvsdgModule.Rvsdg().root(), 
+ &rvsdgModule.Rvsdg().GetRootRegion(), valueType, "test-delta1", linkage::external_linkage, @@ -36,7 +36,7 @@ TestDeltaCreation() delta1->finalize(jlm::tests::create_testop(delta1->subregion(), { dep }, { valueType })[0]); auto delta2 = delta::node::Create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), valueType, "test-delta2", linkage::internal_linkage, @@ -50,7 +50,7 @@ TestDeltaCreation() jlm::rvsdg::view(rvsdgModule.Rvsdg(), stdout); // Assert - assert(rvsdgModule.Rvsdg().root()->nnodes() == 2); + assert(rvsdgModule.Rvsdg().GetRootRegion().nnodes() == 2); assert(delta1->linkage() == linkage::external_linkage); assert(delta1->constant() == true); @@ -73,7 +73,7 @@ TestRemoveDeltaInputsWhere() auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); auto deltaNode = delta::node::Create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), valueType, "delta", linkage::external_linkage, @@ -140,7 +140,7 @@ TestPruneDeltaInputs() auto x = &jlm::tests::GraphImport::Create(rvsdgModule.Rvsdg(), valueType, ""); auto deltaNode = delta::node::Create( - rvsdgModule.Rvsdg().root(), + &rvsdgModule.Rvsdg().GetRootRegion(), valueType, "delta", linkage::external_linkage, diff --git a/tests/jlm/llvm/ir/operators/test-sext.cpp b/tests/jlm/llvm/ir/operators/test-sext.cpp index a376c1a95..8138f732f 100644 --- a/tests/jlm/llvm/ir/operators/test-sext.cpp +++ b/tests/jlm/llvm/ir/operators/test-sext.cpp @@ -32,8 +32,8 @@ test_bitunary_reduction() // jlm::rvsdg::view(graph, stdout); nf->set_mutable(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); // jlm::rvsdg::view(graph, stdout); @@ -60,8 +60,8 @@ test_bitbinary_reduction() // jlm::rvsdg::view(graph, stdout); nf->set_mutable(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); // jlm::rvsdg::view(graph, stdout); @@ -89,8 +89,8 @@ test_inverse_reduction() jlm::rvsdg::view(graph, stdout); 
nf->set_mutable(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); jlm::rvsdg::view(graph, stdout); diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index 6a6ec2212..a58238d9f 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -44,7 +44,7 @@ TestGamma() auto & rvsdg = rvsdgModule->Rvsdg(); auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "test", linkage::external_linkage); + lambda::node::create(&rvsdg.GetRootRegion(), functionType, "test", linkage::external_linkage); auto c = lambdaNode->GetFunctionArguments()[0]; auto x = lambdaNode->GetFunctionArguments()[1]; @@ -97,7 +97,7 @@ TestTheta() auto & rvsdg = rvsdgModule->Rvsdg(); auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "test", linkage::external_linkage); + lambda::node::create(&rvsdg.GetRootRegion(), functionType, "test", linkage::external_linkage); auto c = lambdaNode->GetFunctionArguments()[0]; auto x = lambdaNode->GetFunctionArguments()[1]; @@ -153,8 +153,11 @@ TestCall() jlm::rvsdg::output * lambdaOutputTest1; { - auto lambdaNode = - lambda::node::create(rvsdg.root(), functionTypeTest1, "test1", linkage::external_linkage); + auto lambdaNode = lambda::node::create( + &rvsdg.GetRootRegion(), + functionTypeTest1, + "test1", + linkage::external_linkage); auto controlArgument = lambdaNode->GetFunctionArguments()[0]; auto xArgument = lambdaNode->GetFunctionArguments()[1]; @@ -188,8 +191,11 @@ TestCall() { valueType, valueType, ioStateType, memoryStateType }, { valueType, valueType, ioStateType, memoryStateType }); - auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "test2", linkage::external_linkage); + auto lambdaNode = lambda::node::create( + &rvsdg.GetRootRegion(), + functionType, + "test2", + linkage::external_linkage); auto xArgument = 
lambdaNode->GetFunctionArguments()[0]; auto yArgument = lambdaNode->GetFunctionArguments()[1]; auto ioStateArgument = lambdaNode->GetFunctionArguments()[2]; @@ -242,8 +248,11 @@ TestCallWithMemoryStateNodes() jlm::rvsdg::output * lambdaOutputTest1; { - auto lambdaNode = - lambda::node::create(rvsdg.root(), functionTypeTest1, "test1", linkage::external_linkage); + auto lambdaNode = lambda::node::create( + &rvsdg.GetRootRegion(), + functionTypeTest1, + "test1", + linkage::external_linkage); auto controlArgument = lambdaNode->GetFunctionArguments()[0]; auto xArgument = lambdaNode->GetFunctionArguments()[1]; @@ -277,8 +286,11 @@ TestCallWithMemoryStateNodes() { valueType, ioStateType, memoryStateType }, { valueType, ioStateType, memoryStateType }); - auto lambdaNode = - lambda::node::create(rvsdg.root(), functionType, "test2", linkage::external_linkage); + auto lambdaNode = lambda::node::create( + &rvsdg.GetRootRegion(), + functionType, + "test2", + linkage::external_linkage); auto xArgument = lambdaNode->GetFunctionArguments()[0]; auto ioStateArgument = lambdaNode->GetFunctionArguments()[1]; auto memoryStateArgument = lambdaNode->GetFunctionArguments()[2]; diff --git a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp index 83e386703..e3f2c6190 100644 --- a/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp +++ b/tests/jlm/llvm/opt/RvsdgTreePrinterTests.cpp @@ -35,7 +35,7 @@ PrintRvsdgTree() auto functionType = FunctionType::Create({ MemoryStateType::Create() }, { MemoryStateType::Create() }); auto lambda = lambda::node::create( - rvsdgModule->Rvsdg().root(), + &rvsdgModule->Rvsdg().GetRootRegion(), functionType, "f", linkage::external_linkage); @@ -73,7 +73,7 @@ PrintNumRvsdgNodesAnnotation() // Arrange std::string fileName = "PrintNumRvsdgNodesAnnotationTest"; auto rvsdgModule = RvsdgModule::Create({ fileName }, "", ""); - auto rootRegion = rvsdgModule->Rvsdg().root(); + auto rootRegion = &rvsdgModule->Rvsdg().GetRootRegion(); 
auto structuralNode = jlm::tests::structural_node::create(rootRegion, 2); jlm::tests::test_op::create(structuralNode->subregion(0), {}, {}); @@ -125,7 +125,7 @@ PrintNumMemoryStateInputsOutputsAnnotation() auto & x = jlm::tests::GraphImport::Create(rvsdg, memoryStateType, "x"); auto & y = jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 2); + auto structuralNode = jlm::tests::structural_node::create(&rvsdg.GetRootRegion(), 2); auto & ix = structuralNode->AddInputWithArguments(x); auto & iy = structuralNode->AddInputWithArguments(y); diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index a217bae1e..4a4be8c84 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -38,11 +38,11 @@ TestRoot() auto y = &jlm::tests::GraphImport::Create(graph, jlm::tests::valuetype::Create(), "y"); GraphExport::Create(*y, "z"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); RunDeadNodeElimination(rm); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - assert(graph.root()->narguments() == 1); + assert(graph.GetRootRegion().narguments() == 1); } static void @@ -73,15 +73,15 @@ TestGamma() GraphExport::Create(*gamma->output(0), "z"); GraphExport::Create(*gamma->output(2), "w"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); RunDeadNodeElimination(rm); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); assert(gamma->noutputs() == 2); assert(gamma->subregion(1)->nnodes() == 0); assert(gamma->subregion(1)->narguments() == 2); assert(gamma->ninputs() == 3); - assert(graph.root()->narguments() == 2); + assert(graph.GetRootRegion().narguments() == 2); } static void @@ -111,7 +111,7 @@ TestGamma2() 
RunDeadNodeElimination(rm); // jlm::rvsdg::view(graph, stdout); - assert(graph.root()->narguments() == 1); + assert(graph.GetRootRegion().narguments() == 1); } static void @@ -128,7 +128,7 @@ TestTheta() auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lv1 = theta->add_loopvar(x); auto lv2 = theta->add_loopvar(y); @@ -148,13 +148,13 @@ TestTheta() GraphExport::Create(*theta->output(0), "a"); GraphExport::Create(*theta->output(3), "b"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); RunDeadNodeElimination(rm); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); assert(theta->noutputs() == 3); assert(theta->subregion()->nnodes() == 1); - assert(graph.root()->narguments() == 2); + assert(graph.GetRootRegion().narguments() == 2); } static void @@ -171,7 +171,7 @@ TestNestedTheta() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto otheta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto otheta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lvo1 = otheta->add_loopvar(c); auto lvo2 = otheta->add_loopvar(x); @@ -217,7 +217,7 @@ TestEvolvingTheta() auto x3 = &jlm::tests::GraphImport::Create(graph, vt, "x3"); auto x4 = &jlm::tests::GraphImport::Create(graph, vt, "x4"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lv0 = theta->add_loopvar(c); auto lv1 = theta->add_loopvar(x1); @@ -253,7 +253,7 @@ TestLambda() auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto lambda = lambda::node::create( - graph.root(), + &graph.GetRootRegion(), FunctionType::Create({ vt }, { vt, vt }), "f", 
linkage::external_linkage); @@ -269,12 +269,12 @@ TestLambda() GraphExport::Create(*output, "f"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); RunDeadNodeElimination(rm); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); assert(lambda->subregion()->nnodes() == 0); - assert(graph.root()->narguments() == 1); + assert(graph.GetRootRegion().narguments() == 1); } static void @@ -345,7 +345,7 @@ TestPhi() }; phi::builder phiBuilder; - phiBuilder.begin(rvsdg.root()); + phiBuilder.begin(&rvsdg.GetRootRegion()); auto & phiSubregion = *phiBuilder.subregion(); auto rv1 = phiBuilder.add_recvar(PointerType::Create()); @@ -406,8 +406,13 @@ TestDelta() auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); auto z = &jlm::tests::GraphImport::Create(rvsdg, valueType, "z"); - auto deltaNode = - delta::node::Create(rvsdg.root(), valueType, "delta", linkage::external_linkage, "", false); + auto deltaNode = delta::node::Create( + &rvsdg.GetRootRegion(), + valueType, + "delta", + linkage::external_linkage, + "", + false); auto xArgument = deltaNode->add_ctxvar(x); deltaNode->add_ctxvar(y); diff --git a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp index 9aff8228f..575597032 100644 --- a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp @@ -39,15 +39,15 @@ TestSuccess() auto & ex1 = GraphExport::Create(*ld[0], "v"); auto & ex2 = GraphExport::Create(*ld[1], "s"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); // Act nf->set_mutable(true); nf->set_load_mux_reducible(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); // Assert auto load = jlm::rvsdg::output::GetNode(*ex1.origin()); @@ -93,15 +93,15 @@ 
TestWrongNumberOfOperands() auto & ex2 = GraphExport::Create(*ld[1], "s1"); auto & ex3 = GraphExport::Create(*ld[2], "s2"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act nf->set_mutable(true); nf->set_load_mux_reducible(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert @@ -133,15 +133,15 @@ TestLoadWithoutStates() auto & ex = GraphExport::Create(*loadResults[0], "v"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act nf->set_mutable(true); nf->set_load_mux_reducible(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert auto load = jlm::rvsdg::output::GetNode(*ex.origin()); diff --git a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp index 780f1458d..195adea5c 100644 --- a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp +++ b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp @@ -41,15 +41,15 @@ TestLoadStoreReductionWithDifferentValueOperandType() auto & exportedValue = GraphExport::Create(*loadResults[0], "v"); GraphExport::Create(*loadResults[1], "s"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act nf->set_mutable(true); nf->set_load_store_reducible(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert auto load = jlm::rvsdg::output::GetNode(*exportedValue.origin()); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider.cpp index 0fa8ec20f..9887c2eb8 100644 --- 
a/tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAgnosticMemoryNodeProvider.cpp @@ -43,7 +43,7 @@ TestStore1() }; jlm::tests::StoreTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -78,7 +78,7 @@ TestStore2() }; jlm::tests::StoreTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -113,7 +113,7 @@ TestLoad1() }; jlm::tests::LoadTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -148,7 +148,7 @@ TestLoad2() }; jlm::tests::LoadTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); @@ -182,7 +182,7 @@ TestLoadFromUndef() }; jlm::tests::LoadFromUndefTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -256,7 +256,7 @@ TestCall1() }; jlm::tests::CallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -342,7 +342,7 @@ TestCall2() }; jlm::tests::CallTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // 
jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -433,7 +433,7 @@ TestIndirectCall() }; jlm::tests::IndirectCallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -472,7 +472,7 @@ TestGamma() }; jlm::tests::GammaTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -509,7 +509,7 @@ TestTheta() }; jlm::tests::ThetaTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -564,7 +564,7 @@ TestDelta1() }; jlm::tests::DeltaTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -619,7 +619,7 @@ TestDelta2() }; jlm::tests::DeltaTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -674,7 +674,7 @@ TestImports() }; jlm::tests::ImportTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*ptg); @@ -741,7 +741,7 @@ 
TestPhi1() }; jlm::tests::PhiTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -803,7 +803,7 @@ TestMemcpy() }; jlm::tests::MemcpyTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index b448b2d9d..4237dde43 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -63,7 +63,7 @@ TestStore1() const auto ptg = RunAndersen(test.module()); // std::unordered_map outputMap; - // std::cout << jlm::rvsdg::view(test.graph().root(), outputMap) << std::endl; + // std::cout << jlm::rvsdg::view(test.graph().GetRootRegion(), outputMap) << std::endl; // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*ptg, outputMap) << std::endl; assert(ptg->NumAllocaNodes() == 4); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index 41a48dd6b..f55d5e5a6 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -46,7 +46,7 @@ ValidateTest(std::function validateEncoding) Test test; auto & rvsdgModule = test.module(); - jlm::rvsdg::view(rvsdgModule.Rvsdg().root(), stdout); + jlm::rvsdg::view(&rvsdgModule.Rvsdg().GetRootRegion(), stdout); jlm::util::StatisticsCollector statisticsCollector; @@ -60,7 +60,7 @@ ValidateTest(std::function validateEncoding) jlm::llvm::aa::MemoryStateEncoder encoder; encoder.Encode(rvsdgModule, *provisioning, statisticsCollector); - 
jlm::rvsdg::view(rvsdgModule.Rvsdg().root(), stdout); + jlm::rvsdg::view(&rvsdgModule.Rvsdg().GetRootRegion(), stdout); validateEncoding(test); } diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp index 716a08fe1..cd33d127a 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointerObjectSet.cpp @@ -155,11 +155,11 @@ TestPointerObjectUnification() assert(set.GetUnificationRoot(dummy0) == root); assert(set.GetUnificationRoot(dummy1) == root); - // Exactly one of the PointerObjects is the root + // Exactly one of the PointerObjects is the root assert((root == dummy0) != (root == dummy1)); assert(set.IsUnificationRoot(root)); - // Trying to unify again gives the same root + // Trying to unify again gives the same root assert(set.UnifyPointerObjects(dummy0, dummy1) == root); auto notRoot = dummy0 + dummy1 - root; @@ -778,10 +778,11 @@ TestDrawSubsetGraph() // Assert assert(graph.NumNodes() == set.NumPointerObjects()); - // Check that the unified node that is not the root, contains the index of the root + // Check that the unified node that is not the root, contains the index of the + // root assert(StringContains(graph.GetNode(nonRoot).GetLabel(), "#" + std::to_string(root))); - // Check that the unification root's label indicates pointing to external + // Check that the unification root's label indicates pointing to external assert(StringContains(graph.GetNode(root).GetLabel(), "{+}")); // Check that allocaReg0 points to alloca0 diff --git a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp index 8a92dcffc..32bf2ecae 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestPointsToGraph.cpp @@ -22,7 +22,7 @@ class TestAnalysis final : public
jlm::llvm::aa::AliasAnalysis PointsToGraph_ = jlm::llvm::aa::PointsToGraph::Create(); AnalyzeImports(rvsdgModule.Rvsdg()); - AnalyzeRegion(*rvsdgModule.Rvsdg().root()); + AnalyzeRegion(rvsdgModule.Rvsdg().GetRootRegion()); return std::move(PointsToGraph_); } @@ -89,7 +89,7 @@ class TestAnalysis final : public jlm::llvm::aa::AliasAnalysis { using namespace jlm::llvm; - auto & rootRegion = *rvsdg.root(); + auto & rootRegion = rvsdg.GetRootRegion(); for (size_t n = 0; n < rootRegion.narguments(); n++) { auto & graphImport = *jlm::util::AssertedCast(rootRegion.argument(n)); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp b/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp index 90571defe..b784eb7b6 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestRegionAwareMemoryNodeProvider.cpp @@ -62,7 +62,7 @@ TestStore1() }; jlm::tests::StoreTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -110,7 +110,7 @@ TestStore2() }; jlm::tests::StoreTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -148,7 +148,7 @@ TestLoad1() }; jlm::tests::LoadTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -196,7 +196,7 @@ TestLoad2() }; jlm::tests::LoadTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto 
pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -231,7 +231,7 @@ TestLoadFromUndef() }; jlm::tests::LoadFromUndefTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -313,7 +313,7 @@ TestCall1() }; jlm::tests::CallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -401,7 +401,7 @@ TestCall2() }; jlm::tests::CallTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -494,7 +494,7 @@ TestIndirectCall() }; jlm::tests::IndirectCallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -641,7 +641,7 @@ TestIndirectCall2() }; jlm::tests::IndirectCallTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -685,7 +685,7 @@ TestGamma() }; jlm::tests::GammaTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -726,7 +726,7 @@ TestTheta() }; 
jlm::tests::ThetaTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -785,7 +785,7 @@ TestDelta1() }; jlm::tests::DeltaTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -845,7 +845,7 @@ TestDelta2() }; jlm::tests::DeltaTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -905,7 +905,7 @@ TestImports() }; jlm::tests::ImportTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -976,7 +976,7 @@ TestPhi1() }; jlm::tests::PhiTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1146,7 +1146,7 @@ TestPhi2() }; jlm::tests::PhiTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1169,7 +1169,7 @@ TestPhiWithDelta() // Assert jlm::tests::PhiWithDeltaTest test; std::unordered_map outputMap; - std::cout << jlm::rvsdg::view(test.graph().root(), outputMap) << std::flush; + std::cout << 
jlm::rvsdg::view(&test.graph().GetRootRegion(), outputMap) << std::flush; auto pointsToGraph = RunSteensgaard(test.module()); std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph, outputMap) << std::flush; @@ -1225,7 +1225,7 @@ TestMemcpy() }; jlm::tests::MemcpyTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -1275,7 +1275,7 @@ TestEscapedMemory1() }; jlm::tests::EscapedMemoryTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1377,7 +1377,7 @@ TestEscapedMemory2() }; jlm::tests::EscapedMemoryTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1425,7 +1425,7 @@ TestEscapedMemory3() }; jlm::tests::EscapedMemoryTest3 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp index 525e69b55..06bc14d98 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp @@ -80,7 +80,7 @@ TestStore1() }; jlm::tests::StoreTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << 
jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -133,7 +133,7 @@ TestStore2() }; jlm::tests::StoreTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -166,7 +166,7 @@ TestLoad1() }; jlm::tests::LoadTest1 test; - // jlm::rvsdg::view(test.graph()->root(), stdout); + // jlm::rvsdg::view(test.graph()->GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -206,7 +206,7 @@ TestLoad2() }; jlm::tests::LoadTest2 test; - // jlm::rvsdg::view(test.graph()->root(), stdout); + // jlm::rvsdg::view(test.graph()->GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -233,7 +233,7 @@ TestLoadFromUndef() }; jlm::tests::LoadFromUndefTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -266,7 +266,7 @@ TestGetElementPtr() }; jlm::tests::GetElementPtrTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -298,7 +298,7 @@ TestBitCast() }; jlm::tests::BitCastTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -332,7 +332,7 @@ TestConstantPointerNull() }; jlm::tests::ConstantPointerNullTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); 
+ // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -369,7 +369,7 @@ TestBits2Ptr() }; jlm::tests::Bits2PtrTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph) << std::flush; @@ -434,7 +434,7 @@ TestCall1() }; jlm::tests::CallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -491,7 +491,7 @@ TestCall2() }; jlm::tests::CallTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -543,7 +543,7 @@ TestIndirectCall() }; jlm::tests::IndirectCallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -573,7 +573,7 @@ TestIndirectCall2() }; jlm::tests::IndirectCallTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -608,7 +608,7 @@ TestExternalCall1() }; jlm::tests::ExternalCallTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph) 
<< std::flush; @@ -622,7 +622,7 @@ TestExternalCall2() // Arrange jlm::tests::ExternalCallTest2 test; std::unordered_map outputMap; - std::cout << jlm::rvsdg::view(test.graph().root(), outputMap) << std::flush; + std::cout << jlm::rvsdg::view(&test.graph().GetRootRegion(), outputMap) << std::flush; // Act auto pointsToGraph = RunSteensgaard(test.module()); @@ -680,7 +680,7 @@ TestGamma() }; jlm::tests::GammaTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -719,7 +719,7 @@ TestTheta() }; jlm::tests::ThetaTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -764,7 +764,7 @@ TestDelta1() }; jlm::tests::DeltaTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -814,7 +814,7 @@ TestDelta2() }; jlm::tests::DeltaTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*ptg); @@ -864,7 +864,7 @@ TestImports() }; jlm::tests::ImportTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*ptg); @@ -916,7 +916,7 @@ TestPhi1() }; jlm::tests::PhiTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto ptg = 
RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*PointsToGraph); @@ -947,7 +947,7 @@ TestExternalMemory() }; jlm::tests::ExternalMemoryTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -988,7 +988,7 @@ TestEscapedMemory1() }; jlm::tests::EscapedMemoryTest1 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1037,7 +1037,7 @@ TestEscapedMemory2() }; jlm::tests::EscapedMemoryTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1070,7 +1070,7 @@ TestEscapedMemory3() }; jlm::tests::EscapedMemoryTest3 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1109,7 +1109,7 @@ TestMemcpy() jlm::tests::MemcpyTest test; std::unordered_map outputMap; - std::cout << jlm::rvsdg::view(test.graph().root(), outputMap) << std::flush; + std::cout << jlm::rvsdg::view(&test.graph().GetRootRegion(), outputMap) << std::flush; /* * Act @@ -1128,7 +1128,7 @@ TestMemcpy2() { // Arrange jlm::tests::MemcpyTest2 test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); // Act auto pointsToGraph = RunSteensgaard(test.module()); @@ -1170,7 +1170,7 @@ TestMemcpy3() // Arrange jlm::tests::MemcpyTest3 test; std::unordered_map outputMap; - 
std::cout << jlm::rvsdg::view(test.graph().root(), outputMap) << std::flush; + std::cout << jlm::rvsdg::view(&test.graph().GetRootRegion(), outputMap) << std::flush; // Act auto pointsToGraph = RunSteensgaard(test.module()); @@ -1224,7 +1224,7 @@ TestLinkedList() }; jlm::tests::LinkedListTest test; - // jlm::rvsdg::view(test.graph().root(), stdout); + // jlm::rvsdg::view(test.graph().GetRootRegion(), stdout); auto pointsToGraph = RunSteensgaard(test.module()); // std::cout << jlm::llvm::aa::PointsToGraph::ToDot(*pointsToGraph); @@ -1277,7 +1277,7 @@ TestVariadicFunction2() // Arrange jlm::tests::VariadicFunctionTest2 test; - std::cout << jlm::rvsdg::view(test.module().Rvsdg().root(), outputMap) << std::flush; + std::cout << jlm::rvsdg::view(&test.module().Rvsdg().GetRootRegion(), outputMap) << std::flush; // Act auto pointsToGraph = RunSteensgaard(test.module()); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index ad8e4e3d3..446307e9b 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -28,22 +28,22 @@ test_simple() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); - auto n1 = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; - auto n2 = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; + auto n1 = jlm::tests::create_testop(&graph.GetRootRegion(), {}, { vt })[0]; + auto n2 = jlm::tests::create_testop(&graph.GetRootRegion(), {}, { vt })[0]; - auto u1 = jlm::tests::create_testop(graph.root(), { z }, { vt })[0]; + auto u1 = jlm::tests::create_testop(&graph.GetRootRegion(), { z }, { vt })[0]; - auto b1 = 
jlm::tests::create_testop(graph.root(), { x, y }, { vt })[0]; - auto b2 = jlm::tests::create_testop(graph.root(), { x, y }, { vt })[0]; - auto b3 = jlm::tests::create_testop(graph.root(), { n1, z }, { vt })[0]; - auto b4 = jlm::tests::create_testop(graph.root(), { n2, z }, { vt })[0]; + auto b1 = jlm::tests::create_testop(&graph.GetRootRegion(), { x, y }, { vt })[0]; + auto b2 = jlm::tests::create_testop(&graph.GetRootRegion(), { x, y }, { vt })[0]; + auto b3 = jlm::tests::create_testop(&graph.GetRootRegion(), { n1, z }, { vt })[0]; + auto b4 = jlm::tests::create_testop(&graph.GetRootRegion(), { n2, z }, { vt })[0]; GraphExport::Create(*n1, "n1"); GraphExport::Create(*n2, "n2"); @@ -53,14 +53,14 @@ test_simple() GraphExport::Create(*b3, "b3"); GraphExport::Create(*b4, "b4"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - assert(graph.root()->result(0)->origin() == graph.root()->result(1)->origin()); - assert(graph.root()->result(3)->origin() == graph.root()->result(4)->origin()); - assert(graph.root()->result(5)->origin() == graph.root()->result(6)->origin()); + assert(graph.GetRootRegion().result(0)->origin() == graph.GetRootRegion().result(1)->origin()); + assert(graph.GetRootRegion().result(3)->origin() == graph.GetRootRegion().result(4)->origin()); + assert(graph.GetRootRegion().result(5)->origin() == graph.GetRootRegion().result(6)->origin()); } static inline void @@ -73,7 +73,7 @@ test_gamma() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); @@ -81,8 +81,8 @@ test_gamma() auto y = 
&jlm::tests::GraphImport::Create(graph, vt, "y"); auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); - auto u1 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; - auto u2 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; + auto u1 = jlm::tests::create_testop(&graph.GetRootRegion(), { x }, { vt })[0]; + auto u2 = jlm::tests::create_testop(&graph.GetRootRegion(), { x }, { vt })[0]; auto gamma = jlm::rvsdg::GammaNode::create(c, 2); @@ -108,10 +108,10 @@ test_gamma() GraphExport::Create(*gamma->output(1), "x2"); GraphExport::Create(*gamma->output(2), "y"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); auto subregion0 = gamma->subregion(0); auto subregion1 = gamma->subregion(1); @@ -120,7 +120,7 @@ test_gamma() assert(subregion0->result(3)->origin() == subregion0->result(4)->origin()); assert(subregion0->result(3)->origin() == subregion0->result(5)->origin()); assert(subregion1->result(0)->origin() == subregion1->result(1)->origin()); - assert(graph.root()->result(0)->origin() == graph.root()->result(1)->origin()); + assert(graph.GetRootRegion().result(0)->origin() == graph.GetRootRegion().result(1)->origin()); auto argument0 = dynamic_cast(subregion0->result(6)->origin()); @@ -139,13 +139,13 @@ test_theta() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); auto lv1 = 
theta->add_loopvar(c); @@ -167,10 +167,10 @@ test_theta() GraphExport::Create(*theta->output(2), "lv3"); GraphExport::Create(*theta->output(3), "lv4"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); auto un1 = jlm::rvsdg::output::GetNode(*u1); auto un2 = jlm::rvsdg::output::GetNode(*u2); @@ -179,7 +179,7 @@ test_theta() assert(bn1->input(0)->origin() == un1->input(0)->origin()); assert(bn1->input(1)->origin() == region->argument(3)); assert(region->result(2)->origin() == region->result(3)->origin()); - assert(graph.root()->result(0)->origin() == graph.root()->result(1)->origin()); + assert(graph.GetRootRegion().result(0)->origin() == graph.GetRootRegion().result(1)->origin()); } static inline void @@ -192,13 +192,13 @@ test_theta2() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); auto lv1 = theta->add_loopvar(c); @@ -236,13 +236,13 @@ test_theta3() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta1 = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta1 = 
jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto r1 = theta1->subregion(); auto lv1 = theta1->add_loopvar(c); @@ -295,14 +295,14 @@ test_theta4() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); auto lv1 = theta->add_loopvar(c); @@ -348,14 +348,14 @@ test_theta5() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); auto lv0 = theta->add_loopvar(c); @@ -395,12 +395,12 @@ test_lambda() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto lambda = lambda::node::create(graph.root(), ft, "f", linkage::external_linkage); + auto lambda = lambda::node::create(&graph.GetRootRegion(), ft, "f", linkage::external_linkage); auto d1 = lambda->AddContextVar(*x).inner; auto d2 = 
lambda->AddContextVar(*x).inner; @@ -411,10 +411,10 @@ test_lambda() GraphExport::Create(*output, "f"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); auto bn1 = jlm::rvsdg::output::GetNode(*b1); assert(bn1->input(0)->origin() == bn1->input(1)->origin()); @@ -430,13 +430,13 @@ test_phi() RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); phi::builder pb; - pb.begin(graph.root()); + pb.begin(&graph.GetRootRegion()); auto region = pb.subregion(); auto d1 = pb.add_ctxvar(x); @@ -461,10 +461,10 @@ test_phi() GraphExport::Create(*phi->output(0), "f1"); GraphExport::Create(*phi->output(1), "f2"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); assert( jlm::rvsdg::AssertGetOwnerNode(*f1).input(0)->origin() diff --git a/tests/jlm/llvm/opt/test-inlining.cpp b/tests/jlm/llvm/opt/test-inlining.cpp index d6c70d0dd..237ef8b49 100644 --- a/tests/jlm/llvm/opt/test-inlining.cpp +++ b/tests/jlm/llvm/opt/test-inlining.cpp @@ -37,7 +37,8 @@ test1() { vt, iostatetype::Create(), MemoryStateType::Create() }, { vt, iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph.GetRootRegion(), functionType, "f1", linkage::external_linkage); lambda->AddContextVar(*i); auto t = jlm::tests::test_op::create( @@ -62,7 
+63,8 @@ test1() MemoryStateType::Create() }, { vt, iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph.GetRootRegion(), functionType, "f1", linkage::external_linkage); auto d = lambda->AddContextVar(*f1).inner; auto controlArgument = lambda->GetFunctionArguments()[0]; auto valueArgument = lambda->GetFunctionArguments()[1]; @@ -98,15 +100,15 @@ test1() GraphExport::Create(*f2, "f2"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); // Act jlm::llvm::fctinline fctinline; fctinline.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); // Assert - assert(!jlm::rvsdg::Region::Contains(*graph.root(), true)); + assert(!jlm::rvsdg::Region::Contains(graph.GetRootRegion(), true)); } static void @@ -134,7 +136,8 @@ test2() auto SetupF1 = [&](const std::shared_ptr & functionType) { - auto lambda = lambda::node::create(graph.root(), functionType, "f1", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph.GetRootRegion(), functionType, "f1", linkage::external_linkage); return lambda->finalize( { lambda->GetFunctionArguments()[1], lambda->GetFunctionArguments()[2] }); }; @@ -147,7 +150,8 @@ test2() { iostatetype::Create(), MemoryStateType::Create() }, { iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = lambda::node::create(graph.root(), functionType, "f2", linkage::external_linkage); + auto lambda = + lambda::node::create(&graph.GetRootRegion(), functionType, "f2", linkage::external_linkage); auto cvi = lambda->AddContextVar(*i).inner; auto cvf1 = lambda->AddContextVar(*f1).inner; auto iOStateArgument = lambda->GetFunctionArguments()[0]; @@ -164,12 +168,12 @@ test2() GraphExport::Create(*f2, "f2"); - jlm::rvsdg::view(graph.root(), stdout); + 
jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act jlm::llvm::fctinline fctinline; fctinline.run(rm, statisticsCollector); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert // Function f1 should not have been inlined. diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index a2ac53058..e37700d68 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -29,7 +29,7 @@ test1() auto y = &jlm::tests::GraphImport::Create(graph, vt, "y"); auto z = &jlm::tests::GraphImport::Create(graph, vt, "z"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lvx = theta->add_loopvar(x); auto lvy = theta->add_loopvar(y); @@ -65,10 +65,10 @@ test1() auto & ex2 = GraphExport::Create(*theta->output(1), "y"); auto & ex3 = GraphExport::Create(*theta->output(2), "z"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::tginversion tginversion; tginversion.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex1.origin()))); assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex2.origin()))); @@ -85,7 +85,7 @@ test2() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lv1 = theta->add_loopvar(x); @@ -112,10 +112,10 @@ test2() auto & ex = GraphExport::Create(*theta->output(0), "x"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::tginversion tginversion; tginversion.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), 
stdout); assert(jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*ex.origin()))); } diff --git a/tests/jlm/llvm/opt/test-pull.cpp b/tests/jlm/llvm/opt/test-pull.cpp index 2de4c8f79..c6bb08f3d 100644 --- a/tests/jlm/llvm/opt/test-pull.cpp +++ b/tests/jlm/llvm/opt/test-pull.cpp @@ -33,11 +33,11 @@ test_pullin_top() auto c = &jlm::tests::GraphImport::Create(graph, ct, "c"); auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto n1 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; - auto n2 = jlm::tests::create_testop(graph.root(), { x }, { vt })[0]; - auto n3 = jlm::tests::create_testop(graph.root(), { n2 }, { vt })[0]; - auto n4 = jlm::tests::create_testop(graph.root(), { c, n1 }, { ct })[0]; - auto n5 = jlm::tests::create_testop(graph.root(), { n1, n3 }, { vt })[0]; + auto n1 = jlm::tests::create_testop(&graph.GetRootRegion(), { x }, { vt })[0]; + auto n2 = jlm::tests::create_testop(&graph.GetRootRegion(), { x }, { vt })[0]; + auto n3 = jlm::tests::create_testop(&graph.GetRootRegion(), { n2 }, { vt })[0]; + auto n4 = jlm::tests::create_testop(&graph.GetRootRegion(), { c, n1 }, { ct })[0]; + auto n5 = jlm::tests::create_testop(&graph.GetRootRegion(), { n1, n3 }, { vt })[0]; auto gamma = jlm::rvsdg::GammaNode::create(n4, 2); @@ -71,8 +71,8 @@ test_pullin_bottom() auto ev = gamma->AddEntryVar(x); gamma->AddExitVar(ev.branchArgument); - auto b1 = jlm::tests::create_testop(graph.root(), { gamma->output(0), x }, { vt })[0]; - auto b2 = jlm::tests::create_testop(graph.root(), { gamma->output(0), b1 }, { vt })[0]; + auto b1 = jlm::tests::create_testop(&graph.GetRootRegion(), { gamma->output(0), x }, { vt })[0]; + auto b2 = jlm::tests::create_testop(&graph.GetRootRegion(), { gamma->output(0), b1 }, { vt })[0]; auto & xp = jlm::llvm::GraphExport::Create(*b2, "x"); @@ -95,7 +95,7 @@ test_pull() auto p = &jlm::tests::GraphImport::Create(graph, jlm::rvsdg::ControlType::Create(2), ""); - auto croot = jlm::tests::create_testop(graph.root(), {}, { vt })[0]; + auto 
croot = jlm::tests::create_testop(&graph.GetRootRegion(), {}, { vt })[0]; /* outer gamma */ auto gamma1 = jlm::rvsdg::GammaNode::create(p, 2); @@ -118,10 +118,10 @@ test_pull() jlm::rvsdg::view(graph, stdout); jlm::llvm::pullin pullin; pullin.run(rm, statisticsCollector); - graph.prune(); + graph.PruneNodes(); jlm::rvsdg::view(graph, stdout); - assert(graph.root()->nnodes() == 1); + assert(graph.GetRootRegion().nnodes() == 1); } static int diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index 2be811337..ab4a5f39d 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -48,12 +48,12 @@ test_gamma() GraphExport::Create(*gamma->output(0), "x"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::pushout pushout; pushout.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - assert(graph.root()->nnodes() == 3); + assert(graph.GetRootRegion().nnodes() == 3); } static inline void @@ -74,7 +74,7 @@ test_theta() auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); auto s = &jlm::tests::GraphImport::Create(graph, st, "s"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lv1 = theta->add_loopvar(c); auto lv2 = theta->add_loopvar(x); @@ -96,12 +96,12 @@ test_theta() GraphExport::Create(*theta->output(0), "c"); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::pushout pushout; pushout.run(rm, statisticsCollector); - // jlm::rvsdg::view(graph.root(), stdout); + // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - assert(graph.root()->nnodes() == 3); + assert(graph.GetRootRegion().nnodes() == 3); } static inline void @@ -119,7 +119,7 @@ test_push_theta_bottom() auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); auto s = 
&jlm::tests::GraphImport::Create(graph, mt, "s"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lvc = theta->add_loopvar(c); auto lva = theta->add_loopvar(a); diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index 034b06e80..f901dd3e4 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -45,7 +45,7 @@ create_theta( auto graph = init->region()->graph(); - auto theta = ThetaNode::create(graph->root()); + auto theta = ThetaNode::create(&graph->GetRootRegion()); auto subregion = theta->subregion(); auto idv = theta->add_loopvar(init); auto lvs = theta->add_loopvar(step); @@ -92,19 +92,19 @@ test_unrollinfo() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto init0 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); - auto init1 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 1); - auto initm1 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0xFFFFFFFF); + auto init0 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); + auto init1 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 1); + auto initm1 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0xFFFFFFFF); - auto step1 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 1); - auto step0 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); - auto stepm1 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0xFFFFFFFF); - auto step2 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 2); + auto step1 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 1); + auto step0 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); + auto stepm1 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0xFFFFFFFF); + auto step2 = 
jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 2); - auto end100 = jlm::rvsdg::create_bitconstant(graph.root(), 32, 100); + auto end100 = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 100); auto theta = create_theta(ult, add, init0, step1, end100); auto ui = jlm::llvm::unrollinfo::create(theta); @@ -146,12 +146,12 @@ test_known_boundaries() { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); - auto step = jlm::rvsdg::create_bitconstant(graph.root(), 32, 1); - auto end = jlm::rvsdg::create_bitconstant(graph.root(), 32, 4); + auto init = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); + auto step = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 1); + auto end = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 4); auto theta = create_theta(ult, add, init, step, end); // jlm::rvsdg::view(graph, stdout); @@ -161,17 +161,17 @@ test_known_boundaries() The unroll factor is greater than or equal the number of iterations. The loop should be fully unrolled and the theta removed. 
*/ - assert(nthetas(graph.root()) == 0); + assert(nthetas(&graph.GetRootRegion()) == 0); } { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); - auto step = jlm::rvsdg::create_bitconstant(graph.root(), 32, 1); - auto end = jlm::rvsdg::create_bitconstant(graph.root(), 32, 100); + auto init = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); + auto step = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 1); + auto end = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 100); auto theta = create_theta(ult, add, init, step, end); // jlm::rvsdg::view(graph, stdout); @@ -181,17 +181,17 @@ test_known_boundaries() The unroll factor is a multiple of the number of iterations. We should only find one (unrolled) theta. */ - assert(nthetas(graph.root()) == 1); + assert(nthetas(&graph.GetRootRegion()) == 1); } { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); - auto step = jlm::rvsdg::create_bitconstant(graph.root(), 32, 1); - auto end = jlm::rvsdg::create_bitconstant(graph.root(), 32, 100); + auto init = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); + auto step = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 1); + auto end = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 100); auto theta = create_theta(ult, add, init, step, end); // jlm::rvsdg::view(graph, stdout); @@ -202,17 +202,17 @@ test_known_boundaries() and we have one remaining iteration. We should find only the unrolled theta and the body of the old theta as epilogue. 
*/ - assert(nthetas(graph.root()) == 1); + assert(nthetas(&graph.GetRootRegion()) == 1); } { jlm::rvsdg::Graph graph; - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 100); - auto step = jlm::rvsdg::create_bitconstant(graph.root(), 32, -1); - auto end = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); + auto init = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 100); + auto step = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, -1); + auto end = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); auto theta = create_theta(sgt, sub, init, step, end); // jlm::rvsdg::view(graph, stdout); @@ -223,7 +223,7 @@ test_known_boundaries() and we have four remaining iterations. We should find two thetas: one unrolled theta and one theta for the residual iterations. */ - assert(nthetas(graph.root()) == 2); + assert(nthetas(&graph.GetRootRegion()) == 2); } } @@ -241,7 +241,7 @@ test_unknown_boundaries() auto x = &jlm::tests::GraphImport::Create(graph, bt, "x"); auto y = &jlm::tests::GraphImport::Create(graph, bt, "y"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lv1 = theta->add_loopvar(x); auto lv2 = theta->add_loopvar(y); @@ -291,15 +291,15 @@ test_nested_theta() jlm::llvm::RvsdgModule rm(jlm::util::filepath(""), "", ""); auto & graph = rm.Rvsdg(); - auto nf = graph.node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph.GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); - auto init = jlm::rvsdg::create_bitconstant(graph.root(), 32, 0); - auto step = jlm::rvsdg::create_bitconstant(graph.root(), 32, 1); - auto end = jlm::rvsdg::create_bitconstant(graph.root(), 32, 97); + auto init = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 0); + 
auto step = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 1); + auto end = jlm::rvsdg::create_bitconstant(&graph.GetRootRegion(), 32, 97); /* Outer loop */ - auto otheta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto otheta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lvo_init = otheta->add_loopvar(init); auto lvo_step = otheta->add_loopvar(step); @@ -407,7 +407,7 @@ test_nested_theta() After unrolling the outher theta four times it should now contain 8 thetas. */ - thetas = find_thetas(graph.root()); + thetas = find_thetas(&graph.GetRootRegion()); assert(thetas.size() == 3 && nthetas(thetas[0]->subregion()) == 8); } diff --git a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp index 7e6e7307a..71ae0c2dc 100644 --- a/tests/jlm/mlir/TestJlmToMlirToJlm.cpp +++ b/tests/jlm/mlir/TestJlmToMlirToJlm.cpp @@ -21,12 +21,12 @@ TestUndef() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { // Create an undef operation std::cout << "Undef Operation" << std::endl; - UndefValueOperation::Create(*graph->root(), jlm::rvsdg::bittype::Create(32)); + UndefValueOperation::Create(graph->GetRootRegion(), jlm::rvsdg::bittype::Create(32)); // Convert the RVSDG to MLIR std::cout << "Convert to MLIR" << std::endl; @@ -48,7 +48,7 @@ TestUndef() std::unique_ptr rootBlock = std::make_unique(); rootBlock->push_back(omega); auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); { using namespace jlm::llvm; diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 0460592a1..4dc2d775d 100644 --- 
a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -21,7 +21,7 @@ TestLambda() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -31,8 +31,11 @@ TestLambda() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -140,7 +143,7 @@ TestAddOperation() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -150,8 +153,11 @@ TestAddOperation() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(32), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -241,7 +247,7 @@ TestComZeroExt() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = 
graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -251,8 +257,11 @@ TestComZeroExt() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::bittype::Create(1), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -385,7 +394,7 @@ TestMatch() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { @@ -395,8 +404,11 @@ TestMatch() { iostatetype::Create(), MemoryStateType::Create() }, { jlm::rvsdg::ControlType::Create(2), iostatetype::Create(), MemoryStateType::Create() }); - auto lambda = - lambda::node::create(graph->root(), functionType, "test", linkage::external_linkage); + auto lambda = lambda::node::create( + &graph->GetRootRegion(), + functionType, + "test", + linkage::external_linkage); auto iOStateArgument = lambda->GetFunctionArguments()[0]; auto memoryStateArgument = lambda->GetFunctionArguments()[1]; @@ -492,16 +504,16 @@ TestGamma() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { // Create a gamma operation std::cout << "Gamma Operation" << std::endl; - auto CtrlConstant = jlm::rvsdg::control_constant(graph->root(), 3, 1); - auto entryvar1 = jlm::rvsdg::create_bitconstant(graph->root(), 32, 5); - auto entryvar2 = 
jlm::rvsdg::create_bitconstant(graph->root(), 32, 6); + auto CtrlConstant = jlm::rvsdg::control_constant(&graph->GetRootRegion(), 3, 1); + auto entryvar1 = jlm::rvsdg::create_bitconstant(&graph->GetRootRegion(), 32, 5); + auto entryvar2 = jlm::rvsdg::create_bitconstant(&graph->GetRootRegion(), 32, 6); auto rvsdgGammaNode = jlm::rvsdg::GammaNode::create( CtrlConstant, // predicate 3 // nalternatives @@ -611,14 +623,14 @@ TestTheta() auto rvsdgModule = RvsdgModule::Create(jlm::util::filepath(""), "", ""); auto graph = &rvsdgModule->Rvsdg(); - auto nf = graph->node_normal_form(typeid(jlm::rvsdg::Operation)); + auto nf = graph->GetNodeNormalForm(typeid(jlm::rvsdg::Operation)); nf->set_mutable(false); { // Create a theta operation std::cout << "Theta Operation" << std::endl; - auto entryvar1 = jlm::rvsdg::create_bitconstant(graph->root(), 32, 5); - auto entryvar2 = jlm::rvsdg::create_bitconstant(graph->root(), 32, 6); - jlm::rvsdg::ThetaNode * rvsdgThetaNode = jlm::rvsdg::ThetaNode::create(graph->root()); + auto entryvar1 = jlm::rvsdg::create_bitconstant(&graph->GetRootRegion(), 32, 5); + auto entryvar2 = jlm::rvsdg::create_bitconstant(&graph->GetRootRegion(), 32, 6); + jlm::rvsdg::ThetaNode * rvsdgThetaNode = jlm::rvsdg::ThetaNode::create(&graph->GetRootRegion()); auto predicate = jlm::rvsdg::control_constant(rvsdgThetaNode->subregion(), 2, 0); diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index bf0ab491e..348bf174b 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -108,7 +108,7 @@ TestLambda() // Convert the MLIR to RVSDG and check the result std::cout << "Converting MLIR to RVSDG" << std::endl; auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); { using namespace jlm::rvsdg; std::cout << "Checking 
the result" << std::endl; @@ -257,7 +257,7 @@ TestDivOperation() // Convert the MLIR to RVSDG and check the result std::cout << "Converting MLIR to RVSDG" << std::endl; auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); jlm::rvsdg::view(region, stdout); @@ -435,7 +435,7 @@ TestCompZeroExt() // Convert the MLIR to RVSDG and check the result std::cout << "Converting MLIR to RVSDG" << std::endl; auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); { using namespace jlm::rvsdg; @@ -652,7 +652,7 @@ TestMatchOp() std::unique_ptr rootBlock = std::make_unique(); rootBlock->push_back(omega); auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); { using namespace jlm::rvsdg; @@ -823,7 +823,7 @@ TestGammaOp() std::unique_ptr rootBlock = std::make_unique(); rootBlock->push_back(omega); auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); { using namespace jlm::rvsdg; @@ -972,7 +972,7 @@ TestThetaOp() std::unique_ptr rootBlock = std::make_unique(); rootBlock->push_back(omega); auto rvsdgModule = jlm::mlir::MlirToJlmConverter::CreateAndConvert(rootBlock); - auto region = rvsdgModule->Rvsdg().root(); + auto region = &rvsdgModule->Rvsdg().GetRootRegion(); { using namespace jlm::rvsdg; diff --git a/tests/jlm/rvsdg/ArgumentTests.cpp b/tests/jlm/rvsdg/ArgumentTests.cpp index ee8f13bc9..633cdf0a0 100644 --- a/tests/jlm/rvsdg/ArgumentTests.cpp +++ b/tests/jlm/rvsdg/ArgumentTests.cpp @@ -24,8 +24,8 @@ ArgumentNodeMismatch() Graph graph; auto import = &jlm::tests::GraphImport::Create(graph, 
valueType, "import"); - auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); - auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); + auto structuralNode1 = jlm::tests::structural_node::create(&graph.GetRootRegion(), 1); + auto structuralNode2 = jlm::tests::structural_node::create(&graph.GetRootRegion(), 2); auto structuralInput = StructuralInput::create(structuralNode1, import, valueType); @@ -61,7 +61,7 @@ ArgumentInputTypeMismatch() jlm::rvsdg::Graph rvsdg; auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "import"); - auto structuralNode = structural_node::create(rvsdg.root(), 1); + auto structuralNode = structural_node::create(&rvsdg.GetRootRegion(), 1); auto structuralInput = jlm::rvsdg::StructuralInput::create(structuralNode, x, valueType); // Act & Assert diff --git a/tests/jlm/rvsdg/RegionTests.cpp b/tests/jlm/rvsdg/RegionTests.cpp index 5e7d35918..6a1d499ae 100644 --- a/tests/jlm/rvsdg/RegionTests.cpp +++ b/tests/jlm/rvsdg/RegionTests.cpp @@ -22,7 +22,7 @@ IteratorRanges() jlm::rvsdg::Graph graph; - auto structuralNode = structural_node::create(graph.root(), 1); + auto structuralNode = structural_node::create(&graph.GetRootRegion(), 1); auto & subregion = *structuralNode->subregion(0); auto & constSubregion = *static_cast(structuralNode->subregion(0)); @@ -94,23 +94,23 @@ Contains() jlm::rvsdg::Graph graph; auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); - auto structuralNode1 = structural_node::create(graph.root(), 1); + auto structuralNode1 = structural_node::create(&graph.GetRootRegion(), 1); auto structuralInput1 = jlm::rvsdg::StructuralInput::create(structuralNode1, import, valueType); auto & regionArgument1 = TestGraphArgument::Create(*structuralNode1->subregion(0), structuralInput1, valueType); unary_op::create(structuralNode1->subregion(0), valueType, ®ionArgument1, valueType); - auto structuralNode2 = structural_node::create(graph.root(), 1); + auto 
structuralNode2 = structural_node::create(&graph.GetRootRegion(), 1); auto structuralInput2 = jlm::rvsdg::StructuralInput::create(structuralNode2, import, valueType); auto & regionArgument2 = TestGraphArgument::Create(*structuralNode2->subregion(0), structuralInput2, valueType); binary_op::create(valueType, valueType, ®ionArgument2, ®ionArgument2); // Act & Assert - assert(jlm::rvsdg::Region::Contains(*graph.root(), false)); - assert(jlm::rvsdg::Region::Contains(*graph.root(), true)); - assert(jlm::rvsdg::Region::Contains(*graph.root(), true)); - assert(!jlm::rvsdg::Region::Contains(*graph.root(), true)); + assert(jlm::rvsdg::Region::Contains(graph.GetRootRegion(), false)); + assert(jlm::rvsdg::Region::Contains(graph.GetRootRegion(), true)); + assert(jlm::rvsdg::Region::Contains(graph.GetRootRegion(), true)); + assert(!jlm::rvsdg::Region::Contains(graph.GetRootRegion(), true)); return 0; } @@ -126,10 +126,10 @@ IsRootRegion() // Arrange jlm::rvsdg::Graph graph; - auto structuralNode = jlm::tests::structural_node::create(graph.root(), 1); + auto structuralNode = jlm::tests::structural_node::create(&graph.GetRootRegion(), 1); // Act & Assert - assert(graph.root()->IsRootRegion()); + assert(graph.GetRootRegion().IsRootRegion()); assert(!structuralNode->subregion(0)->IsRootRegion()); return 0; @@ -149,7 +149,7 @@ NumRegions_EmptyRvsdg() Graph graph; // Act & Assert - assert(Region::NumRegions(*graph.root()) == 1); + assert(Region::NumRegions(graph.GetRootRegion()) == 1); return 0; } @@ -166,12 +166,12 @@ NumRegions_NonEmptyRvsdg() // Arrange const Graph graph; - auto structuralNode = jlm::tests::structural_node::create(graph.root(), 4); + auto structuralNode = jlm::tests::structural_node::create(&graph.GetRootRegion(), 4); jlm::tests::structural_node::create(structuralNode->subregion(0), 2); jlm::tests::structural_node::create(structuralNode->subregion(3), 5); // Act & Assert - assert(Region::NumRegions(*graph.root()) == 1 + 4 + 2 + 5); + 
assert(Region::NumRegions(graph.GetRootRegion()) == 1 + 4 + 2 + 5); return 0; } @@ -188,7 +188,7 @@ RemoveResultsWhere() // Arrange jlm::rvsdg::Graph rvsdg; - jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); + jlm::rvsdg::Region region(&rvsdg.GetRootRegion(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); auto node = jlm::tests::test_op::Create(®ion, {}, {}, { valueType }); @@ -243,7 +243,7 @@ RemoveArgumentsWhere() // Arrange jlm::rvsdg::Graph rvsdg; - jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); + jlm::rvsdg::Region region(&rvsdg.GetRootRegion(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); auto & argument0 = TestGraphArgument::Create(region, nullptr, valueType); @@ -297,7 +297,7 @@ PruneArguments() // Arrange jlm::rvsdg::Graph rvsdg; - jlm::rvsdg::Region region(rvsdg.root(), &rvsdg); + jlm::rvsdg::Region region(&rvsdg.GetRootRegion(), &rvsdg); auto valueType = jlm::tests::valuetype::Create(); auto & argument0 = TestGraphArgument::Create(region, nullptr, valueType); @@ -336,7 +336,7 @@ ToTree_EmptyRvsdg() Graph rvsdg; // Act - auto tree = Region::ToTree(*rvsdg.root()); + auto tree = Region::ToTree(rvsdg.GetRootRegion()); std::cout << tree << std::flush; // Assert @@ -357,10 +357,12 @@ ToTree_EmptyRvsdgWithAnnotations() Graph rvsdg; AnnotationMap annotationMap; - annotationMap.AddAnnotation(rvsdg.root(), Annotation("NumNodes", rvsdg.root()->nnodes())); + annotationMap.AddAnnotation( + &rvsdg.GetRootRegion(), + Annotation("NumNodes", rvsdg.GetRootRegion().nnodes())); // Act - auto tree = Region::ToTree(*rvsdg.root(), annotationMap); + auto tree = Region::ToTree(rvsdg.GetRootRegion(), annotationMap); std::cout << tree << std::flush; // Assert @@ -380,12 +382,12 @@ ToTree_RvsdgWithStructuralNodes() // Arrange Graph rvsdg; - auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 2); + auto structuralNode = jlm::tests::structural_node::create(&rvsdg.GetRootRegion(), 2); 
jlm::tests::structural_node::create(structuralNode->subregion(0), 1); jlm::tests::structural_node::create(structuralNode->subregion(1), 3); // Act - auto tree = Region::ToTree(*rvsdg.root()); + auto tree = Region::ToTree(rvsdg.GetRootRegion()); std::cout << tree << std::flush; // Assert @@ -413,7 +415,7 @@ ToTree_RvsdgWithStructuralNodesAndAnnotations() // Arrange Graph rvsdg; - auto structuralNode1 = jlm::tests::structural_node::create(rvsdg.root(), 2); + auto structuralNode1 = jlm::tests::structural_node::create(&rvsdg.GetRootRegion(), 2); auto structuralNode2 = jlm::tests::structural_node::create(structuralNode1->subregion(1), 3); auto subregion2 = structuralNode2->subregion(2); @@ -422,7 +424,7 @@ ToTree_RvsdgWithStructuralNodesAndAnnotations() annotationMap.AddAnnotation(subregion2, Annotation("NumArguments", subregion2->narguments())); // Act - auto tree = Region::ToTree(*rvsdg.root(), annotationMap); + auto tree = Region::ToTree(rvsdg.GetRootRegion(), annotationMap); std::cout << tree << std::flush; // Assert @@ -455,27 +457,27 @@ BottomNodeTests() // Act & Assert // A newly created node without any users should automatically be added to the bottom nodes - auto structuralNode = jlm::tests::structural_node::create(rvsdg.root(), 1); + auto structuralNode = jlm::tests::structural_node::create(&rvsdg.GetRootRegion(), 1); assert(structuralNode->IsDead()); - assert(rvsdg.root()->NumBottomNodes() == 1); - assert(&*(rvsdg.root()->BottomNodes().begin()) == structuralNode); + assert(rvsdg.GetRootRegion().NumBottomNodes() == 1); + assert(&*(rvsdg.GetRootRegion().BottomNodes().begin()) == structuralNode); // The node cedes to be dead auto & output = structuralNode->AddOutput(valueType); jlm::tests::GraphExport::Create(output, "x"); assert(structuralNode->IsDead() == false); - assert(rvsdg.root()->NumBottomNodes() == 0); - assert(rvsdg.root()->BottomNodes().begin() == rvsdg.root()->BottomNodes().end()); + assert(rvsdg.GetRootRegion().NumBottomNodes() == 0); + 
assert(rvsdg.GetRootRegion().BottomNodes().begin() == rvsdg.GetRootRegion().BottomNodes().end()); // And it becomes dead again - rvsdg.root()->RemoveResultsWhere( + rvsdg.GetRootRegion().RemoveResultsWhere( [](const RegionResult &) { return true; }); assert(structuralNode->IsDead()); - assert(rvsdg.root()->NumBottomNodes() == 1); - assert(&*(rvsdg.root()->BottomNodes().begin()) == structuralNode); + assert(rvsdg.GetRootRegion().NumBottomNodes() == 1); + assert(&*(rvsdg.GetRootRegion().BottomNodes().begin()) == structuralNode); return 0; } diff --git a/tests/jlm/rvsdg/ResultTests.cpp b/tests/jlm/rvsdg/ResultTests.cpp index 836e70844..b7a4ce5ef 100644 --- a/tests/jlm/rvsdg/ResultTests.cpp +++ b/tests/jlm/rvsdg/ResultTests.cpp @@ -24,8 +24,8 @@ ResultNodeMismatch() Graph graph; auto import = &jlm::tests::GraphImport::Create(graph, valueType, "import"); - auto structuralNode1 = jlm::tests::structural_node::create(graph.root(), 1); - auto structuralNode2 = jlm::tests::structural_node::create(graph.root(), 2); + auto structuralNode1 = jlm::tests::structural_node::create(&graph.GetRootRegion(), 1); + auto structuralNode2 = jlm::tests::structural_node::create(&graph.GetRootRegion(), 2); auto structuralInput = StructuralInput::create(structuralNode1, import, valueType); @@ -65,7 +65,7 @@ ResultInputTypeMismatch() jlm::rvsdg::Graph rvsdg; - auto structuralNode = structural_node::create(rvsdg.root(), 1); + auto structuralNode = structural_node::create(&rvsdg.GetRootRegion(), 1); auto structuralOutput = jlm::rvsdg::StructuralOutput::create(structuralNode, valueType); // Act & Assert diff --git a/tests/jlm/rvsdg/TestStructuralNode.cpp b/tests/jlm/rvsdg/TestStructuralNode.cpp index 9b40157a0..48ce33aa3 100644 --- a/tests/jlm/rvsdg/TestStructuralNode.cpp +++ b/tests/jlm/rvsdg/TestStructuralNode.cpp @@ -18,7 +18,7 @@ TestOutputRemoval() rvsdg::Graph rvsdg; auto valueType = tests::valuetype::Create(); - auto structuralNode = tests::structural_node::create(rvsdg.root(), 1); + auto 
structuralNode = tests::structural_node::create(&rvsdg.GetRootRegion(), 1); auto output0 = rvsdg::StructuralOutput::create(structuralNode, valueType); auto output1 = rvsdg::StructuralOutput::create(structuralNode, valueType); auto output2 = rvsdg::StructuralOutput::create(structuralNode, valueType); diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 1a6dbcead..692c1bd11 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -20,8 +20,8 @@ types_bitstring_arithmetic_test_bitand() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 3); - auto c1 = create_bitconstant(graph.root(), 32, 5); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); auto and0 = bitand_op::create(32, s0, s1); auto and1 = bitand_op::create(32, c0, c1); @@ -29,8 +29,8 @@ types_bitstring_arithmetic_test_bitand() jlm::tests::GraphExport::Create(*and0, "dummy"); jlm::tests::GraphExport::Create(*and1, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*and0)->GetOperation() == bitand_op(32)); assert(output::GetNode(*and1)->GetOperation() == int_constant_op(32, +1)); @@ -48,10 +48,10 @@ types_bitstring_arithmetic_test_bitashr() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 16); - auto c1 = create_bitconstant(graph.root(), 32, -16); - auto c2 = create_bitconstant(graph.root(), 32, 2); - auto c3 = create_bitconstant(graph.root(), 32, 32); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 16); + auto c1 = 
create_bitconstant(&graph.GetRootRegion(), 32, -16); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, 2); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, 32); auto ashr0 = bitashr_op::create(32, s0, s1); auto ashr1 = bitashr_op::create(32, c0, c2); @@ -65,8 +65,8 @@ types_bitstring_arithmetic_test_bitashr() jlm::tests::GraphExport::Create(*ashr3, "dummy"); jlm::tests::GraphExport::Create(*ashr4, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*ashr0)->GetOperation() == bitashr_op(32)); assert(output::GetNode(*ashr1)->GetOperation() == int_constant_op(32, 4)); @@ -91,9 +91,9 @@ types_bitstring_arithmetic_test_bitdifference() jlm::tests::GraphExport::Create(*diff, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*diff)->GetOperation() == bitsub_op(32)); @@ -108,7 +108,7 @@ types_bitstring_arithmetic_test_bitnegate() Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); - auto c0 = create_bitconstant(graph.root(), 32, 3); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); auto neg0 = bitneg_op::create(32, s0); auto neg1 = bitneg_op::create(32, c0); @@ -118,8 +118,8 @@ types_bitstring_arithmetic_test_bitnegate() jlm::tests::GraphExport::Create(*neg1, "dummy"); jlm::tests::GraphExport::Create(*neg2, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*neg0)->GetOperation() == bitneg_op(32)); assert(output::GetNode(*neg1)->GetOperation() == int_constant_op(32, -3)); @@ -136,7 +136,7 @@ types_bitstring_arithmetic_test_bitnot() Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); - auto c0 = 
create_bitconstant(graph.root(), 32, 3); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); auto not0 = bitnot_op::create(32, s0); auto not1 = bitnot_op::create(32, c0); @@ -146,8 +146,8 @@ types_bitstring_arithmetic_test_bitnot() jlm::tests::GraphExport::Create(*not1, "dummy"); jlm::tests::GraphExport::Create(*not2, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*not0)->GetOperation() == bitnot_op(32)); assert(output::GetNode(*not1)->GetOperation() == int_constant_op(32, -4)); @@ -166,8 +166,8 @@ types_bitstring_arithmetic_test_bitor() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 3); - auto c1 = create_bitconstant(graph.root(), 32, 5); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); auto or0 = bitor_op::create(32, s0, s1); auto or1 = bitor_op::create(32, c0, c1); @@ -175,8 +175,8 @@ types_bitstring_arithmetic_test_bitor() jlm::tests::GraphExport::Create(*or0, "dummy"); jlm::tests::GraphExport::Create(*or1, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*or0)->GetOperation() == bitor_op(32)); assert(output::GetNode(*or1)->GetOperation() == uint_constant_op(32, 7)); @@ -194,8 +194,8 @@ types_bitstring_arithmetic_test_bitproduct() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 3); - auto c1 = create_bitconstant(graph.root(), 32, 5); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); 
auto product0 = bitmul_op::create(32, s0, s1); auto product1 = bitmul_op::create(32, c0, c1); @@ -203,9 +203,9 @@ types_bitstring_arithmetic_test_bitproduct() jlm::tests::GraphExport::Create(*product0, "dummy"); jlm::tests::GraphExport::Create(*product1, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*product0)->GetOperation() == bitmul_op(32)); assert(output::GetNode(*product1)->GetOperation() == uint_constant_op(32, 15)); @@ -227,9 +227,9 @@ types_bitstring_arithmetic_test_bitshiproduct() jlm::tests::GraphExport::Create(*shiproduct, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*shiproduct)->GetOperation() == bitsmulh_op(32)); @@ -246,9 +246,9 @@ types_bitstring_arithmetic_test_bitshl() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 16); - auto c1 = create_bitconstant(graph.root(), 32, 2); - auto c2 = create_bitconstant(graph.root(), 32, 32); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 16); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 2); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, 32); auto shl0 = bitshl_op::create(32, s0, s1); auto shl1 = bitshl_op::create(32, c0, c1); @@ -258,8 +258,8 @@ types_bitstring_arithmetic_test_bitshl() jlm::tests::GraphExport::Create(*shl1, "dummy"); jlm::tests::GraphExport::Create(*shl2, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*shl0)->GetOperation() == bitshl_op(32)); assert(output::GetNode(*shl1)->GetOperation() == 
uint_constant_op(32, 64)); @@ -278,9 +278,9 @@ types_bitstring_arithmetic_test_bitshr() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 16); - auto c1 = create_bitconstant(graph.root(), 32, 2); - auto c2 = create_bitconstant(graph.root(), 32, 32); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 16); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 2); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, 32); auto shr0 = bitshr_op::create(32, s0, s1); auto shr1 = bitshr_op::create(32, c0, c1); @@ -290,8 +290,8 @@ types_bitstring_arithmetic_test_bitshr() jlm::tests::GraphExport::Create(*shr1, "dummy"); jlm::tests::GraphExport::Create(*shr2, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*shr0)->GetOperation() == bitshr_op(32)); assert(output::GetNode(*shr1)->GetOperation() == uint_constant_op(32, 4)); @@ -310,8 +310,8 @@ types_bitstring_arithmetic_test_bitsmod() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, -7); - auto c1 = create_bitconstant(graph.root(), 32, 3); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, -7); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 3); auto smod0 = bitsmod_op::create(32, s0, s1); auto smod1 = bitsmod_op::create(32, c0, c1); @@ -319,9 +319,9 @@ types_bitstring_arithmetic_test_bitsmod() jlm::tests::GraphExport::Create(*smod0, "dummy"); jlm::tests::GraphExport::Create(*smod1, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); 
assert(output::GetNode(*smod0)->GetOperation() == bitsmod_op(32)); assert(output::GetNode(*smod1)->GetOperation() == int_constant_op(32, -1)); @@ -339,8 +339,8 @@ types_bitstring_arithmetic_test_bitsquotient() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 7); - auto c1 = create_bitconstant(graph.root(), 32, -3); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 7); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, -3); auto squot0 = bitsdiv_op::create(32, s0, s1); auto squot1 = bitsdiv_op::create(32, c0, c1); @@ -348,9 +348,9 @@ types_bitstring_arithmetic_test_bitsquotient() jlm::tests::GraphExport::Create(*squot0, "dummy"); jlm::tests::GraphExport::Create(*squot1, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*squot0)->GetOperation() == bitsdiv_op(32)); assert(output::GetNode(*squot1)->GetOperation() == int_constant_op(32, -2)); @@ -368,8 +368,8 @@ types_bitstring_arithmetic_test_bitsum() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 3); - auto c1 = create_bitconstant(graph.root(), 32, 5); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); auto sum0 = bitadd_op::create(32, s0, s1); auto sum1 = bitadd_op::create(32, c0, c1); @@ -377,9 +377,9 @@ types_bitstring_arithmetic_test_bitsum() jlm::tests::GraphExport::Create(*sum0, "dummy"); jlm::tests::GraphExport::Create(*sum1, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + 
view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*sum0)->GetOperation() == bitadd_op(32)); assert(output::GetNode(*sum1)->GetOperation() == int_constant_op(32, 8)); @@ -401,9 +401,9 @@ types_bitstring_arithmetic_test_bituhiproduct() jlm::tests::GraphExport::Create(*uhiproduct, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*uhiproduct)->GetOperation() == bitumulh_op(32)); @@ -420,8 +420,8 @@ types_bitstring_arithmetic_test_bitumod() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 7); - auto c1 = create_bitconstant(graph.root(), 32, 3); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 7); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 3); auto umod0 = bitumod_op::create(32, s0, s1); auto umod1 = bitumod_op::create(32, c0, c1); @@ -429,9 +429,9 @@ types_bitstring_arithmetic_test_bitumod() jlm::tests::GraphExport::Create(*umod0, "dummy"); jlm::tests::GraphExport::Create(*umod1, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*umod0)->GetOperation() == bitumod_op(32)); assert(output::GetNode(*umod1)->GetOperation() == int_constant_op(32, 1)); @@ -449,8 +449,8 @@ types_bitstring_arithmetic_test_bituquotient() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 7); - auto c1 = create_bitconstant(graph.root(), 32, 3); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 7); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 3); 
auto uquot0 = bitudiv_op::create(32, s0, s1); auto uquot1 = bitudiv_op::create(32, c0, c1); @@ -458,9 +458,9 @@ types_bitstring_arithmetic_test_bituquotient() jlm::tests::GraphExport::Create(*uquot0, "dummy"); jlm::tests::GraphExport::Create(*uquot1, "dummy"); - graph.normalize(); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*uquot0)->GetOperation() == bitudiv_op(32)); assert(output::GetNode(*uquot1)->GetOperation() == int_constant_op(32, 2)); @@ -478,8 +478,8 @@ types_bitstring_arithmetic_test_bitxor() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 3); - auto c1 = create_bitconstant(graph.root(), 32, 5); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); auto xor0 = bitxor_op::create(32, s0, s1); auto xor1 = bitxor_op::create(32, c0, c1); @@ -487,8 +487,8 @@ types_bitstring_arithmetic_test_bitxor() jlm::tests::GraphExport::Create(*xor0, "dummy"); jlm::tests::GraphExport::Create(*xor1, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*xor0)->GetOperation() == bitxor_op(32)); assert(output::GetNode(*xor1)->GetOperation() == int_constant_op(32, 6)); @@ -521,9 +521,9 @@ types_bitstring_comparison_test_bitequal() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant_undefined(graph.root(), 32); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = 
create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant_undefined(&graph.GetRootRegion(), 32); auto equal0 = biteq_op::create(32, s0, s1); auto equal1 = biteq_op::create(32, c0, c0); @@ -535,8 +535,8 @@ types_bitstring_comparison_test_bitequal() jlm::tests::GraphExport::Create(*equal2, "dummy"); jlm::tests::GraphExport::Create(*equal3, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*equal0)->GetOperation() == biteq_op(32)); expect_static_true(equal1); @@ -555,9 +555,9 @@ types_bitstring_comparison_test_bitnotequal() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant_undefined(graph.root(), 32); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant_undefined(&graph.GetRootRegion(), 32); auto nequal0 = bitne_op::create(32, s0, s1); auto nequal1 = bitne_op::create(32, c0, c0); @@ -569,8 +569,8 @@ types_bitstring_comparison_test_bitnotequal() jlm::tests::GraphExport::Create(*nequal2, "dummy"); jlm::tests::GraphExport::Create(*nequal3, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*nequal0)->GetOperation() == bitne_op(32)); expect_static_false(nequal1); @@ -589,10 +589,10 @@ types_bitstring_comparison_test_bitsgreater() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = 
create_bitconstant(graph.root(), 32, 0x7fffffffL); - auto c3 = create_bitconstant(graph.root(), 32, (-0x7fffffffL - 1)); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, 0x7fffffffL); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, (-0x7fffffffL - 1)); auto sgreater0 = bitsgt_op::create(32, s0, s1); auto sgreater1 = bitsgt_op::create(32, c0, c1); @@ -606,8 +606,8 @@ types_bitstring_comparison_test_bitsgreater() jlm::tests::GraphExport::Create(*sgreater3, "dummy"); jlm::tests::GraphExport::Create(*sgreater4, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*sgreater0)->GetOperation() == bitsgt_op(32)); expect_static_false(sgreater1); @@ -627,10 +627,10 @@ types_bitstring_comparison_test_bitsgreatereq() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); - auto c3 = create_bitconstant(graph.root(), 32, (-0x7fffffffL - 1)); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, 0x7fffffffL); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, (-0x7fffffffL - 1)); auto sgreatereq0 = bitsge_op::create(32, s0, s1); auto sgreatereq1 = bitsge_op::create(32, c0, c1); @@ -646,8 +646,8 @@ types_bitstring_comparison_test_bitsgreatereq() jlm::tests::GraphExport::Create(*sgreatereq4, "dummy"); jlm::tests::GraphExport::Create(*sgreatereq5, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + 
view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*sgreatereq0)->GetOperation() == bitsge_op(32)); expect_static_false(sgreatereq1); @@ -668,10 +668,10 @@ types_bitstring_comparison_test_bitsless() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); - auto c3 = create_bitconstant(graph.root(), 32, (-0x7fffffffL - 1)); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, 0x7fffffffL); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, (-0x7fffffffL - 1)); auto sless0 = bitslt_op::create(32, s0, s1); auto sless1 = bitslt_op::create(32, c0, c1); @@ -685,8 +685,8 @@ types_bitstring_comparison_test_bitsless() jlm::tests::GraphExport::Create(*sless3, "dummy"); jlm::tests::GraphExport::Create(*sless4, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*sless0)->GetOperation() == bitslt_op(32)); expect_static_true(sless1); @@ -706,10 +706,10 @@ types_bitstring_comparison_test_bitslesseq() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, 0x7fffffffL); - auto c3 = create_bitconstant(graph.root(), 32, (-0x7fffffffL - 1)); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 
32, 0x7fffffffL); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, (-0x7fffffffL - 1)); auto slesseq0 = bitsle_op::create(32, s0, s1); auto slesseq1 = bitsle_op::create(32, c0, c1); @@ -725,8 +725,8 @@ types_bitstring_comparison_test_bitslesseq() jlm::tests::GraphExport::Create(*slesseq4, "dummy"); jlm::tests::GraphExport::Create(*slesseq5, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*slesseq0)->GetOperation() == bitsle_op(32)); expect_static_true(slesseq1); @@ -747,10 +747,10 @@ types_bitstring_comparison_test_bitugreater() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); - auto c3 = create_bitconstant(graph.root(), 32, 0); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, (0xffffffffUL)); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, 0); auto ugreater0 = bitugt_op::create(32, s0, s1); auto ugreater1 = bitugt_op::create(32, c0, c1); @@ -764,8 +764,8 @@ types_bitstring_comparison_test_bitugreater() jlm::tests::GraphExport::Create(*ugreater3, "dummy"); jlm::tests::GraphExport::Create(*ugreater4, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*ugreater0)->GetOperation() == bitugt_op(32)); expect_static_false(ugreater1); @@ -785,10 +785,10 @@ types_bitstring_comparison_test_bitugreatereq() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, 
bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); - auto c3 = create_bitconstant(graph.root(), 32, 0); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, (0xffffffffUL)); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, 0); auto ugreatereq0 = bituge_op::create(32, s0, s1); auto ugreatereq1 = bituge_op::create(32, c0, c1); @@ -804,8 +804,8 @@ types_bitstring_comparison_test_bitugreatereq() jlm::tests::GraphExport::Create(*ugreatereq4, "dummy"); jlm::tests::GraphExport::Create(*ugreatereq5, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*ugreatereq0)->GetOperation() == bituge_op(32)); expect_static_false(ugreatereq1); @@ -826,10 +826,10 @@ types_bitstring_comparison_test_bituless() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); - auto c3 = create_bitconstant(graph.root(), 32, 0); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, (0xffffffffUL)); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, 0); auto uless0 = bitult_op::create(32, s0, s1); auto uless1 = bitult_op::create(32, c0, c1); @@ -843,8 +843,8 @@ types_bitstring_comparison_test_bituless() jlm::tests::GraphExport::Create(*uless3, "dummy"); jlm::tests::GraphExport::Create(*uless4, "dummy"); - 
graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*uless0)->GetOperation() == bitult_op(32)); expect_static_true(uless1); @@ -864,10 +864,10 @@ types_bitstring_comparison_test_bitulesseq() auto s0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "s1"); - auto c0 = create_bitconstant(graph.root(), 32, 4); - auto c1 = create_bitconstant(graph.root(), 32, 5); - auto c2 = create_bitconstant(graph.root(), 32, (0xffffffffUL)); - auto c3 = create_bitconstant(graph.root(), 32, 0); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 4); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 5); + auto c2 = create_bitconstant(&graph.GetRootRegion(), 32, (0xffffffffUL)); + auto c3 = create_bitconstant(&graph.GetRootRegion(), 32, 0); auto ulesseq0 = bitule_op::create(32, s0, s1); auto ulesseq1 = bitule_op::create(32, c0, c1); @@ -883,8 +883,8 @@ types_bitstring_comparison_test_bitulesseq() jlm::tests::GraphExport::Create(*ulesseq4, "dummy"); jlm::tests::GraphExport::Create(*ulesseq5, "dummy"); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + graph.PruneNodes(); + view(&graph.GetRootRegion(), stdout); assert(output::GetNode(*ulesseq0)->GetOperation() == bitule_op(32)); expect_static_true(ulesseq1); @@ -931,10 +931,10 @@ types_bitstring_test_constant() Graph graph; - auto b1 = output::GetNode(*create_bitconstant(graph.root(), "00110011")); - auto b2 = output::GetNode(*create_bitconstant(graph.root(), 8, 204)); - auto b3 = output::GetNode(*create_bitconstant(graph.root(), 8, 204)); - auto b4 = output::GetNode(*create_bitconstant(graph.root(), "001100110")); + auto b1 = output::GetNode(*create_bitconstant(&graph.GetRootRegion(), "00110011")); + auto b2 = output::GetNode(*create_bitconstant(&graph.GetRootRegion(), 8, 204)); + auto b3 = 
output::GetNode(*create_bitconstant(&graph.GetRootRegion(), 8, 204)); + auto b4 = output::GetNode(*create_bitconstant(&graph.GetRootRegion(), "001100110")); assert(b1->GetOperation() == uint_constant_op(8, 204)); assert(b1->GetOperation() == int_constant_op(8, -52)); @@ -948,14 +948,15 @@ types_bitstring_test_constant() assert(b4->GetOperation() == uint_constant_op(9, 204)); assert(b4->GetOperation() == int_constant_op(9, 204)); - auto plus_one_128 = output::GetNode(*create_bitconstant(graph.root(), ONE_64 ZERO_64)); + auto plus_one_128 = output::GetNode(*create_bitconstant(&graph.GetRootRegion(), ONE_64 ZERO_64)); assert(plus_one_128->GetOperation() == uint_constant_op(128, 1)); assert(plus_one_128->GetOperation() == int_constant_op(128, 1)); - auto minus_one_128 = output::GetNode(*create_bitconstant(graph.root(), MONE_64 MONE_64)); + auto minus_one_128 = + output::GetNode(*create_bitconstant(&graph.GetRootRegion(), MONE_64 MONE_64)); assert(minus_one_128->GetOperation() == int_constant_op(128, -1)); - jlm::rvsdg::view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); return 0; } @@ -970,10 +971,10 @@ types_bitstring_test_normalize() bittype bits32(32); auto imp = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), "imp"); - auto c0 = create_bitconstant(graph.root(), 32, 3); - auto c1 = create_bitconstant(graph.root(), 32, 4); + auto c0 = create_bitconstant(&graph.GetRootRegion(), 32, 3); + auto c1 = create_bitconstant(&graph.GetRootRegion(), 32, 4); - auto sum_nf = graph.node_normal_form(typeid(bitadd_op)); + auto sum_nf = graph.GetNodeNormalForm(typeid(bitadd_op)); assert(sum_nf); sum_nf->set_mutable(false); @@ -988,8 +989,8 @@ types_bitstring_test_normalize() auto & exp = jlm::tests::GraphExport::Create(*sum1->output(0), "dummy"); sum_nf->set_mutable(true); - graph.normalize(); - graph.prune(); + graph.Normalize(); + graph.PruneNodes(); auto origin = dynamic_cast(exp.origin()); assert(origin->node()->GetOperation() == bitadd_op(32)); @@ 
-1006,7 +1007,7 @@ types_bitstring_test_normalize() assert(output::GetNode(*op1)->GetOperation() == int_constant_op(32, 3 + 4)); assert(op2 == imp); - jlm::rvsdg::view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); return 0; } @@ -1026,8 +1027,8 @@ types_bitstring_test_reduction() Graph graph; - auto a = create_bitconstant(graph.root(), "1100"); - auto b = create_bitconstant(graph.root(), "1010"); + auto a = create_bitconstant(&graph.GetRootRegion(), "1100"); + auto b = create_bitconstant(&graph.GetRootRegion(), "1010"); assert_constant(bitand_op::create(4, a, b), 4, "1000"); assert_constant(bitor_op::create(4, a, b), 4, "1110"); @@ -1038,7 +1039,7 @@ types_bitstring_test_reduction() assert_constant(bitneg_op::create(4, a), 4, "1011"); assert_constant(bitneg_op::create(4, b), 4, "1101"); - graph.prune(); + graph.PruneNodes(); auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(16), "x"); auto y = &jlm::tests::GraphImport::Create(graph, bittype::Create(16), "y"); @@ -1080,8 +1081,8 @@ types_bitstring_test_slice_concat() Graph graph; - auto base_const1 = create_bitconstant(graph.root(), "00110111"); - auto base_const2 = create_bitconstant(graph.root(), "11001000"); + auto base_const1 = create_bitconstant(&graph.GetRootRegion(), "00110111"); + auto base_const2 = create_bitconstant(&graph.GetRootRegion(), "11001000"); auto base_x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); auto base_y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); @@ -1161,7 +1162,7 @@ types_bitstring_test_slice_concat() { /* CSE */ - auto b = create_bitconstant(graph.root(), "00110111"); + auto b = create_bitconstant(&graph.GetRootRegion(), "00110111"); assert(b == base_const1); auto c = jlm::rvsdg::bitslice(base_x, 2, 6); diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index 792a0eaaf..a0f824952 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -95,21 +95,22 @@ 
FlattenedBinaryReduction() auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i3 = &jlm::tests::GraphImport::Create(graph, vt, ""); - auto o1 = SimpleNode::create_normalized(graph.root(), op, { i0, i1 })[0]; - auto o2 = SimpleNode::create_normalized(graph.root(), op, { o1, i2 })[0]; - auto o3 = SimpleNode::create_normalized(graph.root(), op, { o2, i3 })[0]; + auto o1 = SimpleNode::create_normalized(&graph.GetRootRegion(), op, { i0, i1 })[0]; + auto o2 = SimpleNode::create_normalized(&graph.GetRootRegion(), op, { o1, i2 })[0]; + auto o3 = SimpleNode::create_normalized(&graph.GetRootRegion(), op, { o2, i3 })[0]; auto & ex = jlm::tests::GraphExport::Create(*o3, ""); - graph.prune(); + graph.PruneNodes(); jlm::rvsdg::view(graph, stdout); assert( - graph.root()->nnodes() == 1 && Region::Contains(*graph.root(), false)); + graph.GetRootRegion().nnodes() == 1 + && Region::Contains(graph.GetRootRegion(), false)); flattened_binary_op::reduce(&graph, jlm::rvsdg::flattened_binary_op::reduction::parallel); jlm::rvsdg::view(graph, stdout); - assert(graph.root()->nnodes() == 3); + assert(graph.GetRootRegion().nnodes() == 3); auto node0 = output::GetNode(*ex.origin()); assert(is(node0)); @@ -129,21 +130,22 @@ FlattenedBinaryReduction() auto i2 = &jlm::tests::GraphImport::Create(graph, vt, ""); auto i3 = &jlm::tests::GraphImport::Create(graph, vt, ""); - auto o1 = SimpleNode::create_normalized(graph.root(), op, { i0, i1 })[0]; - auto o2 = SimpleNode::create_normalized(graph.root(), op, { o1, i2 })[0]; - auto o3 = SimpleNode::create_normalized(graph.root(), op, { o2, i3 })[0]; + auto o1 = SimpleNode::create_normalized(&graph.GetRootRegion(), op, { i0, i1 })[0]; + auto o2 = SimpleNode::create_normalized(&graph.GetRootRegion(), op, { o1, i2 })[0]; + auto o3 = SimpleNode::create_normalized(&graph.GetRootRegion(), op, { o2, i3 })[0]; auto & ex = jlm::tests::GraphExport::Create(*o3, ""); - graph.prune(); + graph.PruneNodes(); jlm::rvsdg::view(graph, stdout); assert( - 
graph.root()->nnodes() == 1 && Region::Contains(*graph.root(), false)); + graph.GetRootRegion().nnodes() == 1 + && Region::Contains(graph.GetRootRegion(), false)); flattened_binary_op::reduce(&graph, jlm::rvsdg::flattened_binary_op::reduction::linear); jlm::rvsdg::view(graph, stdout); - assert(graph.root()->nnodes() == 3); + assert(graph.GetRootRegion().nnodes() == 3); auto node0 = output::GetNode(*ex.origin()); assert(is(node0)); @@ -174,8 +176,8 @@ FlattenAssociativeBinaryOperation_NotAssociativeBinary() auto i2 = &jlm::tests::GraphImport::Create(graph, valueType, "i2"); jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::none); - auto o1 = SimpleNode::create(graph.root(), binaryOperation, { i0, i1 }); - auto o2 = SimpleNode::create(graph.root(), binaryOperation, { o1->output(0), i2 }); + auto o1 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { i0, i1 }); + auto o2 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { o1->output(0), i2 }); auto & ex = jlm::tests::GraphExport::Create(*o2->output(0), "o2"); @@ -212,9 +214,10 @@ FlattenAssociativeBinaryOperation_NoNewOperands() jlm::tests::unary_op unaryOperation(valueType, valueType); jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); - auto u1 = SimpleNode::create(graph.root(), unaryOperation, { i0 }); - auto u2 = SimpleNode::create(graph.root(), unaryOperation, { i1 }); - auto b2 = SimpleNode::create(graph.root(), binaryOperation, { u1->output(0), u2->output(0) }); + auto u1 = SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { i0 }); + auto u2 = SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { i1 }); + auto b2 = + SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { u1->output(0), u2->output(0) }); auto & ex = jlm::tests::GraphExport::Create(*b2->output(0), "o2"); @@ -251,8 +254,8 @@ FlattenAssociativeBinaryOperation_Success() auto i2 = &jlm::tests::GraphImport::Create(graph, valueType, 
"i2"); jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); - auto o1 = SimpleNode::create(graph.root(), binaryOperation, { i0, i1 }); - auto o2 = SimpleNode::create(graph.root(), binaryOperation, { o1->output(0), i2 }); + auto o1 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { i0, i1 }); + auto o2 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { o1->output(0), i2 }); auto & ex = jlm::tests::GraphExport::Create(*o2->output(0), "o2"); @@ -290,7 +293,7 @@ NormalizeBinaryOperation_NoNewOperands() auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); - auto o1 = SimpleNode::create(graph.root(), binaryOperation, { i0, i1 }); + auto o1 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { i0, i1 }); auto & ex = jlm::tests::GraphExport::Create(*o1->output(0), "o2"); @@ -327,10 +330,11 @@ NormalizeBinaryOperation_SingleOperand() auto s0 = &jlm::tests::GraphImport::Create(graph, valueType, "s0"); auto s1 = &jlm::tests::GraphImport::Create(graph, valueType, "s1"); - auto u1 = SimpleNode::create(graph.root(), unaryOperation, { s0 }); - auto u2 = SimpleNode::create(graph.root(), unaryOperation, { s1 }); + auto u1 = SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { s0 }); + auto u2 = SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { s1 }); - auto o1 = SimpleNode::create(graph.root(), binaryOperation, { u1->output(0), u2->output(0) }); + auto o1 = + SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { u1->output(0), u2->output(0) }); auto & ex = jlm::tests::GraphExport::Create(*o1->output(0), "ex"); diff --git a/tests/jlm/rvsdg/test-bottomup.cpp b/tests/jlm/rvsdg/test-bottomup.cpp index 3f86e8dac..393fee3bb 100644 --- a/tests/jlm/rvsdg/test-bottomup.cpp +++ b/tests/jlm/rvsdg/test-bottomup.cpp @@ -14,14 +14,14 @@ test_initialization() { jlm::rvsdg::Graph graph; auto 
vtype = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, {}); - auto n2 = jlm::tests::test_op::create(graph.root(), {}, { vtype }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, {}); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { vtype }); jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); bool n1_visited = false; bool n2_visited = false; - for (const auto & node : jlm::rvsdg::bottomup_traverser(graph.root())) + for (const auto & node : jlm::rvsdg::bottomup_traverser(&graph.GetRootRegion())) { if (node == n1) n1_visited = true; @@ -38,14 +38,17 @@ test_basic_traversal() { jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type, type }); + auto n2 = jlm::tests::test_op::create( + &graph.GetRootRegion(), + { n1->output(0), n1->output(1) }, + { type }); jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { jlm::rvsdg::Node * tmp; - jlm::rvsdg::bottomup_traverser trav(graph.root()); + jlm::rvsdg::bottomup_traverser trav(&graph.GetRootRegion()); tmp = trav.next(); assert(tmp == n2); tmp = trav.next(); @@ -62,13 +65,16 @@ test_order_enforcement_traversal() { jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); - auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0), n1->output(1) }, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type, type }); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n1->output(0) }, { type }); + auto n3 = jlm::tests::test_op::create( + 
&graph.GetRootRegion(), + { n2->output(0), n1->output(1) }, + { type }); jlm::rvsdg::Node * tmp; { - jlm::rvsdg::bottomup_traverser trav(graph.root()); + jlm::rvsdg::bottomup_traverser trav(&graph.GetRootRegion()); tmp = trav.next(); assert(tmp == n3); diff --git a/tests/jlm/rvsdg/test-cse.cpp b/tests/jlm/rvsdg/test-cse.cpp index 929f7fbd4..566aab388 100644 --- a/tests/jlm/rvsdg/test-cse.cpp +++ b/tests/jlm/rvsdg/test-cse.cpp @@ -17,39 +17,39 @@ test_main() Graph graph; auto i = &jlm::tests::GraphImport::Create(graph, t, "i"); - auto o1 = jlm::tests::test_op::create(graph.root(), {}, { t })->output(0); - auto o2 = jlm::tests::test_op::create(graph.root(), { i }, { t })->output(0); + auto o1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { t })->output(0); + auto o2 = jlm::tests::test_op::create(&graph.GetRootRegion(), { i }, { t })->output(0); auto & e1 = jlm::tests::GraphExport::Create(*o1, "o1"); auto & e2 = jlm::tests::GraphExport::Create(*o2, "o2"); auto nf = dynamic_cast( - graph.node_normal_form(typeid(jlm::tests::test_op))); + graph.GetNodeNormalForm(typeid(jlm::tests::test_op))); nf->set_mutable(false); - auto o3 = jlm::tests::create_testop(graph.root(), {}, { t })[0]; - auto o4 = jlm::tests::create_testop(graph.root(), { i }, { t })[0]; + auto o3 = jlm::tests::create_testop(&graph.GetRootRegion(), {}, { t })[0]; + auto o4 = jlm::tests::create_testop(&graph.GetRootRegion(), { i }, { t })[0]; auto & e3 = jlm::tests::GraphExport::Create(*o3, "o3"); auto & e4 = jlm::tests::GraphExport::Create(*o4, "o4"); nf->set_mutable(true); - graph.normalize(); + graph.Normalize(); assert(e1.origin() == e3.origin()); assert(e2.origin() == e4.origin()); - auto o5 = jlm::tests::create_testop(graph.root(), {}, { t })[0]; + auto o5 = jlm::tests::create_testop(&graph.GetRootRegion(), {}, { t })[0]; assert(o5 == e1.origin()); - auto o6 = jlm::tests::create_testop(graph.root(), { i }, { t })[0]; + auto o6 = jlm::tests::create_testop(&graph.GetRootRegion(), { i }, { t 
})[0]; assert(o6 == e2.origin()); nf->set_cse(false); - auto o7 = jlm::tests::create_testop(graph.root(), {}, { t })[0]; + auto o7 = jlm::tests::create_testop(&graph.GetRootRegion(), {}, { t })[0]; assert(o7 != e1.origin()); - graph.normalize(); + graph.Normalize(); assert(o7 != e1.origin()); return 0; diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 8b20a5b87..73fb12450 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -37,8 +37,9 @@ test_gamma() /* test gamma copy */ - auto gamma2 = static_cast(gamma)->copy(graph.root(), { pred, v0, v1, v2 }); - view(graph.root(), stdout); + auto gamma2 = + static_cast(gamma)->copy(&graph.GetRootRegion(), { pred, v0, v1, v2 }); + view(&graph.GetRootRegion(), stdout); assert(is(gamma2)); /* test entry and exit variable iterators */ @@ -63,7 +64,7 @@ test_predicate_reduction() auto v1 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); auto v2 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); - auto pred = control_constant(graph.root(), 3, 1); + auto pred = control_constant(&graph.GetRootRegion(), 3, 1); auto gamma = GammaNode::create(pred, 3); auto ev0 = gamma->AddEntryVar(v0); @@ -73,18 +74,18 @@ test_predicate_reduction() auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Act auto gammaNode = TryGetOwnerNode(*r.origin()); ReduceGammaWithStaticallyKnownPredicate(*gammaNode); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Assert assert(r.origin() == v1); - graph.prune(); - assert(graph.root()->nnodes() == 0); + graph.PruneNodes(); + assert(graph.GetRootRegion().nnodes() == 0); } static void @@ -106,12 +107,12 @@ test_invariant_reduction() auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); - graph.normalize(); - // jlm::rvsdg::view(graph.root(), stdout); + graph.Normalize(); + // 
jlm::rvsdg::view(graph.GetRootRegion(), stdout); assert(r.origin() == v); - graph.prune(); - assert(graph.root()->nnodes() == 0); + graph.PruneNodes(); + assert(graph.GetRootRegion().nnodes() == 0); } static void @@ -141,12 +142,12 @@ test_control_constant_reduction() auto & ex1 = jlm::tests::GraphExport::Create(*xv1.output, ""); auto & ex2 = jlm::tests::GraphExport::Create(*xv2.output, ""); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Act auto gammaNode = TryGetOwnerNode(*ex1.origin()); ReduceGammaControlConstant(*gammaNode); - view(graph.root(), stdout); + view(&graph.GetRootRegion(), stdout); // Assert auto match = output::GetNode(*ex1.origin()); @@ -181,12 +182,12 @@ test_control_constant_reduction2() auto & ex = jlm::tests::GraphExport::Create(*xv.output, ""); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Act auto gammaNode = TryGetOwnerNode(*ex.origin()); ReduceGammaControlConstant(*gammaNode); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); // Assert auto match = output::GetNode(*ex.origin()); diff --git a/tests/jlm/rvsdg/test-graph.cpp b/tests/jlm/rvsdg/test-graph.cpp index c959a5d6e..a3ef47a24 100644 --- a/tests/jlm/rvsdg/test-graph.cpp +++ b/tests/jlm/rvsdg/test-graph.cpp @@ -33,10 +33,10 @@ test_recursive_prune() Graph graph; auto imp = &jlm::tests::GraphImport::Create(graph, t, "i"); - auto n1 = jlm::tests::test_op::create(graph.root(), { imp }, { t }); - auto n2 = jlm::tests::test_op::create(graph.root(), { imp }, { t }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), { imp }, { t }); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), { imp }, { t }); - auto n3 = jlm::tests::structural_node::create(graph.root(), 1); + auto n3 = jlm::tests::structural_node::create(&graph.GetRootRegion(), 1); StructuralInput::create(n3, imp, t); auto & a1 = TestGraphArgument::Create(*n3->subregion(0), nullptr, t); auto 
n4 = jlm::tests::test_op::create(n3->subregion(0), { &a1 }, { t }); @@ -49,13 +49,13 @@ test_recursive_prune() jlm::tests::GraphExport::Create(*n2->output(0), "n2"); jlm::tests::GraphExport::Create(*o1, "n3"); - jlm::rvsdg::view(graph.root(), stdout); - graph.prune(); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); + graph.PruneNodes(); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - assert(!region_contains_node(graph.root(), n1)); - assert(region_contains_node(graph.root(), n2)); - assert(region_contains_node(graph.root(), n3)); + assert(!region_contains_node(&graph.GetRootRegion(), n1)); + assert(region_contains_node(&graph.GetRootRegion(), n2)); + assert(region_contains_node(&graph.GetRootRegion(), n3)); assert(region_contains_node(n3->subregion(0), n4)); assert(!region_contains_node(n3->subregion(0), n5)); assert(!region_contains_node(n3->subregion(0), n6)); @@ -70,13 +70,13 @@ test_empty_graph_pruning() { jlm::rvsdg::Graph graph; - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - graph.prune(); + graph.PruneNodes(); - assert(graph.root()->nnodes() == 0); + assert(graph.GetRootRegion().nnodes() == 0); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); return 0; } @@ -91,21 +91,21 @@ test_prune_replace() auto type = jlm::tests::valuetype::Create(); Graph graph; - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); - auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0) }, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type }); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n1->output(0) }, { type }); + auto n3 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n2->output(0) }, { type }); jlm::tests::GraphExport::Create(*n2->output(0), "n2"); 
jlm::tests::GraphExport::Create(*n3->output(0), "n3"); - auto n4 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); + auto n4 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n1->output(0) }, { type }); n2->output(0)->divert_users(n4->output(0)); assert(n2->output(0)->nusers() == 0); - graph.prune(); + graph.PruneNodes(); - assert(!region_contains_node(graph.root(), n2)); + assert(!region_contains_node(&graph.GetRootRegion(), n2)); return 0; } @@ -121,11 +121,11 @@ test_graph() Graph graph; - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type }); assert(n1); assert(n1->depth() == 0); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, {}); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n1->output(0) }, {}); assert(n2); assert(n2->depth() == 1); @@ -144,25 +144,25 @@ Copy() auto valueType = jlm::tests::valuetype::Create(); Graph graph; - auto & argument = TestGraphArgument::Create(*graph.root(), nullptr, valueType); - auto node = test_op::create(graph.root(), { &argument }, { valueType }); + auto & argument = TestGraphArgument::Create(graph.GetRootRegion(), nullptr, valueType); + auto node = test_op::create(&graph.GetRootRegion(), { &argument }, { valueType }); TestGraphResult::Create(*node->output(0), nullptr); // Act - auto newGraph = graph.copy(); + auto newGraph = graph.Copy(); // Assert - assert(newGraph->root()->narguments() == 1); - auto copiedArgument = newGraph->root()->argument(0); + assert(newGraph->GetRootRegion().narguments() == 1); + auto copiedArgument = newGraph->GetRootRegion().argument(0); assert(is(copiedArgument)); - assert(newGraph->root()->nnodes() == 1); - auto copiedNode = newGraph->root()->Nodes().begin().ptr(); + assert(newGraph->GetRootRegion().nnodes() == 1); + auto copiedNode = newGraph->GetRootRegion().Nodes().begin().ptr(); assert(copiedNode->ninputs() == 1 && 
copiedNode->noutputs() == 1); assert(copiedNode->input(0)->origin() == copiedArgument); - assert(newGraph->root()->nresults() == 1); - auto copiedResult = newGraph->root()->result(0); + assert(newGraph->GetRootRegion().nresults() == 1); + auto copiedResult = newGraph->GetRootRegion().result(0); assert(is(*copiedResult)); assert(copiedResult->origin() == copiedNode->output(0)); diff --git a/tests/jlm/rvsdg/test-nodes.cpp b/tests/jlm/rvsdg/test-nodes.cpp index 5856577cb..66072d48a 100644 --- a/tests/jlm/rvsdg/test-nodes.cpp +++ b/tests/jlm/rvsdg/test-nodes.cpp @@ -22,7 +22,7 @@ test_node_copy() auto s = &jlm::tests::GraphImport::Create(graph, stype, ""); auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); - auto n1 = jlm::tests::structural_node::create(graph.root(), 3); + auto n1 = jlm::tests::structural_node::create(&graph.GetRootRegion(), 3); auto i1 = StructuralInput::create(n1, s, stype); auto i2 = StructuralInput::create(n1, v, vtype); auto o1 = StructuralOutput::create(n1, stype); @@ -37,7 +37,7 @@ test_node_copy() TestGraphResult::Create(*n2->output(0), o1); TestGraphResult::Create(*n3->output(0), o2); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); /* copy first into second region with arguments and results */ SubstitutionMap smap; @@ -47,7 +47,7 @@ test_node_copy() smap.insert(o2, o2); n1->subregion(0)->copy(n1->subregion(1), smap, true, true); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); auto r2 = n1->subregion(1); assert(r2->narguments() == 2); @@ -71,7 +71,7 @@ test_node_copy() smap2.insert(o2, o2); n1->subregion(1)->copy(n1->subregion(2), smap2, false, true); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); auto r3 = n1->subregion(2); assert(r3->nresults() == 2); @@ -84,11 +84,11 @@ test_node_copy() jlm::rvsdg::SubstitutionMap smap3; smap3.insert(s, s); smap3.insert(v, v); - n1->copy(graph.root(), smap3); + 
n1->copy(&graph.GetRootRegion(), smap3); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - assert(graph.root()->nnodes() == 2); + assert(graph.GetRootRegion().nnodes() == 2); } static inline void @@ -99,13 +99,13 @@ test_node_depth() jlm::rvsdg::Graph graph; auto x = &jlm::tests::GraphImport::Create(graph, vt, "x"); - auto null = jlm::tests::test_op::create(graph.root(), {}, { vt }); - auto bin = jlm::tests::test_op::create(graph.root(), { null->output(0), x }, { vt }); - auto un = jlm::tests::test_op::create(graph.root(), { bin->output(0) }, { vt }); + auto null = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { vt }); + auto bin = jlm::tests::test_op::create(&graph.GetRootRegion(), { null->output(0), x }, { vt }); + auto un = jlm::tests::test_op::create(&graph.GetRootRegion(), { bin->output(0) }, { vt }); jlm::tests::GraphExport::Create(*un->output(0), "x"); - jlm::rvsdg::view(graph.root(), stdout); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); assert(null->depth() == 0); assert(bin->depth() == 1); @@ -126,13 +126,17 @@ TestRemoveOutputsWhere() jlm::rvsdg::Graph rvsdg; auto valueType = jlm::tests::valuetype::Create(); - auto & node1 = - jlm::tests::SimpleNode::Create(*rvsdg.root(), {}, { valueType, valueType, valueType }); + auto & node1 = jlm::tests::SimpleNode::Create( + rvsdg.GetRootRegion(), + {}, + { valueType, valueType, valueType }); auto output0 = node1.output(0); auto output2 = node1.output(2); - auto & node2 = - jlm::tests::SimpleNode::Create(*rvsdg.root(), { output0, output2 }, { valueType, valueType }); + auto & node2 = jlm::tests::SimpleNode::Create( + rvsdg.GetRootRegion(), + { output0, output2 }, + { valueType, valueType }); // Act & Assert node2.RemoveOutputsWhere( @@ -190,7 +194,7 @@ TestRemoveInputsWhere() auto valueType = jlm::tests::valuetype::Create(); auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); - auto & node = jlm::tests::SimpleNode::Create(*rvsdg.root(), { x, 
x, x }, {}); + auto & node = jlm::tests::SimpleNode::Create(rvsdg.GetRootRegion(), { x, x, x }, {}); auto input0 = node.input(0); auto input2 = node.input(2); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index 0f4658bef..e2b190122 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -22,7 +22,7 @@ TestThetaCreation() auto imp2 = &jlm::tests::GraphImport::Create(graph, t, "imp2"); auto imp3 = &jlm::tests::GraphImport::Create(graph, t, "imp3"); - auto theta = jlm::rvsdg::ThetaNode::create(graph.root()); + auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto lv1 = theta->add_loopvar(imp1); auto lv2 = theta->add_loopvar(imp2); @@ -33,9 +33,10 @@ TestThetaCreation() theta->set_predicate(lv1->argument()); jlm::tests::GraphExport::Create(*theta->output(0), "exp"); - auto theta2 = - static_cast(theta)->copy(graph.root(), { imp1, imp2, imp3 }); - jlm::rvsdg::view(graph.root(), stdout); + auto theta2 = static_cast(theta)->copy( + &graph.GetRootRegion(), + { imp1, imp2, imp3 }); + jlm::rvsdg::view(&graph.GetRootRegion(), stdout); assert(lv1->node() == theta); assert(lv2->node() == theta); @@ -61,7 +62,7 @@ TestRemoveThetaOutputsWhere() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = ThetaNode::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); auto thetaOutput1 = thetaNode->add_loopvar(x); @@ -111,7 +112,7 @@ TestPruneThetaOutputs() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = ThetaNode::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); thetaNode->add_loopvar(x); @@ -146,7 +147,7 @@ TestRemoveThetaInputsWhere() auto x = 
&jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = ThetaNode::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); auto thetaOutput1 = thetaNode->add_loopvar(x); @@ -202,7 +203,7 @@ TestPruneThetaInputs() auto x = &jlm::tests::GraphImport::Create(rvsdg, valueType, "x"); auto y = &jlm::tests::GraphImport::Create(rvsdg, valueType, "y"); - auto thetaNode = ThetaNode::create(rvsdg.root()); + auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); auto thetaOutput0 = thetaNode->add_loopvar(ctl); auto thetaOutput1 = thetaNode->add_loopvar(x); diff --git a/tests/jlm/rvsdg/test-topdown.cpp b/tests/jlm/rvsdg/test-topdown.cpp index 64575f88a..7d07307a6 100644 --- a/tests/jlm/rvsdg/test-topdown.cpp +++ b/tests/jlm/rvsdg/test-topdown.cpp @@ -17,9 +17,10 @@ test_initialization() jlm::rvsdg::Graph graph; auto i = &jlm::tests::GraphImport::Create(graph, vtype, "i"); - auto constant = jlm::tests::test_op::create(graph.root(), {}, { vtype }); - auto unary = jlm::tests::test_op::create(graph.root(), { i }, { vtype }); - auto binary = jlm::tests::test_op::create(graph.root(), { i, unary->output(0) }, { vtype }); + auto constant = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { vtype }); + auto unary = jlm::tests::test_op::create(&graph.GetRootRegion(), { i }, { vtype }); + auto binary = + jlm::tests::test_op::create(&graph.GetRootRegion(), { i, unary->output(0) }, { vtype }); jlm::tests::GraphExport::Create(*constant->output(0), "c"); jlm::tests::GraphExport::Create(*unary->output(0), "u"); @@ -28,7 +29,7 @@ test_initialization() bool unary_visited = false; bool binary_visited = false; bool constant_visited = false; - for (const auto & node : jlm::rvsdg::topdown_traverser(graph.root())) + for (const auto & node : jlm::rvsdg::topdown_traverser(&graph.GetRootRegion())) { if (node == unary) unary_visited 
= true; @@ -49,14 +50,17 @@ test_basic_traversal() jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type, type }); + auto n2 = jlm::tests::test_op::create( + &graph.GetRootRegion(), + { n1->output(0), n1->output(1) }, + { type }); jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { jlm::rvsdg::Node * tmp; - jlm::rvsdg::topdown_traverser trav(graph.root()); + jlm::rvsdg::topdown_traverser trav(&graph.GetRootRegion()); tmp = trav.next(); assert(tmp == n1); @@ -75,13 +79,16 @@ test_order_enforcement_traversal() jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, { type }); - auto n3 = jlm::tests::test_op::create(graph.root(), { n2->output(0), n1->output(1) }, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type, type }); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n1->output(0) }, { type }); + auto n3 = jlm::tests::test_op::create( + &graph.GetRootRegion(), + { n2->output(0), n1->output(1) }, + { type }); { jlm::rvsdg::Node * tmp; - jlm::rvsdg::topdown_traverser trav(graph.root()); + jlm::rvsdg::topdown_traverser trav(&graph.GetRootRegion()); tmp = trav.next(); assert(tmp == n1); @@ -102,23 +109,26 @@ test_traversal_insertion() jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type, type }); - auto n2 = jlm::tests::test_op::create(graph.root(), { n1->output(0), n1->output(1) }, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type, type }); + auto n2 = 
jlm::tests::test_op::create( + &graph.GetRootRegion(), + { n1->output(0), n1->output(1) }, + { type }); jlm::tests::GraphExport::Create(*n2->output(0), "dummy"); { jlm::rvsdg::Node * node; - jlm::rvsdg::topdown_traverser trav(graph.root()); + jlm::rvsdg::topdown_traverser trav(&graph.GetRootRegion()); node = trav.next(); assert(node == n1); /* At this point, n1 has been visited, now create some nodes */ - auto n3 = jlm::tests::test_op::create(graph.root(), {}, { type }); - auto n4 = jlm::tests::test_op::create(graph.root(), { n3->output(0) }, {}); - auto n5 = jlm::tests::test_op::create(graph.root(), { n2->output(0) }, {}); + auto n3 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type }); + auto n4 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n3->output(0) }, {}); + auto n5 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n2->output(0) }, {}); /* The newly created nodes n3 and n4 will not be visited, @@ -163,7 +173,7 @@ test_mutable_traverse() bool seen_n2 = false; bool seen_n3 = false; - for (const auto & tmp : jlm::rvsdg::topdown_traverser(graph->root())) + for (const auto & tmp : jlm::rvsdg::topdown_traverser(&graph->GetRootRegion())) { seen_n1 = seen_n1 || (tmp == n1); seen_n2 = seen_n2 || (tmp == n2); @@ -181,9 +191,9 @@ test_mutable_traverse() jlm::rvsdg::Graph graph; auto type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); - auto n2 = jlm::tests::test_op::create(graph.root(), {}, { type }); - auto n3 = jlm::tests::test_op::create(graph.root(), { n1->output(0) }, {}); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type }); + auto n2 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type }); + auto n3 = jlm::tests::test_op::create(&graph.GetRootRegion(), { n1->output(0) }, {}); test(&graph, n1, n2, n3); test(&graph, n1, n2, n3); diff --git a/tests/jlm/rvsdg/test-typemismatch.cpp b/tests/jlm/rvsdg/test-typemismatch.cpp index 
db6d79101..f38458897 100644 --- a/tests/jlm/rvsdg/test-typemismatch.cpp +++ b/tests/jlm/rvsdg/test-typemismatch.cpp @@ -18,12 +18,12 @@ test_main() auto type = jlm::tests::statetype::Create(); auto value_type = jlm::tests::valuetype::Create(); - auto n1 = jlm::tests::test_op::create(graph.root(), {}, { type }); + auto n1 = jlm::tests::test_op::create(&graph.GetRootRegion(), {}, { type }); bool error_handler_called = false; try { - jlm::tests::test_op::Create(graph.root(), { value_type }, { n1->output(0) }, {}); + jlm::tests::test_op::Create(&graph.GetRootRegion(), { value_type }, { n1->output(0) }, {}); } catch (jlm::util::type_error & e) { diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index 52b390d96..a0f7d81da 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -157,7 +157,7 @@ structural_node::~structural_node() structural_node * structural_node::copy(rvsdg::Region * parent, rvsdg::SubstitutionMap & smap) const { - graph()->mark_denormalized(); + graph()->MarkDenormalized(); auto node = structural_node::create(parent, nsubregions()); /* copy inputs */ diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index eee6315ab..a388d48f8 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -38,7 +38,7 @@ class GraphImport final : public rvsdg::GraphImport Create(rvsdg::Graph & graph, std::shared_ptr type, std::string name) { auto graphImport = new GraphImport(graph, std::move(type), std::move(name)); - graph.root()->append_argument(graphImport); + graph.GetRootRegion().append_argument(graphImport); return *graphImport; } }; @@ -61,7 +61,7 @@ class GraphExport final : public rvsdg::GraphExport Create(rvsdg::output & origin, std::string name) { auto graphExport = new GraphExport(origin, std::move(name)); - origin.region()->graph()->root()->append_result(graphExport); + origin.region()->graph()->GetRootRegion().append_result(graphExport); return *graphExport; } }; From 
087e4af3adef7572a847ebf698aa7ec9aaa16182 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 1 Jan 2025 20:15:50 +0100 Subject: [PATCH 149/170] Clean-up node reduction transformation (#701) This PR cleans up the node reduction transformation. It does the following: 1. Brings the entire transformation to the current naming standards 2. Brings the entire transformation to the current coding standards 3. Utilizes the new normalization interface for the reductions --- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 2 +- jlm/llvm/Makefile.sub | 1 + jlm/llvm/opt/reduction.cpp | 273 +++++++++++++++------- jlm/llvm/opt/reduction.hpp | 121 +++++++++- jlm/tooling/Command.cpp | 2 +- tests/jlm/llvm/opt/NodeReductionTests.cpp | 74 ++++++ 6 files changed, 380 insertions(+), 93 deletions(-) create mode 100644 tests/jlm/llvm/opt/NodeReductionTests.cpp diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 91856343e..3567f468b 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -59,7 +59,7 @@ split_opt(llvm::RvsdgModule & rm) jlm::hls::cne cne; jlm::llvm::InvariantValueRedirection ivr; jlm::llvm::tginversion tgi; - jlm::llvm::nodereduction red; + jlm::llvm::NodeReduction red; jlm::util::StatisticsCollector statisticsCollector; tgi.run(rm, statisticsCollector); dne.run(rm, statisticsCollector); diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index e5ee2244f..143d95186 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -202,6 +202,7 @@ libllvm_TESTS += \ tests/jlm/llvm/opt/alias-analyses/TestSteensgaard \ tests/jlm/llvm/opt/alias-analyses/TestTopDownMemoryNodeEliminator \ tests/jlm/llvm/opt/InvariantValueRedirectionTests \ + tests/jlm/llvm/opt/NodeReductionTests \ tests/jlm/llvm/opt/RvsdgTreePrinterTests \ tests/jlm/llvm/opt/test-cne \ tests/jlm/llvm/opt/TestDeadNodeElimination \ diff --git a/jlm/llvm/opt/reduction.cpp b/jlm/llvm/opt/reduction.cpp index 
0b7d621c4..490d135a2 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -4,132 +4,233 @@ */ #include -#include #include #include +#include +#include #include -#include namespace jlm::llvm { -class redstat final : public util::Statistics +void +NodeReduction::Statistics::Start(const rvsdg::Graph & graph) noexcept { -public: - ~redstat() override = default; + AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); + AddTimer(Label::Timer).start(); +} - explicit redstat(const util::filepath & sourceFile) - : Statistics(Statistics::Id::ReduceNodes, sourceFile) - {} +void +NodeReduction::Statistics::End(const rvsdg::Graph & graph) noexcept +{ + AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); + AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); + GetTimer(Label::Timer).stop(); +} - void - start(const rvsdg::Graph & graph) noexcept - { - AddMeasurement(Label::NumRvsdgNodesBefore, rvsdg::nnodes(&graph.GetRootRegion())); - AddMeasurement(Label::NumRvsdgInputsBefore, rvsdg::ninputs(&graph.GetRootRegion())); - AddTimer(Label::Timer).start(); - } +bool +NodeReduction::Statistics::AddIteration(const rvsdg::Region & region, size_t numIterations) +{ + const auto it = NumIterations_.find(®ion); + NumIterations_[®ion] = numIterations; + return it != NumIterations_.end(); +} - void - end(const rvsdg::Graph & graph) noexcept +std::optional +NodeReduction::Statistics::GetNumIterations(const rvsdg::Region & region) const noexcept +{ + if (const auto it = NumIterations_.find(®ion); it != NumIterations_.end()) { - AddMeasurement(Label::NumRvsdgNodesAfter, rvsdg::nnodes(&graph.GetRootRegion())); - AddMeasurement(Label::NumRvsdgInputsAfter, rvsdg::ninputs(&graph.GetRootRegion())); - GetTimer(Label::Timer).stop(); + return it->second; } - static std::unique_ptr - Create(const 
util::filepath & sourceFile) - { - return std::make_unique(sourceFile); - } -}; + return std::nullopt; +} + +NodeReduction::~NodeReduction() noexcept = default; -static void -enable_store_reductions(rvsdg::Graph & graph) +NodeReduction::NodeReduction() = default; + +void +NodeReduction::run(RvsdgModule & rvsdgModule) { - auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(true); - nf->set_store_mux_reducible(true); - nf->set_store_store_reducible(true); - nf->set_store_alloca_reducible(true); - nf->set_multiple_origin_reducible(true); + util::StatisticsCollector statisticsCollector; + run(rvsdgModule, statisticsCollector); } -static void -enable_load_reductions(rvsdg::Graph & graph) +void +NodeReduction::run(RvsdgModule & rvsdgModule, util::StatisticsCollector & statisticsCollector) { - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(true); - nf->set_load_mux_reducible(true); - nf->set_load_store_reducible(true); - nf->set_load_alloca_reducible(true); - nf->set_multiple_origin_reducible(true); - nf->set_load_store_state_reducible(true); - nf->set_load_load_state_reducible(true); + const auto & graph = rvsdgModule.Rvsdg(); + + Statistics_ = Statistics::Create(rvsdgModule.SourceFileName()); + Statistics_->Start(graph); + + ReduceNodesInRegion(graph.GetRootRegion()); + + Statistics_->End(graph); + statisticsCollector.CollectDemandedStatistics(std::move(Statistics_)); } -static void -enable_gamma_reductions(rvsdg::Graph & graph) +void +NodeReduction::ReduceNodesInRegion(rvsdg::Region & region) { - auto nf = rvsdg::GammaOperation::normal_form(&graph); - nf->set_mutable(true); - nf->set_predicate_reduction(true); - // set_control_constante_reduction cause a PHI node input type error - // github issue #303 - nf->set_control_constant_reduction(false); + bool reductionPerformed; + size_t numIterations = 0; + do + { + numIterations++; + reductionPerformed = false; + + for (const auto node : 
rvsdg::topdown_traverser(®ion)) + { + if (const auto structuralNode = dynamic_cast(node)) + { + reductionPerformed |= ReduceStructuralNode(*structuralNode); + } + else if (rvsdg::is(node)) + { + reductionPerformed |= ReduceSimpleNode(*node); + } + else + { + JLM_UNREACHABLE("Unhandled node type."); + } + } + + if (reductionPerformed) + { + // Let's remove all dead nodes in this region to avoid reductions on + // dead nodes in the next iteration. + region.prune(false); + } + } while (reductionPerformed); + + Statistics_->AddIteration(region, numIterations); } -static void -enable_unary_reductions(rvsdg::Graph & graph) +bool +NodeReduction::ReduceStructuralNode(rvsdg::StructuralNode & structuralNode) { - auto nf = jlm::rvsdg::unary_op::normal_form(&graph); - // set_mutable generates incorrect output for a number of - // llvm suite tests when used in combination with other - // optimizations than the set_reducible - nf->set_mutable(false); - // set_reducible generates incorrect output for 18 llvm suite tests - // github issue #304 - nf->set_reducible(false); + bool reductionPerformed = false; + + // Reduce structural nodes + if (is(&structuralNode)) + { + reductionPerformed |= ReduceGammaNode(structuralNode); + } + + if (reductionPerformed) + { + // We can not go through the subregions as the structural node might already have been removed. + return true; + } + + // Reduce all nodes in the subregions + for (size_t n = 0; n < structuralNode.nsubregions(); n++) + { + const auto subregion = structuralNode.subregion(n); + ReduceNodesInRegion(*subregion); + } + + return false; } -static void -enable_binary_reductions(rvsdg::Graph & graph) +bool +NodeReduction::ReduceGammaNode(rvsdg::StructuralNode & gammaNode) { - auto nf = jlm::rvsdg::binary_op::normal_form(&graph); - nf->set_mutable(true); - nf->set_reducible(true); + JLM_ASSERT(is(&gammaNode)); + + // FIXME: We can not apply the reduction below due to a bug. 
See github issue #303 + // rvsdg::ReduceGammaControlConstant + + return ReduceGammaWithStaticallyKnownPredicate(gammaNode); } -static void -reduce(RvsdgModule & rm, util::StatisticsCollector & statisticsCollector) +bool +NodeReduction::ReduceSimpleNode(rvsdg::Node & simpleNode) { - auto & graph = rm.Rvsdg(); - auto statistics = redstat::Create(rm.SourceFileName()); + if (is(&simpleNode)) + { + return ReduceLoadNode(simpleNode); + } + if (is(&simpleNode)) + { + return ReduceStoreNode(simpleNode); + } + if (is(&simpleNode)) + { + // FIXME: handle the unary node + // See github issue #304 + return false; + } + if (is(&simpleNode)) + { + return ReduceBinaryNode(simpleNode); + } - statistics->start(graph); + return false; +} - enable_store_reductions(graph); - enable_load_reductions(graph); - enable_gamma_reductions(graph); - enable_unary_reductions(graph); - enable_binary_reductions(graph); +bool +NodeReduction::ReduceLoadNode(rvsdg::Node & simpleNode) +{ + JLM_ASSERT(is(&simpleNode)); - graph.Normalize(); - statistics->end(graph); + return rvsdg::ReduceNode(NormalizeLoadNode, simpleNode); +} + +bool +NodeReduction::ReduceStoreNode(rvsdg::Node & simpleNode) +{ + JLM_ASSERT(is(&simpleNode)); - statisticsCollector.CollectDemandedStatistics(std::move(statistics)); + return rvsdg::ReduceNode(NormalizeStoreNode, simpleNode); } -/* nodereduction class */ +bool +NodeReduction::ReduceBinaryNode(rvsdg::Node & simpleNode) +{ + JLM_ASSERT(is(&simpleNode)); -nodereduction::~nodereduction() -{} + return rvsdg::ReduceNode(rvsdg::NormalizeBinaryOperation, simpleNode); +} -void -nodereduction::run(RvsdgModule & module, util::StatisticsCollector & statisticsCollector) +std::optional> +NodeReduction::NormalizeLoadNode( + const LoadNonVolatileOperation & operation, + const std::vector & operands) +{ + static std::vector> loadNodeNormalizations( + { NormalizeLoadMux, + NormalizeLoadStore, + NormalizeLoadAlloca, + NormalizeLoadDuplicateState, + NormalizeLoadStoreState, + 
NormalizeLoadLoadState }); + + return rvsdg::NormalizeSequence( + loadNodeNormalizations, + operation, + operands); +} + +std::optional> +NodeReduction::NormalizeStoreNode( + const StoreNonVolatileOperation & operation, + const std::vector & operands) { - reduce(module, statisticsCollector); + static std::vector> storeNodeNormalizations( + { NormalizeStoreMux, + NormalizeStoreStore, + NormalizeStoreAlloca, + NormalizeStoreDuplicateState }); + + return rvsdg::NormalizeSequence( + storeNodeNormalizations, + operation, + operands); } } diff --git a/jlm/llvm/opt/reduction.hpp b/jlm/llvm/opt/reduction.hpp index 92d5a15e4..2c8f4cfb7 100644 --- a/jlm/llvm/opt/reduction.hpp +++ b/jlm/llvm/opt/reduction.hpp @@ -7,20 +7,131 @@ #define JLM_LLVM_OPT_REDUCTION_HPP #include +#include + +#include + +namespace jlm::rvsdg +{ +class Graph; +class Node; +class Region; +class output; +class StructuralNode; +} namespace jlm::llvm { +class LoadNonVolatileOperation; +class StoreNonVolatileOperation; + /** - * \brief Node Reduction Optimization + * The node reduction transformation performs a series of peephole optimizations in the RVSDG. The + * nodes in a region are visited top-down and reductions are performed until a fix-point is reached, + * i.e., until no peephole optimization can be applied any longer to any node in a region. 
*/ -class nodereduction final : public optimization +class NodeReduction final : public optimization { public: - virtual ~nodereduction(); + class Statistics; + + ~NodeReduction() noexcept override; + + NodeReduction(); + + NodeReduction(const NodeReduction &) = delete; + + NodeReduction(NodeReduction &&) = delete; + + NodeReduction & + operator=(const NodeReduction &) = delete; + + NodeReduction & + operator=(NodeReduction &&) = delete; + + void + run(RvsdgModule & rvsdgModule, util::StatisticsCollector & statisticsCollector) override; + + void + run(RvsdgModule & rvsdgModule); + +private: + void + ReduceNodesInRegion(rvsdg::Region & region); + + /** + * Reduces the structural node \p structuralNode. + * + * \note This method only returns true if the node itself could be reduced, but not if any of + * the nodes in its subregions could be reduced. + * + * @param structuralNode The structural node that is supposed to be reduced. + * @return True, if the structural node could be reduced, otherwise false. + */ + [[nodiscard]] bool + ReduceStructuralNode(rvsdg::StructuralNode & structuralNode); + + [[nodiscard]] static bool + ReduceGammaNode(rvsdg::StructuralNode & gammaNode); + + [[nodiscard]] static bool + ReduceSimpleNode(rvsdg::Node & simpleNode); + + [[nodiscard]] static bool + ReduceLoadNode(rvsdg::Node & simpleNode); + + [[nodiscard]] static bool + ReduceStoreNode(rvsdg::Node & simpleNode); + + [[nodiscard]] static bool + ReduceBinaryNode(rvsdg::Node & simpleNode); + + static std::optional> + NormalizeLoadNode( + const LoadNonVolatileOperation & operation, + const std::vector & operands); + + static std::optional> + NormalizeStoreNode( + const StoreNonVolatileOperation & operation, + const std::vector & operands); + + std::unique_ptr Statistics_; +}; + +/** + * Represents the statistics gathered throughout the NodeReduction transformation. 
+ */ +class NodeReduction::Statistics final : public util::Statistics +{ +public: + ~Statistics() noexcept override = default; + + explicit Statistics(const util::filepath & sourceFile) + : util::Statistics(Id::ReduceNodes, sourceFile) + {} + + void + Start(const rvsdg::Graph & graph) noexcept; + + void + End(const rvsdg::Graph & graph) noexcept; + + bool + AddIteration(const rvsdg::Region & region, size_t numIterations); + + std::optional + GetNumIterations(const rvsdg::Region & region) const noexcept; + + static std::unique_ptr + Create(const util::filepath & sourceFile) + { + return std::make_unique(sourceFile); + } - virtual void - run(RvsdgModule & module, util::StatisticsCollector & statisticsCollector) override; +private: + std::unordered_map NumIterations_; }; } diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index 060d1ef90..2ede76651 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -408,7 +408,7 @@ JlmOptCommand::CreateOptimization( case JlmOptCommandLineOptions::OptimizationId::NodePushOut: return std::make_unique(); case JlmOptCommandLineOptions::OptimizationId::NodeReduction: - return std::make_unique(); + return std::make_unique(); case JlmOptCommandLineOptions::OptimizationId::RvsdgTreePrinter: return std::make_unique( CommandLineOptions_.GetRvsdgTreePrinterConfiguration()); diff --git a/tests/jlm/llvm/opt/NodeReductionTests.cpp b/tests/jlm/llvm/opt/NodeReductionTests.cpp new file mode 100644 index 000000000..2aa82aa3d --- /dev/null +++ b/tests/jlm/llvm/opt/NodeReductionTests.cpp @@ -0,0 +1,74 @@ +/* + * Copyright 2024 Nico Reißmann + * See COPYING for terms of redistribution. 
+ */ + +#include +#include + +#include +#include +#include +#include +#include +#include + +static int +MultipleReductionsPerRegion() +{ + using namespace jlm::llvm; + using namespace jlm::rvsdg; + + // Arrange + const auto bitType = bittype::Create(32); + const auto memoryStateType = MemoryStateType::Create(); + + jlm::llvm::RvsdgModule rvsdgModule(jlm::util::filepath(""), "", ""); + auto & graph = rvsdgModule.Rvsdg(); + + auto & sizeArgument = jlm::tests::GraphImport::Create(graph, bitType, "size"); + auto allocaResults = alloca_op::create(bitType, &sizeArgument, 4); + + const auto c3 = bitconstant_op::create(&graph.GetRootRegion(), bitvalue_repr(32, 3)); + auto storeResults = StoreNonVolatileNode::Create(allocaResults[0], c3, { allocaResults[1] }, 4); + auto loadResults = LoadNonVolatileNode::Create(allocaResults[0], { storeResults[0] }, bitType, 4); + + const auto c5 = bitconstant_op::create(&graph.GetRootRegion(), bitvalue_repr(32, 5)); + auto sum = bitadd_op::create(32, loadResults[0], c5); + + auto & sumExport = jlm::tests::GraphExport::Create(*sum, "sum"); + + view(graph, stdout); + + // Act + NodeReduction nodeReduction; + jlm::util::StatisticsCollector statisticsCollector( + jlm::util::StatisticsCollectorSettings({ jlm::util::Statistics::Id::ReduceNodes })); + nodeReduction.run(rvsdgModule, statisticsCollector); + + view(graph, stdout); + + // Assert + // We expect that two reductions are applied: + // 1. NormalizeLoadStore - This ensures that the stored constant value is directly forwarded to + // the add operation + // 2. Constant folding on the add operation + // The result is that a single constant node with value 8 is left in the graph. 
+ assert(graph.GetRootRegion().nnodes() == 1); + + auto constantNode = TryGetOwnerNode(*sumExport.origin()); + auto constantOperation = dynamic_cast(&constantNode->GetOperation()); + assert(constantOperation->value().to_uint() == 8); + + // We expect that the node reductions transformation iterated over the root region 2 times. + auto & statistics = *statisticsCollector.CollectedStatistics().begin(); + auto & nodeReductionStatistics = dynamic_cast(statistics); + auto numIterations = nodeReductionStatistics.GetNumIterations(graph.GetRootRegion()).value(); + assert(numIterations == 2); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/opt/NodeReductionTests-MultipleReductionsPerRegion", + MultipleReductionsPerRegion) From 873bd3267f1925bbb0368623117d937b1f7ec24c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Rei=C3=9Fmann?= Date: Fri, 3 Jan 2025 16:59:40 +0100 Subject: [PATCH 150/170] Add andersen top down lifetime aware optimization to jlm-opt (#703) This PR adds the `AAAndersenTopDownLifetimeAware` optimization. It builds on the existing `Andersen` alias analysis in combination with the `TopDownMemoryNodeEliminator`. 
--- .../alias-analyses/EliminatedMemoryNodeProvider.hpp | 10 ++++++++++ jlm/llvm/opt/alias-analyses/Optimization.cpp | 5 +++++ jlm/tooling/Command.cpp | 6 ++++++ jlm/tooling/CommandLine.cpp | 10 ++++++++++ jlm/tooling/CommandLine.hpp | 2 ++ 5 files changed, 33 insertions(+) diff --git a/jlm/llvm/opt/alias-analyses/EliminatedMemoryNodeProvider.hpp b/jlm/llvm/opt/alias-analyses/EliminatedMemoryNodeProvider.hpp index c8a0d72bf..d92d2563e 100644 --- a/jlm/llvm/opt/alias-analyses/EliminatedMemoryNodeProvider.hpp +++ b/jlm/llvm/opt/alias-analyses/EliminatedMemoryNodeProvider.hpp @@ -59,6 +59,16 @@ class EliminatedMemoryNodeProvider final : public MemoryNodeProvider return Eliminator_.EliminateMemoryNodes(rvsdgModule, *seedProvisioning, statisticsCollector); } + static std::unique_ptr + Create( + const RvsdgModule & rvsdgModule, + const PointsToGraph & pointsToGraph, + util::StatisticsCollector & statisticsCollector) + { + EliminatedMemoryNodeProvider provider{}; + return provider.ProvisionMemoryNodes(rvsdgModule, pointsToGraph, statisticsCollector); + } + private: Provider Provider_; Eliminator Eliminator_; diff --git a/jlm/llvm/opt/alias-analyses/Optimization.cpp b/jlm/llvm/opt/alias-analyses/Optimization.cpp index d57a041cf..600398948 100644 --- a/jlm/llvm/opt/alias-analyses/Optimization.cpp +++ b/jlm/llvm/opt/alias-analyses/Optimization.cpp @@ -6,10 +6,12 @@ #include #include +#include #include #include #include #include +#include namespace jlm::llvm::aa { @@ -38,5 +40,8 @@ template class AliasAnalysisStateEncoder; template class AliasAnalysisStateEncoder; template class AliasAnalysisStateEncoder; +template class AliasAnalysisStateEncoder< + Andersen, + EliminatedMemoryNodeProvider>; } diff --git a/jlm/tooling/Command.cpp b/jlm/tooling/Command.cpp index 2ede76651..d96d2aafe 100644 --- a/jlm/tooling/Command.cpp +++ b/jlm/tooling/Command.cpp @@ -12,9 +12,11 @@ #include #include #include +#include #include #include #include +#include #include #include #include @@ -382,6 
+384,8 @@ JlmOptCommand::CreateOptimization( using Steensgaard = llvm::aa::Steensgaard; using AgnosticMnp = llvm::aa::AgnosticMemoryNodeProvider; using RegionAwareMnp = llvm::aa::RegionAwareMemoryNodeProvider; + using TopDownLifetimeMnp = + llvm::aa::EliminatedMemoryNodeProvider; switch (optimizationId) { @@ -389,6 +393,8 @@ JlmOptCommand::CreateOptimization( return std::make_unique>(); case JlmOptCommandLineOptions::OptimizationId::AAAndersenRegionAware: return std::make_unique>(); + case JlmOptCommandLineOptions::OptimizationId::AAAndersenTopDownLifetimeAware: + return std::make_unique>(); case JlmOptCommandLineOptions::OptimizationId::AASteensgaardAgnostic: return std::make_unique>(); case JlmOptCommandLineOptions::OptimizationId::AASteensgaardRegionAware: diff --git a/jlm/tooling/CommandLine.cpp b/jlm/tooling/CommandLine.cpp index c38ae8aa9..4ec08a68b 100644 --- a/jlm/tooling/CommandLine.cpp +++ b/jlm/tooling/CommandLine.cpp @@ -108,6 +108,8 @@ JlmOptCommandLineOptions::FromCommandLineArgumentToOptimizationId( OptimizationId::AAAndersenAgnostic }, { OptimizationCommandLineArgument::AaAndersenRegionAware_, OptimizationId::AAAndersenRegionAware }, + { OptimizationCommandLineArgument::AaAndersenTopDownLifetimeAware_, + OptimizationId::AAAndersenTopDownLifetimeAware }, { OptimizationCommandLineArgument::AaSteensgaardAgnostic_, OptimizationId::AASteensgaardAgnostic }, { OptimizationCommandLineArgument::AaSteensgaardRegionAware_, @@ -141,6 +143,8 @@ JlmOptCommandLineOptions::ToCommandLineArgument(OptimizationId optimizationId) OptimizationCommandLineArgument::AaAndersenAgnostic_ }, { OptimizationId::AAAndersenRegionAware, OptimizationCommandLineArgument::AaAndersenRegionAware_ }, + { OptimizationId::AAAndersenTopDownLifetimeAware, + OptimizationCommandLineArgument::AaAndersenTopDownLifetimeAware_ }, { OptimizationId::AASteensgaardAgnostic, OptimizationCommandLineArgument::AaSteensgaardAgnostic_ }, { OptimizationId::AASteensgaardRegionAware, @@ -806,6 +810,8 @@ 
JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const auto aAAndersenAgnostic = JlmOptCommandLineOptions::OptimizationId::AAAndersenAgnostic; auto aAAndersenRegionAware = JlmOptCommandLineOptions::OptimizationId::AAAndersenRegionAware; + auto aAAndersenTopDownLifetimeAware = + JlmOptCommandLineOptions::OptimizationId::AAAndersenTopDownLifetimeAware; auto aASteensgaardAgnostic = JlmOptCommandLineOptions::OptimizationId::AASteensgaardAgnostic; auto aASteensgaardRegionAware = JlmOptCommandLineOptions::OptimizationId::AASteensgaardRegionAware; @@ -831,6 +837,10 @@ JlmOptCommandLineParser::ParseCommandLineArguments(int argc, const char * const aAAndersenRegionAware, JlmOptCommandLineOptions::ToCommandLineArgument(aAAndersenRegionAware), "Andersen alias analysis with region-aware memory state encoding"), + ::clEnumValN( + aAAndersenTopDownLifetimeAware, + JlmOptCommandLineOptions::ToCommandLineArgument(aAAndersenTopDownLifetimeAware), + "Andersen alias analysis with top-down lifetime-aware memory node elimination"), ::clEnumValN( aASteensgaardAgnostic, JlmOptCommandLineOptions::ToCommandLineArgument(aASteensgaardAgnostic), diff --git a/jlm/tooling/CommandLine.hpp b/jlm/tooling/CommandLine.hpp index cf01d645a..2b690691f 100644 --- a/jlm/tooling/CommandLine.hpp +++ b/jlm/tooling/CommandLine.hpp @@ -68,6 +68,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions AAAndersenAgnostic, AAAndersenRegionAware, + AAAndersenTopDownLifetimeAware, AASteensgaardAgnostic, AASteensgaardRegionAware, CommonNodeElimination, @@ -197,6 +198,7 @@ class JlmOptCommandLineOptions final : public CommandLineOptions { inline static const char * AaAndersenAgnostic_ = "AAAndersenAgnostic"; inline static const char * AaAndersenRegionAware_ = "AAAndersenRegionAware"; + inline static const char * AaAndersenTopDownLifetimeAware_ = "AAAndersenTopDownLifetimeAware"; inline static const char * AaSteensgaardAgnostic_ = "AASteensgaardAgnostic"; inline static const char 
* AaSteensgaardRegionAware_ = "AASteensgaardRegionAware"; inline static const char * CommonNodeElimination_ = "CommonNodeElimination"; From 6db940a6220bd6be1ba2203edceb6269114ea4b6 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 3 Jan 2025 23:27:44 +0100 Subject: [PATCH 151/170] Remove empty statemux.cpp/*.hpp (#708) --- jlm/rvsdg/statemux.cpp | 0 jlm/rvsdg/statemux.hpp | 0 2 files changed, 0 insertions(+), 0 deletions(-) delete mode 100644 jlm/rvsdg/statemux.cpp delete mode 100644 jlm/rvsdg/statemux.hpp diff --git a/jlm/rvsdg/statemux.cpp b/jlm/rvsdg/statemux.cpp deleted file mode 100644 index e69de29bb..000000000 diff --git a/jlm/rvsdg/statemux.hpp b/jlm/rvsdg/statemux.hpp deleted file mode 100644 index e69de29bb..000000000 From 70465c2ce5c0ad64f27aad5aeb0041f5aadf690f Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Fri, 3 Jan 2025 23:57:32 +0100 Subject: [PATCH 152/170] Port invariant variable reduction of gamma node to new interface (#710) --- jlm/rvsdg/gamma.cpp | 10 ++++++++++ jlm/rvsdg/gamma.hpp | 25 +++++++++++++++++++++++++ tests/jlm/rvsdg/test-gamma.cpp | 29 +++++++++++++++++------------ 3 files changed, 52 insertions(+), 12 deletions(-) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 933d02893..11c4d0a7d 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -270,6 +270,16 @@ ReduceGammaControlConstant(Node & node) return true; } +bool +ReduceGammaInvariantVariables(Node & node) +{ + const auto gammaNode = dynamic_cast(&node); + if (gammaNode == nullptr) + return false; + + return !perform_invariant_reduction(gammaNode); +} + GammaOperation::~GammaOperation() noexcept {} diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 5bb452388..e09d53bfb 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -417,6 +417,31 @@ ReduceGammaWithStaticallyKnownPredicate(Node & node); bool ReduceGammaControlConstant(Node & node); +/** + * Reduces all invariant variables of gamma node and diverts the users of the gamma 
node's exit + * variables to the respective origin of the invariant variable. + * x = ... + * xo = gamma p xi + * [xa <= xi] + * [xr <= xa] + * [xa <= xi] + * [xr <= xa] + * ... = anyOp xo + * => + * x = ... + * xo = gamma p xi + * [xa <= xi] + * [xo <= xa] + * [xa <= xi] + * [xo <= xa] + * ... = anyOp x //xo changed to x + * + * @param node A gamma node that is supposed to be reduced. + * @return True, if the transformation was successful, otherwise false. + */ +bool +ReduceGammaInvariantVariables(Node & node); + } #endif diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index 73fb12450..f56e8ed7f 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -93,23 +93,28 @@ test_invariant_reduction() { using namespace jlm::rvsdg; - auto vtype = jlm::tests::valuetype::Create(); - + // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_invariant_reduction(true); + const auto valueType = jlm::tests::valuetype::Create(); - auto pred = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), ""); - auto v = &jlm::tests::GraphImport::Create(graph, vtype, ""); + const auto predicate = &jlm::tests::GraphImport::Create(graph, ControlType::Create(2), ""); + const auto value = &jlm::tests::GraphImport::Create(graph, valueType, ""); - auto gamma = GammaNode::create(pred, 2); - auto ev = gamma->AddEntryVar(v); - gamma->AddExitVar(ev.branchArgument); + const auto gammaNode = GammaNode::create(predicate, 2); + auto [input, branchArgument] = gammaNode->AddEntryVar(value); + gammaNode->AddExitVar(branchArgument); - auto & r = jlm::tests::GraphExport::Create(*gamma->output(0), ""); + auto & ex = jlm::tests::GraphExport::Create(*gammaNode->output(0), ""); + + view(&graph.GetRootRegion(), stdout); - graph.Normalize(); - // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - assert(r.origin() == v); + // Act + const auto success = ReduceGammaInvariantVariables(*gammaNode); + view(&graph.GetRootRegion(), stdout); + + 
// Assert + assert(success); + assert(ex.origin() == value); graph.PruneNodes(); assert(graph.GetRootRegion().nnodes() == 0); From 6dcdeec48f9a8061668e8b04f9793a5e3c7ea5f7 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 00:26:26 +0100 Subject: [PATCH 153/170] Rename binary_op class to BinaryOperation (#709) --- .../InterProceduralGraphConversion.cpp | 2 +- .../frontend/LlvmInstructionConversion.cpp | 13 +-- jlm/llvm/ir/operators/operators.hpp | 26 +++--- jlm/llvm/opt/reduction.cpp | 6 +- jlm/rvsdg/binary.cpp | 39 ++++----- jlm/rvsdg/binary.hpp | 56 +++++++------ jlm/rvsdg/bitstring/arithmetic-impl.hpp | 16 ++-- jlm/rvsdg/bitstring/arithmetic.cpp | 36 +++++---- jlm/rvsdg/bitstring/arithmetic.hpp | 79 +++++++++++-------- jlm/rvsdg/bitstring/bitoperation-classes.hpp | 8 +- jlm/rvsdg/bitstring/comparison-impl.hpp | 16 ++-- jlm/rvsdg/bitstring/comparison.cpp | 26 +++--- jlm/rvsdg/bitstring/comparison.hpp | 70 ++++++++++------ jlm/rvsdg/bitstring/concat.cpp | 4 +- jlm/rvsdg/bitstring/concat.hpp | 6 +- tests/jlm/rvsdg/test-binary.cpp | 44 +++++++---- tests/test-operation.cpp | 2 +- tests/test-operation.hpp | 14 ++-- 18 files changed, 266 insertions(+), 197 deletions(-) diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index d23f15b35..7301de51e 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -1201,7 +1201,7 @@ ConvertInterProceduralGraphModule( nf->set_mutable(false); /* FIXME: we currently cannot handle flattened_binary_op in jlm2llvm pass */ - rvsdg::binary_op::normal_form(graph)->set_flatten(false); + rvsdg::BinaryOperation::normal_form(graph)->set_flatten(false); RegionalizedVariableMap regionalizedVariableMap( interProceduralGraphModule, diff --git a/jlm/llvm/frontend/LlvmInstructionConversion.cpp b/jlm/llvm/frontend/LlvmInstructionConversion.cpp index 72b9d8a58..08fe967f2 100644 --- 
a/jlm/llvm/frontend/LlvmInstructionConversion.cpp +++ b/jlm/llvm/frontend/LlvmInstructionConversion.cpp @@ -535,11 +535,14 @@ convert_icmp_instruction(::llvm::Instruction * instruction, tacsvector_t & tacs, auto type = ConvertType(i->getType(), ctx); - JLM_ASSERT(is(*binop)); + JLM_ASSERT(is(*binop)); if (t->isVectorTy()) { - tacs.push_back( - vectorbinary_op::create(*static_cast(binop.get()), op1, op2, type)); + tacs.push_back(vectorbinary_op::create( + *static_cast(binop.get()), + op1, + op2, + type)); } else { @@ -1044,11 +1047,11 @@ convert_binary_operator(::llvm::Instruction * instruction, tacsvector_t & tacs, auto op1 = ConvertValue(i->getOperand(0), tacs, ctx); auto op2 = ConvertValue(i->getOperand(1), tacs, ctx); - JLM_ASSERT(is(*operation)); + JLM_ASSERT(is(*operation)); if (i->getType()->isVectorTy()) { - auto & binop = *static_cast(operation.get()); + auto & binop = *static_cast(operation.get()); tacs.push_back(vectorbinary_op::create(binop, op1, op2, type)); } else diff --git a/jlm/llvm/ir/operators/operators.hpp b/jlm/llvm/ir/operators/operators.hpp index a5ba66b58..42b3edde5 100644 --- a/jlm/llvm/ir/operators/operators.hpp +++ b/jlm/llvm/ir/operators/operators.hpp @@ -710,13 +710,13 @@ enum class cmp le }; -class ptrcmp_op final : public jlm::rvsdg::binary_op +class ptrcmp_op final : public rvsdg::BinaryOperation { public: virtual ~ptrcmp_op(); inline ptrcmp_op(const std::shared_ptr & ptype, const llvm::cmp & cmp) - : binary_op({ ptype, ptype }, jlm::rvsdg::bittype::Create(1)), + : BinaryOperation({ ptype, ptype }, jlm::rvsdg::bittype::Create(1)), cmp_(cmp) {} @@ -938,18 +938,20 @@ enum class fpcmp uno }; -class fpcmp_op final : public jlm::rvsdg::binary_op +class fpcmp_op final : public rvsdg::BinaryOperation { public: virtual ~fpcmp_op(); inline fpcmp_op(const fpcmp & cmp, const fpsize & size) - : binary_op({ fptype::Create(size), fptype::Create(size) }, jlm::rvsdg::bittype::Create(1)), + : BinaryOperation( + { fptype::Create(size), 
fptype::Create(size) }, + jlm::rvsdg::bittype::Create(1)), cmp_(cmp) {} inline fpcmp_op(const fpcmp & cmp, const std::shared_ptr & fpt) - : binary_op({ fpt, fpt }, jlm::rvsdg::bittype::Create(1)), + : BinaryOperation({ fpt, fpt }, jlm::rvsdg::bittype::Create(1)), cmp_(cmp) {} @@ -1147,18 +1149,18 @@ enum class fpop mod }; -class fpbin_op final : public jlm::rvsdg::binary_op +class fpbin_op final : public rvsdg::BinaryOperation { public: virtual ~fpbin_op(); inline fpbin_op(const llvm::fpop & op, const fpsize & size) - : binary_op({ fptype::Create(size), fptype::Create(size) }, fptype::Create(size)), + : BinaryOperation({ fptype::Create(size), fptype::Create(size) }, fptype::Create(size)), op_(op) {} inline fpbin_op(const llvm::fpop & op, const std::shared_ptr & fpt) - : binary_op({ fpt, fpt }, fpt), + : BinaryOperation({ fpt, fpt }, fpt), op_(op) {} @@ -2229,7 +2231,7 @@ class vectorbinary_op final : public rvsdg::SimpleOperation virtual ~vectorbinary_op(); inline vectorbinary_op( - const jlm::rvsdg::binary_op & binop, + const rvsdg::BinaryOperation & binop, const std::shared_ptr & op1, const std::shared_ptr & op2, const std::shared_ptr & result) @@ -2282,10 +2284,10 @@ class vectorbinary_op final : public rvsdg::SimpleOperation return *this; } - inline const jlm::rvsdg::binary_op & + const rvsdg::BinaryOperation & operation() const noexcept { - return *static_cast(op_.get()); + return *static_cast(op_.get()); } virtual bool @@ -2299,7 +2301,7 @@ class vectorbinary_op final : public rvsdg::SimpleOperation static inline std::unique_ptr create( - const jlm::rvsdg::binary_op & binop, + const rvsdg::BinaryOperation & binop, const llvm::variable * op1, const llvm::variable * op2, const std::shared_ptr & type) diff --git a/jlm/llvm/opt/reduction.cpp b/jlm/llvm/opt/reduction.cpp index 490d135a2..6ed5e689f 100644 --- a/jlm/llvm/opt/reduction.cpp +++ b/jlm/llvm/opt/reduction.cpp @@ -165,7 +165,7 @@ NodeReduction::ReduceSimpleNode(rvsdg::Node & simpleNode) // See github 
issue #304 return false; } - if (is(&simpleNode)) + if (is(&simpleNode)) { return ReduceBinaryNode(simpleNode); } @@ -192,9 +192,9 @@ NodeReduction::ReduceStoreNode(rvsdg::Node & simpleNode) bool NodeReduction::ReduceBinaryNode(rvsdg::Node & simpleNode) { - JLM_ASSERT(is(&simpleNode)); + JLM_ASSERT(is(&simpleNode)); - return rvsdg::ReduceNode(rvsdg::NormalizeBinaryOperation, simpleNode); + return rvsdg::ReduceNode(rvsdg::NormalizeBinaryOperation, simpleNode); } std::optional> diff --git a/jlm/rvsdg/binary.cpp b/jlm/rvsdg/binary.cpp index a576715e5..93ead0de0 100644 --- a/jlm/rvsdg/binary.cpp +++ b/jlm/rvsdg/binary.cpp @@ -23,7 +23,7 @@ namespace { std::vector -reduce_operands(const jlm::rvsdg::binary_op & op, std::vector args) +reduce_operands(const BinaryOperation & op, std::vector args) { /* pair-wise reduce */ if (op.is_commutative()) @@ -80,13 +80,13 @@ bool binary_normal_form::normalize_node(Node * node) const { const Operation & base_op = node->GetOperation(); - const auto & op = *static_cast(&base_op); + const auto & op = *static_cast(&base_op); return normalize_node(node, op); } bool -binary_normal_form::normalize_node(Node * node, const binary_op & op) const +binary_normal_form::normalize_node(Node * node, const BinaryOperation & op) const { if (!get_mutable()) { @@ -157,7 +157,7 @@ binary_normal_form::normalized_create( const SimpleOperation & base_op, const std::vector & args) const { - const auto & op = *static_cast(&base_op); + const auto & op = *static_cast(&base_op); std::vector new_args(args.begin(), args.end()); @@ -310,18 +310,17 @@ flattened_binary_normal_form::normalized_create( /* binary operator */ -binary_op::~binary_op() noexcept -{} +BinaryOperation::~BinaryOperation() noexcept = default; -enum jlm::rvsdg::binary_op::flags -binary_op::flags() const noexcept +enum BinaryOperation::flags +BinaryOperation::flags() const noexcept { - return jlm::rvsdg::binary_op::flags::none; + return flags::none; } std::optional> 
FlattenAssociativeBinaryOperation( - const binary_op & operation, + const BinaryOperation & operation, const std::vector & operands) { JLM_ASSERT(!operands.empty()); @@ -359,7 +358,9 @@ FlattenAssociativeBinaryOperation( } std::optional> -NormalizeBinaryOperation(const binary_op & operation, const std::vector & operands) +NormalizeBinaryOperation( + const BinaryOperation & operation, + const std::vector & operands) { JLM_ASSERT(!operands.empty()); auto region = operands[0]->region(); @@ -403,7 +404,7 @@ flattened_binary_op::debug_string() const std::unique_ptr flattened_binary_op::copy() const { - std::unique_ptr copied_op(static_cast(op_->copy().release())); + std::unique_ptr copied_op(static_cast(op_->copy().release())); return std::make_unique(std::move(copied_op), narguments()); } @@ -413,11 +414,11 @@ flattened_binary_op::copy() const */ static jlm::rvsdg::output * -reduce_parallel(const binary_op & op, const std::vector & operands) +reduce_parallel(const BinaryOperation & op, const std::vector & operands) { JLM_ASSERT(operands.size() > 1); auto region = operands.front()->region(); - JLM_ASSERT(binary_op::normal_form(region->graph())->get_flatten() == false); + JLM_ASSERT(BinaryOperation::normal_form(region->graph())->get_flatten() == false); std::deque worklist(operands.begin(), operands.end()); while (worklist.size() > 1) @@ -436,11 +437,11 @@ reduce_parallel(const binary_op & op, const std::vector & } static jlm::rvsdg::output * -reduce_linear(const binary_op & op, const std::vector & operands) +reduce_linear(const BinaryOperation & op, const std::vector & operands) { JLM_ASSERT(operands.size() > 1); auto region = operands.front()->region(); - JLM_ASSERT(binary_op::normal_form(region->graph())->get_flatten() == false); + JLM_ASSERT(BinaryOperation::normal_form(region->graph())->get_flatten() == false); std::deque worklist(operands.begin(), operands.end()); while (worklist.size() > 1) @@ -469,10 +470,10 @@ flattened_binary_op::reduce( static 
std::unordered_map< flattened_binary_op::reduction, std::function< - jlm::rvsdg::output *(const binary_op &, const std::vector &)>> + jlm::rvsdg::output *(const BinaryOperation &, const std::vector &)>> map({ { reduction::linear, reduce_linear }, { reduction::parallel, reduce_parallel } }); - binary_op::normal_form(graph)->set_flatten(false); + BinaryOperation::normal_form(graph)->set_flatten(false); JLM_ASSERT(map.find(reduction) != map.end()); return map[reduction](bin_operation(), operands); } @@ -525,7 +526,7 @@ static void __attribute__((constructor)) register_node_normal_form() { jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::binary_op), + typeid(jlm::rvsdg::BinaryOperation), binary_operation_get_default_normal_form_); jlm::rvsdg::node_normal_form::register_factory( typeid(jlm::rvsdg::flattened_binary_op), diff --git a/jlm/rvsdg/binary.hpp b/jlm/rvsdg/binary.hpp index c75f29668..dcdfd4f51 100644 --- a/jlm/rvsdg/binary.hpp +++ b/jlm/rvsdg/binary.hpp @@ -19,7 +19,7 @@ namespace jlm::rvsdg typedef size_t binop_reduction_path_t; -class binary_op; +class BinaryOperation; class binary_normal_form final : public simple_normal_form { @@ -87,7 +87,7 @@ class binary_normal_form final : public simple_normal_form private: bool - normalize_node(Node * node, const binary_op & op) const; + normalize_node(Node * node, const BinaryOperation & op) const; bool enable_reducible_; bool enable_reorder_; @@ -119,12 +119,10 @@ class flattened_binary_normal_form final : public simple_normal_form }; /** - \brief Binary operator - - Operator taking two arguments (with well-defined reduction for more - operands if operator is associative). -*/ -class binary_op : public SimpleOperation + * Binary operation taking two arguments (with well-defined reduction for more + * operands if operator is associative). 
+ */ +class BinaryOperation : public SimpleOperation { public: enum class flags @@ -134,9 +132,9 @@ class binary_op : public SimpleOperation commutative = 2 }; - virtual ~binary_op() noexcept; + ~BinaryOperation() noexcept override; - inline binary_op( + BinaryOperation( const std::vector> operands, std::shared_ptr result) : SimpleOperation(std::move(operands), { std::move(result) }) @@ -152,7 +150,7 @@ class binary_op : public SimpleOperation jlm::rvsdg::output * op1, jlm::rvsdg::output * op2) const = 0; - virtual jlm::rvsdg::binary_op::flags + virtual BinaryOperation::flags flags() const noexcept; inline bool @@ -165,7 +163,7 @@ class binary_op : public SimpleOperation normal_form(Graph * graph) noexcept { return static_cast( - graph->GetNodeNormalForm(typeid(binary_op))); + graph->GetNodeNormalForm(typeid(BinaryOperation))); } }; @@ -186,7 +184,7 @@ class binary_op : public SimpleOperation */ std::optional> FlattenAssociativeBinaryOperation( - const binary_op & operation, + const BinaryOperation & operation, const std::vector & operands); /** @@ -203,7 +201,7 @@ FlattenAssociativeBinaryOperation( */ std::optional> NormalizeBinaryOperation( - const binary_op & operation, + const BinaryOperation & operation, const std::vector & operands); class flattened_binary_op final : public SimpleOperation @@ -217,16 +215,16 @@ class flattened_binary_op final : public SimpleOperation virtual ~flattened_binary_op() noexcept; - inline flattened_binary_op(std::unique_ptr op, size_t narguments) noexcept + inline flattened_binary_op(std::unique_ptr op, size_t narguments) noexcept : SimpleOperation({ narguments, op->argument(0) }, { op->result(0) }), op_(std::move(op)) { JLM_ASSERT(op_->is_associative()); } - inline flattened_binary_op(const binary_op & op, size_t narguments) + flattened_binary_op(const BinaryOperation & op, size_t narguments) : SimpleOperation({ narguments, op.argument(0) }, { op.result(0) }), - op_(std::unique_ptr(static_cast(op.copy().release()))) + 
op_(std::unique_ptr(static_cast(op.copy().release()))) { JLM_ASSERT(op_->is_associative()); } @@ -240,7 +238,7 @@ class flattened_binary_op final : public SimpleOperation [[nodiscard]] std::unique_ptr copy() const override; - inline const binary_op & + const BinaryOperation & bin_operation() const noexcept { return *op_; @@ -268,35 +266,35 @@ class flattened_binary_op final : public SimpleOperation } private: - std::unique_ptr op_; + std::unique_ptr op_; }; /* binary flags operators */ -static inline constexpr enum binary_op::flags -operator|(enum binary_op::flags a, enum binary_op::flags b) +static constexpr enum BinaryOperation::flags +operator|(enum BinaryOperation::flags a, enum BinaryOperation::flags b) { - return static_cast(static_cast(a) | static_cast(b)); + return static_cast(static_cast(a) | static_cast(b)); } -static inline constexpr enum binary_op::flags -operator&(enum binary_op::flags a, enum binary_op::flags b) +static constexpr enum BinaryOperation::flags +operator&(enum BinaryOperation::flags a, enum BinaryOperation::flags b) { - return static_cast(static_cast(a) & static_cast(b)); + return static_cast(static_cast(a) & static_cast(b)); } /* binary methods */ inline bool -binary_op::is_associative() const noexcept +BinaryOperation::is_associative() const noexcept { - return static_cast(flags() & binary_op::flags::associative); + return static_cast(flags() & BinaryOperation::flags::associative); } inline bool -binary_op::is_commutative() const noexcept +BinaryOperation::is_commutative() const noexcept { - return static_cast(flags() & binary_op::flags::commutative); + return static_cast(flags() & BinaryOperation::flags::commutative); } static const binop_reduction_path_t binop_reduction_none = 0; diff --git a/jlm/rvsdg/bitstring/arithmetic-impl.hpp b/jlm/rvsdg/bitstring/arithmetic-impl.hpp index 44daa6db9..77c697e78 100644 --- a/jlm/rvsdg/bitstring/arithmetic-impl.hpp +++ b/jlm/rvsdg/bitstring/arithmetic-impl.hpp @@ -52,11 +52,11 @@ 
MakeBitUnaryOperation::create(size_t nbits) const return std::make_unique(nbits); } -template +template MakeBitBinaryOperation::~MakeBitBinaryOperation() noexcept {} -template +template bool MakeBitBinaryOperation::operator==(const Operation & other) const noexcept { @@ -64,7 +64,7 @@ MakeBitBinaryOperation::operator==(const Operation & o return op && op->type() == type(); } -template +template bitvalue_repr MakeBitBinaryOperation::reduce_constants( const bitvalue_repr & arg1, @@ -73,28 +73,28 @@ MakeBitBinaryOperation::reduce_constants( return reduction{}(arg1, arg2); } -template -enum binary_op::flags +template +enum BinaryOperation::flags MakeBitBinaryOperation::flags() const noexcept { return opflags; } -template +template std::string MakeBitBinaryOperation::debug_string() const { return jlm::util::strfmt(name, type().nbits()); } -template +template std::unique_ptr MakeBitBinaryOperation::copy() const { return std::make_unique(*this); } -template +template std::unique_ptr MakeBitBinaryOperation::create(size_t nbits) const { diff --git a/jlm/rvsdg/bitstring/arithmetic.cpp b/jlm/rvsdg/bitstring/arithmetic.cpp index b730d9309..6b46362be 100644 --- a/jlm/rvsdg/bitstring/arithmetic.cpp +++ b/jlm/rvsdg/bitstring/arithmetic.cpp @@ -55,7 +55,7 @@ const char BitAddLabel[] = "BitAdd"; template class MakeBitBinaryOperation< reduce_add, BitAddLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_and { @@ -70,7 +70,7 @@ const char BitAndLabel[] = "BitAnd"; template class MakeBitBinaryOperation< reduce_and, BitAndLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_ashr { @@ -82,7 +82,7 @@ struct reduce_ashr }; const char BitAShrLabel[] = "BitAShr"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_mul { @@ -97,7 
+97,7 @@ const char BitMulLabel[] = "BitMul"; template class MakeBitBinaryOperation< reduce_mul, BitMulLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_or { @@ -112,7 +112,7 @@ const char BitOrLabel[] = "BitOr"; template class MakeBitBinaryOperation< reduce_or, BitOrLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_sdiv { @@ -124,7 +124,7 @@ struct reduce_sdiv }; const char BitSDivLabel[] = "BitSDiv"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_shl { @@ -136,7 +136,7 @@ struct reduce_shl }; const char BitShlLabel[] = "BitShl"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_shr { @@ -148,7 +148,7 @@ struct reduce_shr }; const char BitShrLabel[] = "BitShr"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_smod { @@ -160,7 +160,7 @@ struct reduce_smod }; const char BitSModLabel[] = "BitSMod"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_smulh { @@ -172,7 +172,10 @@ struct reduce_smulh }; const char BitSMulHLabel[] = "BitSMulH"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation< + reduce_smulh, + BitSMulHLabel, + BinaryOperation::flags::commutative>; struct reduce_sub { @@ -184,7 +187,7 @@ struct reduce_sub }; const char BitSubLabel[] = "BitSub"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_udiv { @@ -196,7 +199,7 @@ struct reduce_udiv }; const char BitUDivLabel[] = "BitUDiv"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_umod { @@ -208,7 +211,7 @@ struct reduce_umod }; const char BitUModLabel[] = "BitUMod"; -template class 
MakeBitBinaryOperation; +template class MakeBitBinaryOperation; struct reduce_umulh { @@ -220,7 +223,10 @@ struct reduce_umulh }; const char BitUMulHLabel[] = "BitUMulH"; -template class MakeBitBinaryOperation; +template class MakeBitBinaryOperation< + reduce_umulh, + BitUMulHLabel, + BinaryOperation::flags::commutative>; struct reduce_xor { @@ -235,6 +241,6 @@ const char BitXorLabel[] = "BitXor"; template class MakeBitBinaryOperation< reduce_xor, BitXorLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; } diff --git a/jlm/rvsdg/bitstring/arithmetic.hpp b/jlm/rvsdg/bitstring/arithmetic.hpp index fbdb75ae6..228d8a850 100644 --- a/jlm/rvsdg/bitstring/arithmetic.hpp +++ b/jlm/rvsdg/bitstring/arithmetic.hpp @@ -44,7 +44,7 @@ class MakeBitUnaryOperation final : public bitunary_op } }; -template +template class MakeBitBinaryOperation final : public bitbinary_op { public: @@ -57,7 +57,7 @@ class MakeBitBinaryOperation final : public bitbinary_op bool operator==(const Operation & other) const noexcept override; - enum binary_op::flags + enum BinaryOperation::flags flags() const noexcept override; bitvalue_repr @@ -97,113 +97,128 @@ extern const char BitAddLabel[]; using bitadd_op = MakeBitBinaryOperation< reduce_add, BitAddLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; extern template class MakeBitBinaryOperation< reduce_add, BitAddLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_and; extern const char BitAndLabel[]; using bitand_op = MakeBitBinaryOperation< reduce_and, BitAndLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; extern template class 
MakeBitBinaryOperation< reduce_and, BitAndLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_ashr; extern const char BitAShrLabel[]; -using bitashr_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitashr_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation< + reduce_ashr, + BitAShrLabel, + BinaryOperation::flags::none>; struct reduce_mul; extern const char BitMulLabel[]; using bitmul_op = MakeBitBinaryOperation< reduce_mul, BitMulLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; extern template class MakeBitBinaryOperation< reduce_mul, BitMulLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_or; extern const char BitOrLabel[]; using bitor_op = MakeBitBinaryOperation< reduce_or, BitOrLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; extern template class MakeBitBinaryOperation< reduce_or, BitOrLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; struct reduce_sdiv; extern const char BitSDivLabel[]; -using bitsdiv_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitsdiv_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation< + reduce_sdiv, + BitSDivLabel, + BinaryOperation::flags::none>; struct reduce_shl; extern const char BitShlLabel[]; -using bitshl_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitshl_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation; struct reduce_shr; extern 
const char BitShrLabel[]; -using bitshr_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitshr_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation; struct reduce_smod; extern const char BitSModLabel[]; -using bitsmod_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitsmod_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation< + reduce_smod, + BitSModLabel, + BinaryOperation::flags::none>; struct reduce_smulh; extern const char BitSMulHLabel[]; using bitsmulh_op = - MakeBitBinaryOperation; + MakeBitBinaryOperation; extern template class MakeBitBinaryOperation< reduce_smulh, BitSMulHLabel, - binary_op::flags::commutative>; + BinaryOperation::flags::commutative>; struct reduce_sub; extern const char BitSubLabel[]; -using bitsub_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitsub_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation; struct reduce_udiv; extern const char BitUDivLabel[]; -using bitudiv_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitudiv_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation< + reduce_udiv, + BitUDivLabel, + BinaryOperation::flags::none>; struct reduce_umod; extern const char BitUModLabel[]; -using bitumod_op = MakeBitBinaryOperation; -extern template class MakeBitBinaryOperation; +using bitumod_op = MakeBitBinaryOperation; +extern template class MakeBitBinaryOperation< + reduce_umod, + BitUModLabel, + BinaryOperation::flags::none>; struct reduce_umulh; extern const char BitUMulHLabel[]; using bitumulh_op = - MakeBitBinaryOperation; + MakeBitBinaryOperation; extern template class MakeBitBinaryOperation< reduce_umulh, BitUMulHLabel, - binary_op::flags::commutative>; + BinaryOperation::flags::commutative>; struct reduce_xor; extern const char BitXorLabel[]; using bitxor_op = MakeBitBinaryOperation< reduce_xor, 
BitXorLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; extern template class MakeBitBinaryOperation< reduce_xor, BitXorLabel, - binary_op::flags::associative | binary_op::flags::commutative>; + BinaryOperation::flags::associative | BinaryOperation::flags::commutative>; } diff --git a/jlm/rvsdg/bitstring/bitoperation-classes.hpp b/jlm/rvsdg/bitstring/bitoperation-classes.hpp index 941f49635..ae4dac144 100644 --- a/jlm/rvsdg/bitstring/bitoperation-classes.hpp +++ b/jlm/rvsdg/bitstring/bitoperation-classes.hpp @@ -49,13 +49,13 @@ class bitunary_op : public jlm::rvsdg::unary_op /* Represents a binary operation (possibly normalized n-ary if associative) * on a bitstring of a specific width, produces another bitstring of the * same width. */ -class bitbinary_op : public jlm::rvsdg::binary_op +class bitbinary_op : public BinaryOperation { public: virtual ~bitbinary_op() noexcept; inline bitbinary_op(const std::shared_ptr type, size_t arity = 2) noexcept - : binary_op({ arity, type }, type) + : BinaryOperation({ arity, type }, type) {} /* reduction methods */ @@ -89,13 +89,13 @@ enum class compare_result static_false }; -class bitcompare_op : public jlm::rvsdg::binary_op +class bitcompare_op : public BinaryOperation { public: virtual ~bitcompare_op() noexcept; inline bitcompare_op(std::shared_ptr type) noexcept - : binary_op({ type, type }, bittype::Create(1)) + : BinaryOperation({ type, type }, bittype::Create(1)) {} virtual binop_reduction_path_t diff --git a/jlm/rvsdg/bitstring/comparison-impl.hpp b/jlm/rvsdg/bitstring/comparison-impl.hpp index 9910fb923..2614bbf7c 100644 --- a/jlm/rvsdg/bitstring/comparison-impl.hpp +++ b/jlm/rvsdg/bitstring/comparison-impl.hpp @@ -11,11 +11,11 @@ namespace jlm::rvsdg { -template +template MakeBitComparisonOperation::~MakeBitComparisonOperation() noexcept {} -template +template bool MakeBitComparisonOperation::operator==( const 
Operation & other) const noexcept @@ -24,7 +24,7 @@ MakeBitComparisonOperation::operator==( return op && op->type() == type(); } -template +template compare_result MakeBitComparisonOperation::reduce_constants( const bitvalue_repr & arg1, @@ -41,28 +41,28 @@ MakeBitComparisonOperation::reduce_constants( } } -template -enum binary_op::flags +template +enum BinaryOperation::flags MakeBitComparisonOperation::flags() const noexcept { return opflags; } -template +template std::string MakeBitComparisonOperation::debug_string() const { return jlm::util::strfmt(name, type().nbits()); } -template +template std::unique_ptr MakeBitComparisonOperation::copy() const { return std::make_unique(*this); } -template +template std::unique_ptr MakeBitComparisonOperation::create(size_t nbits) const { diff --git a/jlm/rvsdg/bitstring/comparison.cpp b/jlm/rvsdg/bitstring/comparison.cpp index 1930eefb1..76c4e1f86 100644 --- a/jlm/rvsdg/bitstring/comparison.cpp +++ b/jlm/rvsdg/bitstring/comparison.cpp @@ -20,7 +20,10 @@ struct reduce_eq }; const char BitEqLabel[] = "BitEq"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation< + reduce_eq, + BitEqLabel, + BinaryOperation::flags::commutative>; struct reduce_ne { @@ -32,7 +35,10 @@ struct reduce_ne }; const char BitNeLabel[] = "BitNe"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation< + reduce_ne, + BitNeLabel, + BinaryOperation::flags::commutative>; struct reduce_sge { @@ -44,7 +50,7 @@ struct reduce_sge }; const char BitSgeLabel[] = "BitSge"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; struct reduce_sgt { @@ -56,7 +62,7 @@ struct reduce_sgt }; const char BitSgtLabel[] = "BitSgt"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; struct reduce_sle { @@ -68,7 +74,7 @@ struct reduce_sle }; const char BitSleLabel[] = "BitSle"; -template class MakeBitComparisonOperation; +template class 
MakeBitComparisonOperation; struct reduce_slt { @@ -80,7 +86,7 @@ struct reduce_slt }; const char BitSltLabel[] = "BitSlt"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; struct reduce_uge { @@ -92,7 +98,7 @@ struct reduce_uge }; const char BitUgeLabel[] = "BitUge"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; struct reduce_ugt { @@ -104,7 +110,7 @@ struct reduce_ugt }; const char BitUgtLabel[] = "BitUgt"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; struct reduce_ule { @@ -116,7 +122,7 @@ struct reduce_ule }; const char BitUleLabel[] = "BitUle"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; struct reduce_ult { @@ -128,6 +134,6 @@ struct reduce_ult }; const char BitUltLabel[] = "BitUlt"; -template class MakeBitComparisonOperation; +template class MakeBitComparisonOperation; } diff --git a/jlm/rvsdg/bitstring/comparison.hpp b/jlm/rvsdg/bitstring/comparison.hpp index dfad6f280..353f1ce33 100644 --- a/jlm/rvsdg/bitstring/comparison.hpp +++ b/jlm/rvsdg/bitstring/comparison.hpp @@ -13,7 +13,7 @@ namespace jlm::rvsdg { -template +template class MakeBitComparisonOperation final : public bitcompare_op { public: @@ -26,7 +26,7 @@ class MakeBitComparisonOperation final : public bitcompare_op bool operator==(const Operation & other) const noexcept override; - enum binary_op::flags + enum BinaryOperation::flags flags() const noexcept override; compare_result @@ -53,59 +53,85 @@ class MakeBitComparisonOperation final : public bitcompare_op struct reduce_eq; extern const char BitEqLabel[]; -using biteq_op = MakeBitComparisonOperation; +using biteq_op = + MakeBitComparisonOperation; extern template class MakeBitComparisonOperation< reduce_eq, BitEqLabel, - binary_op::flags::commutative>; + BinaryOperation::flags::commutative>; struct reduce_ne; extern const char BitNeLabel[]; -using bitne_op = 
MakeBitComparisonOperation; +using bitne_op = + MakeBitComparisonOperation; extern template class MakeBitComparisonOperation< reduce_ne, BitNeLabel, - binary_op::flags::commutative>; + BinaryOperation::flags::commutative>; struct reduce_sge; extern const char BitSgeLabel[]; -using bitsge_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitsge_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_sge, + BitSgeLabel, + BinaryOperation::flags::none>; struct reduce_sgt; extern const char BitSgtLabel[]; -using bitsgt_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitsgt_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_sgt, + BitSgtLabel, + BinaryOperation::flags::none>; struct reduce_sle; extern const char BitSleLabel[]; -using bitsle_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitsle_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_sle, + BitSleLabel, + BinaryOperation::flags::none>; struct reduce_slt; extern const char BitSltLabel[]; -using bitslt_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitslt_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_slt, + BitSltLabel, + BinaryOperation::flags::none>; struct reduce_uge; extern const char BitUgeLabel[]; -using bituge_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bituge_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_uge, + BitUgeLabel, + BinaryOperation::flags::none>; struct reduce_ugt; extern const char BitUgtLabel[]; -using bitugt_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitugt_op = MakeBitComparisonOperation; +extern template class 
MakeBitComparisonOperation< + reduce_ugt, + BitUgtLabel, + BinaryOperation::flags::none>; struct reduce_ule; extern const char BitUleLabel[]; -using bitule_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitule_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_ule, + BitUleLabel, + BinaryOperation::flags::none>; struct reduce_ult; extern const char BitUltLabel[]; -using bitult_op = MakeBitComparisonOperation; -extern template class MakeBitComparisonOperation; +using bitult_op = MakeBitComparisonOperation; +extern template class MakeBitComparisonOperation< + reduce_ult, + BitUltLabel, + BinaryOperation::flags::none>; } diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 4d534d022..156b0e201 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -350,10 +350,10 @@ bitconcat_op::reduce_operand_pair( return NULL; } -enum jlm::rvsdg::binary_op::flags +enum BinaryOperation::flags bitconcat_op::flags() const noexcept { - return binary_op::flags::associative; + return BinaryOperation::flags::associative; } std::string diff --git a/jlm/rvsdg/bitstring/concat.hpp b/jlm/rvsdg/bitstring/concat.hpp index b8226c0a4..ae5ccb20d 100644 --- a/jlm/rvsdg/bitstring/concat.hpp +++ b/jlm/rvsdg/bitstring/concat.hpp @@ -16,13 +16,13 @@ namespace jlm::rvsdg { -class bitconcat_op final : public jlm::rvsdg::binary_op +class bitconcat_op final : public BinaryOperation { public: virtual ~bitconcat_op() noexcept; explicit inline bitconcat_op(const std::vector> types) - : binary_op({ types.begin(), types.end() }, aggregate_arguments(types)) + : BinaryOperation({ types.begin(), types.end() }, aggregate_arguments(types)) {} virtual bool @@ -38,7 +38,7 @@ class bitconcat_op final : public jlm::rvsdg::binary_op jlm::rvsdg::output * arg1, jlm::rvsdg::output * arg2) const override; - virtual enum jlm::rvsdg::binary_op::flags + enum BinaryOperation::flags 
flags() const noexcept override; virtual std::string diff --git a/tests/jlm/rvsdg/test-binary.cpp b/tests/jlm/rvsdg/test-binary.cpp index a0f824952..2cdaef45d 100644 --- a/tests/jlm/rvsdg/test-binary.cpp +++ b/tests/jlm/rvsdg/test-binary.cpp @@ -10,14 +10,14 @@ #include #include -class BinaryOperation final : public jlm::rvsdg::binary_op +class BinaryOperation final : public jlm::rvsdg::BinaryOperation { public: BinaryOperation( const std::shared_ptr operandType, const std::shared_ptr resultType, - const enum jlm::rvsdg::binary_op::flags & flags) - : jlm::rvsdg::binary_op({ operandType, operandType }, resultType), + const enum jlm::rvsdg::BinaryOperation::flags & flags) + : jlm::rvsdg::BinaryOperation({ operandType, operandType }, resultType), Flags_(flags) {} @@ -51,7 +51,7 @@ class BinaryOperation final : public jlm::rvsdg::binary_op return nullptr; } - [[nodiscard]] enum jlm::rvsdg::binary_op::flags + [[nodiscard]] enum jlm::rvsdg::BinaryOperation::flags flags() const noexcept override { return Flags_; @@ -76,7 +76,7 @@ class BinaryOperation final : public jlm::rvsdg::binary_op } private: - enum jlm::rvsdg::binary_op::flags Flags_; + enum jlm::rvsdg::BinaryOperation::flags Flags_; }; static int @@ -85,7 +85,7 @@ FlattenedBinaryReduction() using namespace jlm::rvsdg; auto vt = jlm::tests::valuetype::Create(); - jlm::tests::binary_op op(vt, vt, binary_op::flags::associative); + jlm::tests::binary_op op(vt, vt, jlm::rvsdg::BinaryOperation::flags::associative); /* test paralell reduction */ { @@ -175,7 +175,10 @@ FlattenAssociativeBinaryOperation_NotAssociativeBinary() auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); auto i2 = &jlm::tests::GraphImport::Create(graph, valueType, "i2"); - jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::none); + jlm::tests::binary_op binaryOperation( + valueType, + valueType, + jlm::rvsdg::BinaryOperation::flags::none); auto o1 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, 
{ i0, i1 }); auto o2 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { o1->output(0), i2 }); @@ -185,7 +188,7 @@ FlattenAssociativeBinaryOperation_NotAssociativeBinary() // Act auto node = TryGetOwnerNode(*ex.origin()); - auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); + auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); jlm::rvsdg::view(graph, stdout); @@ -213,7 +216,10 @@ FlattenAssociativeBinaryOperation_NoNewOperands() auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); jlm::tests::unary_op unaryOperation(valueType, valueType); - jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); + jlm::tests::binary_op binaryOperation( + valueType, + valueType, + jlm::rvsdg::BinaryOperation::flags::associative); auto u1 = SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { i0 }); auto u2 = SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { i1 }); auto b2 = @@ -225,7 +231,7 @@ FlattenAssociativeBinaryOperation_NoNewOperands() // Act auto node = TryGetOwnerNode(*ex.origin()); - auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); + auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); jlm::rvsdg::view(graph, stdout); @@ -253,7 +259,10 @@ FlattenAssociativeBinaryOperation_Success() auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); auto i2 = &jlm::tests::GraphImport::Create(graph, valueType, "i2"); - jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); + jlm::tests::binary_op binaryOperation( + valueType, + valueType, + jlm::rvsdg::BinaryOperation::flags::associative); auto o1 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { i0, i1 }); auto o2 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { o1->output(0), i2 }); @@ -263,7 +272,7 @@ FlattenAssociativeBinaryOperation_Success() // Act auto node = TryGetOwnerNode(*ex.origin()); - auto 
success = ReduceNode(FlattenAssociativeBinaryOperation, *node); + auto success = ReduceNode(FlattenAssociativeBinaryOperation, *node); jlm::rvsdg::view(graph, stdout); @@ -292,7 +301,10 @@ NormalizeBinaryOperation_NoNewOperands() auto i0 = &jlm::tests::GraphImport::Create(graph, valueType, "i0"); auto i1 = &jlm::tests::GraphImport::Create(graph, valueType, "i1"); - jlm::tests::binary_op binaryOperation(valueType, valueType, binary_op::flags::associative); + jlm::tests::binary_op binaryOperation( + valueType, + valueType, + jlm::rvsdg::BinaryOperation::flags::associative); auto o1 = SimpleNode::create(&graph.GetRootRegion(), binaryOperation, { i0, i1 }); auto & ex = jlm::tests::GraphExport::Create(*o1->output(0), "o2"); @@ -301,7 +313,7 @@ NormalizeBinaryOperation_NoNewOperands() // Act auto node = TryGetOwnerNode(*ex.origin()); - auto success = ReduceNode(NormalizeBinaryOperation, *node); + auto success = ReduceNode(NormalizeBinaryOperation, *node); jlm::rvsdg::view(graph, stdout); @@ -324,7 +336,7 @@ NormalizeBinaryOperation_SingleOperand() auto valueType = jlm::tests::valuetype::Create(); jlm::tests::unary_op unaryOperation(valueType, valueType); - BinaryOperation binaryOperation(valueType, valueType, binary_op::flags::none); + ::BinaryOperation binaryOperation(valueType, valueType, jlm::rvsdg::BinaryOperation::flags::none); Graph graph; auto s0 = &jlm::tests::GraphImport::Create(graph, valueType, "s0"); @@ -342,7 +354,7 @@ NormalizeBinaryOperation_SingleOperand() // Act auto node = TryGetOwnerNode(*ex.origin()); - auto success = ReduceNode(NormalizeBinaryOperation, *node); + auto success = ReduceNode<::BinaryOperation>(NormalizeBinaryOperation, *node); jlm::rvsdg::view(graph, stdout); diff --git a/tests/test-operation.cpp b/tests/test-operation.cpp index a0f7d81da..6fc7feb2f 100644 --- a/tests/test-operation.cpp +++ b/tests/test-operation.cpp @@ -78,7 +78,7 @@ binary_op::reduce_operand_pair(rvsdg::binop_reduction_path_t, rvsdg::output *, r return nullptr; } 
-enum rvsdg::binary_op::flags +enum rvsdg::BinaryOperation::flags binary_op::flags() const noexcept { return flags_; diff --git a/tests/test-operation.hpp b/tests/test-operation.hpp index a388d48f8..0ed022946 100644 --- a/tests/test-operation.hpp +++ b/tests/test-operation.hpp @@ -132,7 +132,7 @@ is_unary_node(const rvsdg::Node * node) noexcept /* binary operation */ -class binary_op final : public rvsdg::binary_op +class binary_op final : public rvsdg::BinaryOperation { public: virtual ~binary_op() noexcept; @@ -140,8 +140,8 @@ class binary_op final : public rvsdg::binary_op inline binary_op( const std::shared_ptr & srctype, std::shared_ptr dsttype, - const enum rvsdg::binary_op::flags & flags) noexcept - : rvsdg::binary_op({ srctype, srctype }, std::move(dsttype)), + const enum BinaryOperation::flags & flags) noexcept + : BinaryOperation({ srctype, srctype }, std::move(dsttype)), flags_(flags) {} @@ -156,7 +156,7 @@ class binary_op final : public rvsdg::binary_op reduce_operand_pair(rvsdg::unop_reduction_path_t path, rvsdg::output * op1, rvsdg::output * op2) const override; - virtual enum rvsdg::binary_op::flags + enum BinaryOperation::flags flags() const noexcept override; virtual std::string @@ -172,7 +172,7 @@ class binary_op final : public rvsdg::binary_op rvsdg::output * op1, rvsdg::output * op2) { - binary_op op(srctype, std::move(dsttype), rvsdg::binary_op::flags::none); + binary_op op(srctype, std::move(dsttype), BinaryOperation::flags::none); return rvsdg::SimpleNode::create(op1->region(), op, { op1, op2 }); } @@ -183,12 +183,12 @@ class binary_op final : public rvsdg::binary_op rvsdg::output * op1, rvsdg::output * op2) { - binary_op op(srctype, std::move(dsttype), rvsdg::binary_op::flags::none); + binary_op op(srctype, std::move(dsttype), BinaryOperation::flags::none); return rvsdg::SimpleNode::create_normalized(op1->region(), op, { op1, op2 })[0]; } private: - enum rvsdg::binary_op::flags flags_; + enum BinaryOperation::flags flags_; }; /* structural 
operation */ From aee83359bdc2f894c04931612d3cf8445255759b Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 02:57:56 +0100 Subject: [PATCH 154/170] Move load-mux reduction tests into LoadTests.cpp (#711) Move load-mux reduction tests into LoadTest.cpp and utilize new interface in the tests. --- jlm/llvm/Makefile.sub | 1 - tests/jlm/llvm/ir/operators/LoadTests.cpp | 138 ++++++++++++++--- tests/jlm/llvm/opt/TestLoadMuxReduction.cpp | 162 -------------------- 3 files changed, 120 insertions(+), 181 deletions(-) delete mode 100644 tests/jlm/llvm/opt/TestLoadMuxReduction.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 143d95186..7fc105124 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -208,7 +208,6 @@ libllvm_TESTS += \ tests/jlm/llvm/opt/TestDeadNodeElimination \ tests/jlm/llvm/opt/test-inlining \ tests/jlm/llvm/opt/test-inversion \ - tests/jlm/llvm/opt/TestLoadMuxReduction \ tests/jlm/llvm/opt/TestLoadStoreReduction \ tests/jlm/llvm/opt/test-pull \ tests/jlm/llvm/opt/test-push \ diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index 06ef6e7d4..a647ad373 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -127,21 +127,21 @@ JLM_UNIT_TEST_REGISTER( TestLoadAllocaReduction) static int -TestLoadMuxReduction() +LoadMuxReduction_Success() { using namespace jlm::llvm; // Arrange - auto memoryStateType = MemoryStateType::Create(); - auto pointerType = PointerType::Create(); - auto bitstringType = jlm::rvsdg::bittype::Create(32); + const auto memoryStateType = MemoryStateType::Create(); + const auto pointerType = PointerType::Create(); + const auto bitstringType = jlm::rvsdg::bittype::Create(32); jlm::rvsdg::Graph graph; auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); nf->set_mutable(false); nf->set_load_mux_reducible(false); - auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); 
+ const auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); auto s1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state1"); auto s2 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state2"); auto s3 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state3"); @@ -149,32 +149,134 @@ TestLoadMuxReduction() auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); auto & loadNode = LoadNonVolatileNode::CreateNode(*address, { mux }, bitstringType, 4); - auto & ex = GraphExport::Create(*loadNode.output(0), "l"); + auto & ex1 = GraphExport::Create(*loadNode.output(0), "l"); + auto & ex2 = GraphExport::Create(*loadNode.output(1), "s"); - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); + view(&graph.GetRootRegion(), stdout); // Act - auto success = jlm::rvsdg::ReduceNode(NormalizeLoadMux, loadNode); + const auto success = jlm::rvsdg::ReduceNode(NormalizeLoadMux, loadNode); graph.PruneNodes(); - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); + view(&graph.GetRootRegion(), stdout); // Assert assert(success); - auto node = jlm::rvsdg::output::GetNode(*ex.origin()); - assert(is(node)); - assert(node->ninputs() == 4); - assert(node->input(0)->origin() == address); - assert(node->input(1)->origin() == s1); - assert(node->input(2)->origin() == s2); - assert(node->input(3)->origin() == s3); + const auto reducedLoadNode = jlm::rvsdg::output::GetNode(*ex1.origin()); + assert(is(reducedLoadNode)); + assert(reducedLoadNode->ninputs() == 4); + assert(reducedLoadNode->input(0)->origin() == address); + assert(reducedLoadNode->input(1)->origin() == s1); + assert(reducedLoadNode->input(2)->origin() == s2); + assert(reducedLoadNode->input(3)->origin() == s3); + + const auto merge = jlm::rvsdg::output::GetNode(*ex2.origin()); + assert(is(merge)); + assert(merge->ninputs() == 3); + for (size_t n = 0; n < merge->ninputs(); n++) + { + const auto expectedLoadNode = jlm::rvsdg::output::GetNode(*merge->input(n)->origin()); + 
assert(expectedLoadNode == reducedLoadNode); + } + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadMuxReduction_Success", + LoadMuxReduction_Success) + +static int +LoadMuxReduction_WrongNumberOfOperands() +{ + // Arrange + using namespace jlm::llvm; + + const auto vt = jlm::tests::valuetype::Create(); + const auto pt = PointerType::Create(); + const auto mt = MemoryStateType::Create(); + + jlm::rvsdg::Graph graph; + auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_load_mux_reducible(false); + + const auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); + const auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); + const auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); + + auto merge = MemoryStateMergeOperation::Create(std::vector{ s1, s2 }); + auto & loadNode = LoadNonVolatileNode::CreateNode(*a, { merge, merge }, vt, 4); + + auto & ex1 = GraphExport::Create(*loadNode.output(0), "v"); + auto & ex2 = GraphExport::Create(*loadNode.output(1), "s1"); + auto & ex3 = GraphExport::Create(*loadNode.output(2), "s2"); + + view(&graph.GetRootRegion(), stdout); + + // Act + const auto success = jlm::rvsdg::ReduceNode(NormalizeLoadMux, loadNode); + graph.PruneNodes(); + + view(&graph.GetRootRegion(), stdout); + + // Assert + // The LoadMux reduction should not be performed, as the current implementation does not correctly + // take care of the two identical load state operands originating from the merge node. 
+ assert(success == false); + assert(loadNode.noutputs() == 3); + assert(ex1.origin() == loadNode.output(0)); + assert(ex2.origin() == loadNode.output(1)); + assert(ex3.origin() == loadNode.output(2)); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadMuxReduction_WrongNumberOfOperands", + LoadMuxReduction_WrongNumberOfOperands) + +static int +LoadMuxReduction_LoadWithoutStates() +{ + using namespace jlm::llvm; + + // Arrange + const auto valueType = jlm::tests::valuetype::Create(); + const auto pointerType = PointerType::Create(); + + jlm::rvsdg::Graph graph; + auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_load_mux_reducible(false); + + const auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); + + auto & loadNode = LoadNonVolatileNode::CreateNode(*address, {}, valueType, 4); + + auto & ex = GraphExport::Create(*loadNode.output(0), "v"); + + view(&graph.GetRootRegion(), stdout); + + // Act + const auto success = jlm::rvsdg::ReduceNode(NormalizeLoadMux, loadNode); + graph.PruneNodes(); + + view(&graph.GetRootRegion(), stdout); + + // Assert + // The load node has no states. Nothing needs to be done. 
+ assert(success == false); + const auto expectedLoadNode = jlm::rvsdg::output::GetNode(*ex.origin()); + assert(expectedLoadNode == &loadNode); + assert(expectedLoadNode->ninputs() == 1); return 0; } JLM_UNIT_TEST_REGISTER( - "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadMuxReduction", - TestLoadMuxReduction) + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadMuxReduction_LoadWithoutStates", + LoadMuxReduction_LoadWithoutStates) static int TestDuplicateStateReduction() diff --git a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp b/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp deleted file mode 100644 index 575597032..000000000 --- a/tests/jlm/llvm/opt/TestLoadMuxReduction.cpp +++ /dev/null @@ -1,162 +0,0 @@ -/* - * Copyright 2021 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#include -#include -#include - -#include -#include -#include - -#include - -static void -TestSuccess() -{ - using namespace jlm::llvm; - - // Arrange - auto vt = jlm::tests::valuetype::Create(); - auto pt = PointerType::Create(); - auto mt = MemoryStateType::Create(); - - jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_mux_reducible(false); - - auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); - auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); - auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); - auto s3 = &jlm::tests::GraphImport::Create(graph, mt, "s3"); - - auto mux = MemoryStateMergeOperation::Create({ s1, s2, s3 }); - auto ld = LoadNonVolatileNode::Create(a, { mux }, vt, 4); - - auto & ex1 = GraphExport::Create(*ld[0], "v"); - auto & ex2 = GraphExport::Create(*ld[1], "s"); - - // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - - // Act - nf->set_mutable(true); - nf->set_load_mux_reducible(true); - graph.Normalize(); - graph.PruneNodes(); - - // jlm::rvsdg::view(graph.GetRootRegion(), stdout); - - // Assert - auto load = 
jlm::rvsdg::output::GetNode(*ex1.origin()); - assert(is(load)); - assert(load->ninputs() == 4); - assert(load->input(1)->origin() == s1); - assert(load->input(2)->origin() == s2); - assert(load->input(3)->origin() == s3); - - auto merge = jlm::rvsdg::output::GetNode(*ex2.origin()); - assert(is(merge)); - assert(merge->ninputs() == 3); - for (size_t n = 0; n < merge->ninputs(); n++) - { - auto node = jlm::rvsdg::output::GetNode(*merge->input(n)->origin()); - assert(node == load); - } -} - -static void -TestWrongNumberOfOperands() -{ - // Arrange - using namespace jlm::llvm; - - auto vt = jlm::tests::valuetype::Create(); - auto pt = PointerType::Create(); - auto mt = MemoryStateType::Create(); - - jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_mux_reducible(false); - - auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); - auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); - auto s2 = &jlm::tests::GraphImport::Create(graph, mt, "s2"); - - auto merge = MemoryStateMergeOperation::Create(std::vector{ s1, s2 }); - auto ld = LoadNonVolatileNode::Create(a, { merge, merge }, vt, 4); - - auto & ex1 = GraphExport::Create(*ld[0], "v"); - auto & ex2 = GraphExport::Create(*ld[1], "s1"); - auto & ex3 = GraphExport::Create(*ld[2], "s2"); - - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - - // Act - nf->set_mutable(true); - nf->set_load_mux_reducible(true); - graph.Normalize(); - graph.PruneNodes(); - - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - - // Assert - - // The LoadMux reduction should not be performed, as the current implementation does not correctly - // take care of the two identical load state operands originating from the merge node. 
- assert(ld.size() == 3); - assert(ex1.origin() == ld[0]); - assert(ex2.origin() == ld[1]); - assert(ex3.origin() == ld[2]); -} - -static void -TestLoadWithoutStates() -{ - using namespace jlm::llvm; - - // Arrange - auto valueType = jlm::tests::valuetype::Create(); - auto pointerType = PointerType::Create(); - - jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_mux_reducible(false); - - auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); - - auto loadResults = LoadNonVolatileNode::Create(address, {}, valueType, 4); - - auto & ex = GraphExport::Create(*loadResults[0], "v"); - - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - - // Act - nf->set_mutable(true); - nf->set_load_mux_reducible(true); - graph.Normalize(); - graph.PruneNodes(); - - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - - // Assert - auto load = jlm::rvsdg::output::GetNode(*ex.origin()); - assert(is(load)); - assert(load->ninputs() == 1); -} - -static int -TestLoadMuxReduction() -{ - TestSuccess(); - TestWrongNumberOfOperands(); - TestLoadWithoutStates(); - - return 0; -} - -JLM_UNIT_TEST_REGISTER("jlm/llvm/opt/TestLoadMuxReduction", TestLoadMuxReduction) From d04ede4ea68635bc241368ee4345300fe30b58b8 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 11:22:21 +0100 Subject: [PATCH 155/170] Remove nullary_op normal form (#712) --- jlm/rvsdg/nullary.cpp | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/jlm/rvsdg/nullary.cpp b/jlm/rvsdg/nullary.cpp index a8baa960f..f6ba58b9a 100644 --- a/jlm/rvsdg/nullary.cpp +++ b/jlm/rvsdg/nullary.cpp @@ -10,45 +10,7 @@ namespace jlm::rvsdg { -class nullary_normal_form final : public simple_normal_form -{ -public: - virtual ~nullary_normal_form() noexcept - {} - - nullary_normal_form( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) - : 
simple_normal_form(operator_class, parent, graph) - {} -}; - -/* nullary operator */ - nullary_op::~nullary_op() noexcept {} } - -namespace -{ - -jlm::rvsdg::node_normal_form * -nullary_operation_get_default_normal_form_( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::Graph * graph) -{ - return new jlm::rvsdg::nullary_normal_form(operator_class, parent, graph); -} - -static void __attribute__((constructor)) -register_node_normal_form() -{ - jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::nullary_op), - nullary_operation_get_default_normal_form_); -} - -} From ada669990748844bcaa8a087dd07986b227e408d Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 13:40:43 +0100 Subject: [PATCH 156/170] Remove store normal form (#713) --- jlm/llvm/ir/operators/Store.cpp | 161 --------------------- jlm/llvm/ir/operators/Store.hpp | 71 --------- tests/jlm/llvm/ir/operators/StoreTests.cpp | 12 -- 3 files changed, 244 deletions(-) diff --git a/jlm/llvm/ir/operators/Store.cpp b/jlm/llvm/ir/operators/Store.cpp index 51e20a036..2f2e66f3e 100644 --- a/jlm/llvm/ir/operators/Store.cpp +++ b/jlm/llvm/ir/operators/Store.cpp @@ -331,145 +331,6 @@ perform_multiple_origin_reduction( return results; } -store_normal_form::~store_normal_form() -{} - -store_normal_form::store_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - rvsdg::Graph * graph) noexcept - : simple_normal_form(opclass, parent, graph), - enable_store_mux_(false), - enable_store_store_(false), - enable_store_alloca_(false), - enable_multiple_origin_(false) -{ - if (auto p = dynamic_cast(parent)) - { - enable_multiple_origin_ = p->enable_multiple_origin_; - enable_store_store_ = p->enable_store_store_; - enable_store_mux_ = p->enable_store_mux_; - } -} - -bool -store_normal_form::normalize_node(rvsdg::Node * node) const -{ - JLM_ASSERT(is(node->GetOperation())); - auto op = static_cast(&node->GetOperation()); 
- auto operands = jlm::rvsdg::operands(node); - - if (!get_mutable()) - return true; - - if (get_store_mux_reducible() && is_store_mux_reducible(operands)) - { - divert_users(node, perform_store_mux_reduction(*op, operands)); - node->region()->remove_node(node); - return false; - } - - if (get_store_store_reducible() && is_store_store_reducible(*op, operands)) - { - divert_users(node, perform_store_store_reduction(*op, operands)); - remove(node); - return false; - } - - if (get_store_alloca_reducible() && is_store_alloca_reducible(operands)) - { - divert_users(node, perform_store_alloca_reduction(*op, operands)); - node->region()->remove_node(node); - return false; - } - - if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) - { - divert_users(node, perform_multiple_origin_reduction(*op, operands)); - node->region()->remove_node(node); - return false; - } - - return simple_normal_form::normalize_node(node); -} - -std::vector -store_normal_form::normalized_create( - rvsdg::Region * region, - const rvsdg::SimpleOperation & op, - const std::vector & ops) const -{ - JLM_ASSERT(is(op)); - auto sop = static_cast(&op); - - if (!get_mutable()) - return simple_normal_form::normalized_create(region, op, ops); - - auto operands = ops; - if (get_store_mux_reducible() && is_store_mux_reducible(operands)) - return perform_store_mux_reduction(*sop, operands); - - if (get_store_alloca_reducible() && is_store_alloca_reducible(operands)) - return perform_store_alloca_reduction(*sop, operands); - - if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) - return perform_multiple_origin_reduction(*sop, operands); - - return simple_normal_form::normalized_create(region, op, operands); -} - -void -store_normal_form::set_store_mux_reducible(bool enable) -{ - if (get_store_mux_reducible() == enable) - return; - - children_set(enable); - - enable_store_mux_ = enable; - if (get_mutable() && enable) - graph()->MarkDenormalized(); -} - -void 
-store_normal_form::set_store_store_reducible(bool enable) -{ - if (get_store_store_reducible() == enable) - return; - - children_set(enable); - - enable_store_store_ = enable; - if (get_mutable() && enable) - graph()->MarkDenormalized(); -} - -void -store_normal_form::set_store_alloca_reducible(bool enable) -{ - if (get_store_alloca_reducible() == enable) - return; - - children_set(enable); - - enable_store_alloca_ = enable; - if (get_mutable() && enable) - graph()->MarkDenormalized(); -} - -void -store_normal_form::set_multiple_origin_reducible(bool enable) -{ - if (get_multiple_origin_reducible() == enable) - return; - - children_set(enable); - - enable_multiple_origin_ = enable; - if (get_mutable() && enable) - graph()->MarkDenormalized(); -} - std::optional> NormalizeStoreMux( const StoreNonVolatileOperation & operation, @@ -515,25 +376,3 @@ NormalizeStoreDuplicateState( } } - -namespace -{ - -static jlm::rvsdg::node_normal_form * -create_store_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::Graph * graph) -{ - return new jlm::llvm::store_normal_form(opclass, parent, graph); -} - -static void __attribute__((constructor)) -register_normal_form() -{ - jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::llvm::StoreNonVolatileOperation), - create_store_normal_form); -} - -} diff --git a/jlm/llvm/ir/operators/Store.hpp b/jlm/llvm/ir/operators/Store.hpp index b48f8d79d..f71cf6ef5 100644 --- a/jlm/llvm/ir/operators/Store.hpp +++ b/jlm/llvm/ir/operators/Store.hpp @@ -17,70 +17,6 @@ namespace jlm::llvm { -/* store normal form */ - -class store_normal_form final : public jlm::rvsdg::simple_normal_form -{ -public: - virtual ~store_normal_form(); - - store_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - rvsdg::Graph * graph) noexcept; - - virtual bool - normalize_node(rvsdg::Node * node) const override; - - virtual std::vector - normalized_create( - rvsdg::Region * 
region, - const rvsdg::SimpleOperation & op, - const std::vector & operands) const override; - - virtual void - set_store_mux_reducible(bool enable); - - virtual void - set_store_store_reducible(bool enable); - - virtual void - set_store_alloca_reducible(bool enable); - - virtual void - set_multiple_origin_reducible(bool enable); - - inline bool - get_store_mux_reducible() const noexcept - { - return enable_store_mux_; - } - - inline bool - get_store_store_reducible() const noexcept - { - return enable_store_store_; - } - - inline bool - get_store_alloca_reducible() const noexcept - { - return enable_store_alloca_; - } - - inline bool - get_multiple_origin_reducible() const noexcept - { - return enable_multiple_origin_; - } - -private: - bool enable_store_mux_; - bool enable_store_store_; - bool enable_store_alloca_; - bool enable_multiple_origin_; -}; - /** * Abstract base class for store operations. * @@ -167,13 +103,6 @@ class StoreNonVolatileOperation final : public StoreOperation [[nodiscard]] size_t NumMemoryStates() const noexcept override; - static store_normal_form * - GetNormalForm(rvsdg::Graph * graph) noexcept - { - return util::AssertedCast( - graph->GetNodeNormalForm(typeid(StoreNonVolatileOperation))); - } - static std::unique_ptr Create(const variable * address, const variable * value, const variable * state, size_t alignment) { diff --git a/tests/jlm/llvm/ir/operators/StoreTests.cpp b/tests/jlm/llvm/ir/operators/StoreTests.cpp index 4d23e525f..ca9de4079 100644 --- a/tests/jlm/llvm/ir/operators/StoreTests.cpp +++ b/tests/jlm/llvm/ir/operators/StoreTests.cpp @@ -220,10 +220,6 @@ TestStoreMuxNormalization() auto mt = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_store_mux_reducible(false); - auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); auto v = &jlm::tests::GraphImport::Create(graph, vt, "v"); auto s1 = 
&jlm::tests::GraphImport::Create(graph, mt, "s1"); @@ -273,10 +269,6 @@ TestDuplicateStateReduction() auto memoryStateType = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - const auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_multiple_origin_reducible(false); - auto a = &jlm::tests::GraphImport::Create(graph, pointerType, "a"); auto v = &jlm::tests::GraphImport::Create(graph, valueType, "v"); auto s1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "s1"); @@ -330,10 +322,6 @@ TestStoreAllocaReduction() auto bt = jlm::rvsdg::bittype::Create(32); jlm::rvsdg::Graph graph; - auto nf = StoreNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_store_alloca_reducible(false); - auto size = &jlm::tests::GraphImport::Create(graph, bt, "size"); auto value = &jlm::tests::GraphImport::Create(graph, vt, "value"); auto s = &jlm::tests::GraphImport::Create(graph, mt, "s"); From a145d70ded73bc59df827e75ca4cb7b04740e050 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 14:07:45 +0100 Subject: [PATCH 157/170] Add bugprone-move-forwarding-reference check to clang-tidy (#695) --- .clang-tidy | 2 ++ jlm/rvsdg/reduction-helpers.hpp | 14 +++++++------- 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 71006fce4..a93725439 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -1,11 +1,13 @@ --- Checks: '-*, + bugprone-move-forwarding-reference, misc-unused-parameters, modernize-deprecated-headers, modernize-redundant-void-arg, ' WarningsAsErrors: ' + bugprone-move-forwarding-reference, misc-unused-parameters, modernize-deprecated-headers, modernize-redundant-void-arg, diff --git a/jlm/rvsdg/reduction-helpers.hpp b/jlm/rvsdg/reduction-helpers.hpp index 5cdd31be0..a7e676452 100644 --- a/jlm/rvsdg/reduction-helpers.hpp +++ b/jlm/rvsdg/reduction-helpers.hpp @@ -6,10 +6,10 @@ #ifndef JLM_RVSDG_REDUCTION_HELPERS_HPP #define 
JLM_RVSDG_REDUCTION_HELPERS_HPP -#include - #include +#include + namespace jlm::rvsdg { namespace base @@ -48,11 +48,11 @@ pairwise_test_reduce(const Container & args, const ReductionTester & reduction_t * with the result if not nullptr. */ template Container -pairwise_reduce(Container && args, const Reductor & reductor) +pairwise_reduce(Container args, const Reductor & reductor) { if (args.empty()) { - return std::move(args); + return args; } auto left = args.begin(); @@ -75,7 +75,7 @@ pairwise_reduce(Container && args, const Reductor & reductor) } args.erase(std::next(left), args.end()); - return std::move(args); + return args; } /* Test whether any pair of elements of "args" can be reduced according @@ -111,7 +111,7 @@ commutative_pairwise_test_reduce( * with the result if not nullptr. */ template Container -commutative_pairwise_reduce(Container && args, const Reductor & reductor) +commutative_pairwise_reduce(Container args, const Reductor & reductor) { auto left = args.begin(); while (left != args.end()) @@ -137,7 +137,7 @@ commutative_pairwise_reduce(Container && args, const Reductor & reductor) ++left; } - return std::move(args); + return args; } /* Test whether "flatten_tester" applies to any element of "args". 
*/ From 44cfb7acbf452bf76d338f22d489b75f0ca9a82b Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 21:40:27 +0100 Subject: [PATCH 158/170] Port unary operation reduction to new normalization interface (#714) --- jlm/rvsdg/Makefile.sub | 1 + jlm/rvsdg/unary.cpp | 15 +++ jlm/rvsdg/unary.hpp | 18 ++- tests/jlm/rvsdg/UnaryOperationTests.cpp | 170 ++++++++++++++++++++++++ 4 files changed, 203 insertions(+), 1 deletion(-) create mode 100644 tests/jlm/rvsdg/UnaryOperationTests.cpp diff --git a/jlm/rvsdg/Makefile.sub b/jlm/rvsdg/Makefile.sub index 5f441ee26..1b17fc55e 100644 --- a/jlm/rvsdg/Makefile.sub +++ b/jlm/rvsdg/Makefile.sub @@ -81,6 +81,7 @@ librvsdg_TESTS = \ tests/jlm/rvsdg/test-topdown \ tests/jlm/rvsdg/test-typemismatch \ tests/jlm/rvsdg/TestStructuralNode \ + tests/jlm/rvsdg/UnaryOperationTests \ librvsdg_TEST_LIBS = \ libjlmtest \ diff --git a/jlm/rvsdg/unary.cpp b/jlm/rvsdg/unary.cpp index dd1bbf6a3..77971b290 100644 --- a/jlm/rvsdg/unary.cpp +++ b/jlm/rvsdg/unary.cpp @@ -95,6 +95,21 @@ unary_normal_form::set_reducible(bool enable) unary_op::~unary_op() noexcept {} +std::optional> +NormalizeUnaryOperation(const unary_op & operation, const std::vector & operands) +{ + JLM_ASSERT(operands.size() == 1); + auto & operand = *operands[0]; + + if (const auto reduction = operation.can_reduce_operand(&operand); + reduction != unop_reduction_none) + { + return { { operation.reduce_operand(reduction, &operand) } }; + } + + return std::nullopt; +} + } jlm::rvsdg::node_normal_form * diff --git a/jlm/rvsdg/unary.hpp b/jlm/rvsdg/unary.hpp index 43be3af9d..d52f6f108 100644 --- a/jlm/rvsdg/unary.hpp +++ b/jlm/rvsdg/unary.hpp @@ -10,7 +10,8 @@ #include #include #include -#include + +#include namespace jlm::rvsdg { @@ -90,6 +91,21 @@ static const unop_reduction_path_t unop_reduction_narrow = 5; /* operation can be distributed into operands of preceding operation */ static const unop_reduction_path_t unop_reduction_distribute = 6; +/** + * \brief Applies the 
reductions implemented in the unary operations reduction functions. + * + * @param operation The unary operation on which the transformation is performed. + * @param operands The single(!) operand of the unary node. It should only be a single operand. + * + * @return If the normalization could be applied, then the single(!) result of the unary operation + * after the transformation. Otherwise, std::nullopt. + * + * \see unary_op::can_reduce_operand() + * \see unary_op::reduce_operand() + */ +std::optional> +NormalizeUnaryOperation(const unary_op & operation, const std::vector & operands); + } #endif diff --git a/tests/jlm/rvsdg/UnaryOperationTests.cpp b/tests/jlm/rvsdg/UnaryOperationTests.cpp new file mode 100644 index 000000000..4dff9c6b1 --- /dev/null +++ b/tests/jlm/rvsdg/UnaryOperationTests.cpp @@ -0,0 +1,170 @@ +/* + * Copyright 2025 Nico Reißmann + * See COPYING for terms of redistribution. + */ + +#include +#include +#include + +#include +#include +#include +#include + +class NullaryOperation final : public jlm::rvsdg::nullary_op +{ +public: + explicit NullaryOperation(const std::shared_ptr & resultType) + : nullary_op(resultType) + {} + + bool + operator==(const Operation &) const noexcept override + { + JLM_UNREACHABLE("Not implemented."); + } + + [[nodiscard]] std::string + debug_string() const override + { + return "NullaryOperation"; + } + + [[nodiscard]] std::unique_ptr + copy() const override + { + return std::make_unique(this->result(0)); + } +}; + +class UnaryOperation final : public jlm::rvsdg::unary_op +{ +public: + UnaryOperation( + const std::shared_ptr & operandType, + const std::shared_ptr & resultType) + : unary_op(operandType, resultType) + {} + + jlm::rvsdg::unop_reduction_path_t + can_reduce_operand(const jlm::rvsdg::output * operand) const noexcept override + { + + if (const auto node = jlm::rvsdg::TryGetOwnerNode(*operand); + jlm::rvsdg::is(node)) + { + return jlm::rvsdg::unop_reduction_constant; + } + + return 
jlm::rvsdg::unop_reduction_none; + } + + jlm::rvsdg::output * + reduce_operand(jlm::rvsdg::unop_reduction_path_t path, jlm::rvsdg::output * operand) + const override + { + if (path == jlm::rvsdg::unop_reduction_constant) + { + return operand; + } + + return nullptr; + } + + bool + operator==(const Operation &) const noexcept override + { + JLM_UNREACHABLE("Not implemented."); + } + + [[nodiscard]] std::string + debug_string() const override + { + return "UnaryOperation"; + } + + [[nodiscard]] std::unique_ptr + copy() const override + { + return std::make_unique(this->argument(0), this->result(0)); + } +}; + +static int +NormalizeUnaryOperation_Success() +{ + using namespace jlm::rvsdg; + + // Arrange + Graph graph; + const auto valueType = jlm::tests::valuetype::Create(); + + const NullaryOperation nullaryOperation(valueType); + const auto nullaryNode = SimpleNode::create(&graph.GetRootRegion(), nullaryOperation, {}); + + const UnaryOperation unaryOperation(valueType, valueType); + const auto unaryNode = + SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { nullaryNode->output(0) }); + + auto & ex = jlm::tests::GraphExport::Create(*unaryNode->output(0), "o2"); + + view(graph, stdout); + + // Act + const auto success = ReduceNode(NormalizeUnaryOperation, *unaryNode); + view(graph, stdout); + + // Assert + assert(success == true); + + graph.PruneNodes(); + assert(graph.GetRootRegion().nnodes() == 1); + + const auto node = TryGetOwnerNode(*ex.origin()); + assert(node == nullaryNode); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/UnaryOperationTests-NormalizeUnaryOperation_Success", + NormalizeUnaryOperation_Success) + +static int +NormalizeUnaryOperation_Failure() +{ + using namespace jlm::rvsdg; + + // Arrange + const auto valueType = jlm::tests::valuetype::Create(); + + Graph graph; + auto i0 = &jlm::tests::GraphImport::Create(graph, valueType, "i0"); + + const UnaryOperation unaryOperation(valueType, valueType); + const auto unaryNode = 
SimpleNode::create(&graph.GetRootRegion(), unaryOperation, { i0 }); + + auto & ex = jlm::tests::GraphExport::Create(*unaryNode->output(0), "o2"); + + view(graph, stdout); + + // Act + const auto success = ReduceNode(NormalizeUnaryOperation, *unaryNode); + view(graph, stdout); + + // Assert + assert(success == false); + + graph.PruneNodes(); + assert(graph.GetRootRegion().nnodes() == 1); + + const auto node = TryGetOwnerNode(*ex.origin()); + assert(node == unaryNode); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/UnaryOperationTests-NormalizeUnaryOperation_Failure", + NormalizeUnaryOperation_Failure) From 84c8433719464397b8d3bd0f245c1b9d0ba7fbf0 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sat, 4 Jan 2025 22:11:57 +0100 Subject: [PATCH 159/170] Move load-store reduction test to LoadTests.cpp (#716) --- jlm/llvm/Makefile.sub | 1 - tests/jlm/llvm/ir/operators/LoadTests.cpp | 64 +++++++++++++++++- tests/jlm/llvm/opt/TestLoadStoreReduction.cpp | 66 ------------------- 3 files changed, 61 insertions(+), 70 deletions(-) delete mode 100644 tests/jlm/llvm/opt/TestLoadStoreReduction.cpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index 7fc105124..e956bd35c 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -208,7 +208,6 @@ libllvm_TESTS += \ tests/jlm/llvm/opt/TestDeadNodeElimination \ tests/jlm/llvm/opt/test-inlining \ tests/jlm/llvm/opt/test-inversion \ - tests/jlm/llvm/opt/TestLoadStoreReduction \ tests/jlm/llvm/opt/test-pull \ tests/jlm/llvm/opt/test-push \ tests/jlm/llvm/opt/test-unroll \ diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index a647ad373..ec26c09fe 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -392,7 +392,7 @@ JLM_UNIT_TEST_REGISTER( TestLoadStoreStateReduction) static int -TestLoadStoreReduction() +TestLoadStoreReduction_Success() { using namespace jlm::llvm; @@ -434,8 +434,66 @@ 
TestLoadStoreReduction() } JLM_UNIT_TEST_REGISTER( - "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadStoreReduction", - TestLoadStoreReduction) + "jlm/llvm/ir/operators/LoadNonVolatileTests-LoadStoreReduction_Success", + TestLoadStoreReduction_Success) + +/** + * Tests the load-store reduction with the value type of the store being different from the + * value type of the load. + */ +static int +LoadStoreReduction_DifferentValueOperandType() +{ + using namespace jlm::llvm; + + // Arrange + const auto pointerType = PointerType::Create(); + const auto memoryStateType = MemoryStateType::Create(); + + jlm::rvsdg::Graph graph; + auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); + nf->set_mutable(false); + nf->set_load_store_reducible(false); + + auto & address = jlm::tests::GraphImport::Create(graph, pointerType, "address"); + auto & value = jlm::tests::GraphImport::Create(graph, jlm::rvsdg::bittype::Create(32), "value"); + auto memoryState = &jlm::tests::GraphImport::Create(graph, memoryStateType, "memoryState"); + + auto & storeNode = StoreNonVolatileNode::CreateNode(address, value, { memoryState }, 4); + auto & loadNode = LoadNonVolatileNode::CreateNode( + address, + outputs(&storeNode), + jlm::rvsdg::bittype::Create(8), + 4); + + auto & exportedValue = GraphExport::Create(*loadNode.output(0), "v"); + GraphExport::Create(*loadNode.output(1), "s"); + + view(&graph.GetRootRegion(), stdout); + + // Act + const auto success = + jlm::rvsdg::ReduceNode(NormalizeLoadStore, loadNode); + graph.PruneNodes(); + + view(&graph.GetRootRegion(), stdout); + + // Assert + assert(success == false); + + const auto expectedLoadNode = jlm::rvsdg::output::GetNode(*exportedValue.origin()); + assert(expectedLoadNode == &loadNode); + assert(expectedLoadNode->ninputs() == 2); + + const auto expectedStoreNode = jlm::rvsdg::output::GetNode(*expectedLoadNode->input(1)->origin()); + assert(expectedStoreNode == &storeNode); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + 
"jlm/llvm/ir/operators/LoadNonVolatileTests-LoadStoreReduction_DifferentValueOperandType", + LoadStoreReduction_DifferentValueOperandType) static int TestLoadLoadReduction() diff --git a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp b/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp deleted file mode 100644 index 195adea5c..000000000 --- a/tests/jlm/llvm/opt/TestLoadStoreReduction.cpp +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright 2024 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#include -#include - -#include -#include -#include -#include -#include - -/** - * Tests the load-store reduction with the value type of the store being different than the - * value type of the load. - */ -static int -TestLoadStoreReductionWithDifferentValueOperandType() -{ - using namespace jlm::llvm; - - // Arrange - auto pointerType = PointerType::Create(); - auto memoryStateType = MemoryStateType::Create(); - - jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_store_reducible(false); - - auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); - auto value = &jlm::tests::GraphImport::Create(graph, jlm::rvsdg::bittype::Create(32), "value"); - auto memoryState = &jlm::tests::GraphImport::Create(graph, memoryStateType, "memoryState"); - - auto storeResults = StoreNonVolatileNode::Create(address, value, { memoryState }, 4); - auto loadResults = - LoadNonVolatileNode::Create(address, storeResults, jlm::rvsdg::bittype::Create(8), 4); - - auto & exportedValue = GraphExport::Create(*loadResults[0], "v"); - GraphExport::Create(*loadResults[1], "s"); - - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - - // Act - nf->set_mutable(true); - nf->set_load_store_reducible(true); - graph.Normalize(); - graph.PruneNodes(); - - jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - - // Assert - auto load = jlm::rvsdg::output::GetNode(*exportedValue.origin()); - assert(is(load)); - 
assert(load->ninputs() == 2); - auto store = jlm::rvsdg::output::GetNode(*load->input(1)->origin()); - assert(is(store)); - - return 0; -} - -JLM_UNIT_TEST_REGISTER( - "jlm/llvm/opt/TestLoadStoreReductionWithDifferentValueOperandType", - TestLoadStoreReductionWithDifferentValueOperandType) From 07884f13257da75714fc91034eef08047a45c3bb Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Sat, 4 Jan 2025 23:23:48 +0100 Subject: [PATCH 160/170] SimpleNode: convenience builder functions (#704) Add constructor as well as helper function to create simple and its operator in a single call. Allow to move the created operator in (instead of copying it). --- jlm/rvsdg/simple-node.cpp | 26 ++++++++++ jlm/rvsdg/simple-node.hpp | 99 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 125 insertions(+) diff --git a/jlm/rvsdg/simple-node.cpp b/jlm/rvsdg/simple-node.cpp index 457d91689..0596cfc7e 100644 --- a/jlm/rvsdg/simple-node.cpp +++ b/jlm/rvsdg/simple-node.cpp @@ -67,6 +67,32 @@ SimpleNode::SimpleNode( on_node_create(this); } +SimpleNode::SimpleNode( + rvsdg::Region & region, + std::unique_ptr operation, + const std::vector & operands) + : Node(std::move(operation), ®ion) +{ + if (SimpleNode::GetOperation().narguments() != operands.size()) + throw jlm::util::error(jlm::util::strfmt( + "Argument error - expected ", + SimpleNode::GetOperation().narguments(), + ", received ", + operands.size(), + " arguments.")); + + for (size_t n = 0; n < SimpleNode::GetOperation().narguments(); n++) + { + add_input( + std::make_unique(this, operands[n], SimpleNode::GetOperation().argument(n))); + } + + for (size_t n = 0; n < SimpleNode::GetOperation().nresults(); n++) + add_output(std::make_unique(this, SimpleNode::GetOperation().result(n))); + + on_node_create(this); +} + const SimpleOperation & SimpleNode::GetOperation() const noexcept { diff --git a/jlm/rvsdg/simple-node.hpp b/jlm/rvsdg/simple-node.hpp index 606e3e1c2..e13fc28b0 100644 --- 
a/jlm/rvsdg/simple-node.hpp +++ b/jlm/rvsdg/simple-node.hpp @@ -29,6 +29,11 @@ class SimpleNode : public Node const SimpleOperation & op, const std::vector & operands); + SimpleNode( + rvsdg::Region & region, + std::unique_ptr operation, + const std::vector & operands); + public: jlm::rvsdg::simple_input * input(size_t index) const noexcept; @@ -54,6 +59,15 @@ class SimpleNode : public Node return new SimpleNode(region, op, operands); } + static inline jlm::rvsdg::SimpleNode & + Create( + rvsdg::Region & region, + std::unique_ptr operation, + const std::vector & operands) + { + return *new SimpleNode(region, std::move(operation), operands); + } + static inline std::vector create_normalized( rvsdg::Region * region, @@ -120,6 +134,91 @@ SimpleNode::output(size_t index) const noexcept return static_cast(Node::output(index)); } +/** + * \brief Creates a simple node characterized by its operator. + * + * \tparam OperatorType + * The type of operator wrapped by the node. + * + * \tparam OperatorArguments + * Argument types of the operator to be constructed (should be + * implied, just specify the OperatorType). + * + * \param operands + * The operands to the operator (i.e. inputs to the node to be constructed). + * + * \param operatorArguments + * Constructor arguments for the operator to be constructed. + * + * \returns + * Reference to the node constructed. + * + * \pre + * \p operands must be non-empty, must be in the same region, and their + * types must match the operator constructed by this call. + * + * Constructs a new operator of type \p OperatorType using \p operatorArguments + * as constructor arguments. Creates a simple node using the constructed operator + * and the given \p operands as operands to the constructed operator. + * + * Usage example: + * \code + * auto element_ptr = CreateOpNode( + * { ptr }, offsetTypes, pointeeTypes).outputs(0); + * \endcode + */ +template +SimpleNode & +CreateOpNode(const std::vector & operands, OperatorArguments... 
operatorArguments) +{ + JLM_ASSERT(!operands.empty()); + return SimpleNode::Create( + *operands[0]->region(), + std::make_unique(std::move(operatorArguments)...), + operands); +} + +/** + * \brief Creates a simple node characterized by its operator. + * + * \tparam OperatorType + * The type of operator wrapped by the node. + * + * \tparam OperatorArguments + * Argument types of the operator to be constructed (should be + * implied, just specify the OperatorType). + * + * \param region + * The region to create the node in. + * + * \param operatorArguments + * Constructor arguments for the operator to be constructed. + * + * \returns + * Reference to the node constructed. + * + * \pre + * The given operator must not take any operands. + * + * Constructs a new operator of type \p OperatorType using \p operatorArguments + * as constructor arguments. Creates a simple node using the constructed operator + * with no operands in the specified region. + * + * Usage example: + * \code + * auto val = CreateOpNode(region, 42).outputs(0); + * \endcode + */ +template +SimpleNode & +CreateOpNode(Region & region, OperatorArguments... operatorArguments) +{ + return SimpleNode::Create( + region, + std::make_unique(std::move(operatorArguments)...), + {}); +} + } #endif From 0235808524fa9b7acd243403c060d0e884ab11fa Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Sat, 4 Jan 2025 23:57:34 +0100 Subject: [PATCH 161/170] Function pointer conversion (#705) Add function / pointer conversion llvm operators. Accept these operators as top-level nodes in the various transformation passes, and handle them in instruction conversion. There are presently no "users" of these operators or anything that puts them into the graph -- so this is preparatory for making function and pointer distinction in a later commit. 
--- jlm/llvm/Makefile.sub | 2 + jlm/llvm/backend/jlm2llvm/instruction.cpp | 25 ++- jlm/llvm/ir/operators/FunctionPointer.cpp | 158 ++++++++++++++++++ jlm/llvm/ir/operators/FunctionPointer.hpp | 95 +++++++++++ jlm/llvm/opt/InvariantValueRedirection.cpp | 7 + .../RegionAwareMemoryNodeProvider.cpp | 9 + .../TopDownMemoryNodeEliminator.cpp | 7 + 7 files changed, 302 insertions(+), 1 deletion(-) create mode 100644 jlm/llvm/ir/operators/FunctionPointer.cpp create mode 100644 jlm/llvm/ir/operators/FunctionPointer.hpp diff --git a/jlm/llvm/Makefile.sub b/jlm/llvm/Makefile.sub index e956bd35c..3fa612e29 100644 --- a/jlm/llvm/Makefile.sub +++ b/jlm/llvm/Makefile.sub @@ -27,6 +27,7 @@ libllvm_SOURCES = \ jlm/llvm/ir/operators/alloca.cpp \ jlm/llvm/ir/operators/call.cpp \ jlm/llvm/ir/operators/delta.cpp \ + jlm/llvm/ir/operators/FunctionPointer.cpp \ jlm/llvm/ir/operators/GetElementPtr.cpp \ jlm/llvm/ir/operators/lambda.cpp \ jlm/llvm/ir/operators/Load.cpp \ @@ -123,6 +124,7 @@ libllvm_HEADERS = \ jlm/llvm/ir/operators/Load.hpp \ jlm/llvm/ir/operators/MemCpy.hpp \ jlm/llvm/ir/operators/MemoryStateOperations.hpp \ + jlm/llvm/ir/operators/FunctionPointer.hpp \ jlm/llvm/ir/operators/GetElementPtr.hpp \ jlm/llvm/ir/operators/delta.hpp \ jlm/llvm/ir/operators/Store.hpp \ diff --git a/jlm/llvm/backend/jlm2llvm/instruction.cpp b/jlm/llvm/backend/jlm2llvm/instruction.cpp index 5272ccc73..c5aeff41e 100644 --- a/jlm/llvm/backend/jlm2llvm/instruction.cpp +++ b/jlm/llvm/backend/jlm2llvm/instruction.cpp @@ -9,6 +9,7 @@ #include #include #include +#include #include #include @@ -1001,6 +1002,26 @@ convert( return nullptr; } +static ::llvm::Value * +convert( + const PointerToFunctionOperation &, + const std::vector & operands, + ::llvm::IRBuilder<> &, + context & ctx) +{ + return ctx.value(operands[0]); +} + +static ::llvm::Value * +convert( + const FunctionToPointerOperation &, + const std::vector & operands, + ::llvm::IRBuilder<> &, + context & ctx) +{ + return ctx.value(operands[0]); +} + 
template static ::llvm::Value * convert( @@ -1094,7 +1115,9 @@ convert_operation( { typeid(CallEntryMemoryStateMergeOperation), convert }, { typeid(CallExitMemoryStateSplitOperation), - convert } }); + convert }, + { typeid(PointerToFunctionOperation), convert }, + { typeid(FunctionToPointerOperation), convert } }); /* FIXME: AddrSpaceCast instruction is not supported */ JLM_ASSERT(map.find(std::type_index(typeid(op))) != map.end()); diff --git a/jlm/llvm/ir/operators/FunctionPointer.cpp b/jlm/llvm/ir/operators/FunctionPointer.cpp new file mode 100644 index 000000000..76f823db8 --- /dev/null +++ b/jlm/llvm/ir/operators/FunctionPointer.cpp @@ -0,0 +1,158 @@ +/* + * Copyright 2024 Helge Bahmann + * See COPYING for terms of redistribution. + */ + +#include +#include + +namespace jlm::llvm +{ + +FunctionToPointerOperation::~FunctionToPointerOperation() noexcept +{} + +FunctionToPointerOperation::FunctionToPointerOperation(std::shared_ptr fn) + : unary_op(fn, llvm::PointerType::Create()), + FunctionType_(std::move(fn)) +{} + +bool +FunctionToPointerOperation::operator==(const Operation & other) const noexcept +{ + if (auto o = dynamic_cast(&other)) + { + return *FunctionType() == *o->FunctionType(); + } + else + { + return false; + } +} + +[[nodiscard]] std::string +FunctionToPointerOperation::debug_string() const +{ + return "FunPtr(" + FunctionType()->debug_string() + ")"; +} + +[[nodiscard]] std::unique_ptr +FunctionToPointerOperation::copy() const +{ + return Create(FunctionType()); +} + +rvsdg::unop_reduction_path_t +FunctionToPointerOperation::can_reduce_operand(const jlm::rvsdg::output * arg) const noexcept +{ + if (auto node = rvsdg::TryGetOwnerNode(*arg)) + { + if (auto op = dynamic_cast(&node->GetOperation())) + { + if (*op->FunctionType() == *FunctionType()) + { + return rvsdg::unop_reduction_inverse; + } + } + } + return rvsdg::unop_reduction_none; +} + +jlm::rvsdg::output * +FunctionToPointerOperation::reduce_operand( + rvsdg::unop_reduction_path_t path, + 
jlm::rvsdg::output * arg) const +{ + if (auto node = rvsdg::TryGetOwnerNode(*arg)) + { + if (auto op = dynamic_cast(&node->GetOperation())) + { + if (*op->FunctionType() == *FunctionType() && path == rvsdg::unop_reduction_inverse) + { + return node->input(0)->origin(); + } + } + } + return arg; +} + +std::unique_ptr +FunctionToPointerOperation::Create(std::shared_ptr fn) +{ + return std::make_unique(std::move(fn)); +} + +PointerToFunctionOperation::~PointerToFunctionOperation() noexcept +{} + +PointerToFunctionOperation::PointerToFunctionOperation(std::shared_ptr fn) + : unary_op(llvm::PointerType::Create(), fn), + FunctionType_(std::move(fn)) +{} + +bool +PointerToFunctionOperation::operator==(const Operation & other) const noexcept +{ + if (auto o = dynamic_cast(&other)) + { + return *FunctionType() == *o->FunctionType(); + } + else + { + return false; + } +} + +[[nodiscard]] std::string +PointerToFunctionOperation::debug_string() const +{ + return "PtrFun(" + FunctionType()->debug_string() + ")"; +} + +[[nodiscard]] std::unique_ptr +PointerToFunctionOperation::copy() const +{ + return Create(FunctionType()); +} + +rvsdg::unop_reduction_path_t +PointerToFunctionOperation::can_reduce_operand(const jlm::rvsdg::output * arg) const noexcept +{ + if (auto node = rvsdg::TryGetOwnerNode(*arg)) + { + if (auto op = dynamic_cast(&node->GetOperation())) + { + if (*op->FunctionType() == *FunctionType()) + { + return rvsdg::unop_reduction_inverse; + } + } + } + return rvsdg::unop_reduction_none; +} + +jlm::rvsdg::output * +PointerToFunctionOperation::reduce_operand( + rvsdg::unop_reduction_path_t path, + jlm::rvsdg::output * arg) const +{ + if (auto node = rvsdg::TryGetOwnerNode(*arg)) + { + if (auto op = dynamic_cast(&node->GetOperation())) + { + if (*op->FunctionType() == *FunctionType() && path == rvsdg::unop_reduction_inverse) + { + return node->input(0)->origin(); + } + } + } + return arg; +} + +std::unique_ptr +PointerToFunctionOperation::Create(std::shared_ptr fn) +{ + 
return std::make_unique(std::move(fn)); +} + +} diff --git a/jlm/llvm/ir/operators/FunctionPointer.hpp b/jlm/llvm/ir/operators/FunctionPointer.hpp new file mode 100644 index 000000000..b7b0d4743 --- /dev/null +++ b/jlm/llvm/ir/operators/FunctionPointer.hpp @@ -0,0 +1,95 @@ +/* + * Copyright 2024 Helge Bahmann + * See COPYING for terms of redistribution. + */ + +#ifndef JLM_LLVM_IR_OPERATORS_FUNCTIONPOINTER_HPP +#define JLM_LLVM_IR_OPERATORS_FUNCTIONPOINTER_HPP + +#include +#include +#include +#include + +namespace jlm::llvm +{ + +/** + \brief Get address of compiled function object. + */ +class FunctionToPointerOperation final : public rvsdg::unary_op +{ +public: + ~FunctionToPointerOperation() noexcept override; + + FunctionToPointerOperation(std::shared_ptr fn); + + bool + operator==(const Operation & other) const noexcept override; + + [[nodiscard]] std::string + debug_string() const override; + + [[nodiscard]] std::unique_ptr + copy() const override; + + rvsdg::unop_reduction_path_t + can_reduce_operand(const jlm::rvsdg::output * arg) const noexcept override; + + jlm::rvsdg::output * + reduce_operand(rvsdg::unop_reduction_path_t path, jlm::rvsdg::output * arg) const override; + + static std::unique_ptr + Create(std::shared_ptr fn); + + inline const std::shared_ptr & + FunctionType() const noexcept + { + return FunctionType_; + } + +private: + std::shared_ptr FunctionType_; +}; + +/** + \brief Interpret pointer as callable function. 
+ */ +class PointerToFunctionOperation final : public rvsdg::unary_op +{ +public: + ~PointerToFunctionOperation() noexcept override; + + PointerToFunctionOperation(std::shared_ptr fn); + + bool + operator==(const Operation & other) const noexcept override; + + [[nodiscard]] std::string + debug_string() const override; + + [[nodiscard]] std::unique_ptr + copy() const override; + + rvsdg::unop_reduction_path_t + can_reduce_operand(const jlm::rvsdg::output * arg) const noexcept override; + + jlm::rvsdg::output * + reduce_operand(rvsdg::unop_reduction_path_t path, jlm::rvsdg::output * arg) const override; + + static std::unique_ptr + Create(std::shared_ptr fn); + + inline const std::shared_ptr & + FunctionType() const noexcept + { + return FunctionType_; + } + +private: + std::shared_ptr FunctionType_; +}; + +} + +#endif // JLM_LLVM_IR_OPERATORS_FUNCTIONPOINTER_HPP diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index 8b842532f..c6253d83a 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -5,6 +5,7 @@ #include #include +#include #include #include #include @@ -84,6 +85,12 @@ InvariantValueRedirection::RedirectInRootRegion(rvsdg::Graph & rvsdg) // Nothing needs to be done. // Delta nodes are irrelevant for invariant value redirection. } + else if ( + is(node->GetOperation()) + || is(node->GetOperation())) + { + // Nothing needs to be done. 
+ } else { JLM_UNREACHABLE("Unhandled node type."); diff --git a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp index 62fa0fba5..48728abba 100644 --- a/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp +++ b/jlm/llvm/opt/alias-analyses/RegionAwareMemoryNodeProvider.cpp @@ -4,6 +4,7 @@ */ #include +#include #include #include #include @@ -874,6 +875,14 @@ RegionAwareMemoryNodeProvider::Propagate(const RvsdgModule & rvsdgModule) // Nothing needs to be done for delta nodes. continue; } + else if ( + is(node->GetOperation()) + || is(node->GetOperation())) + { + // Few operators may appear as top-level constructs and simply must + // be ignored. + continue; + } else { JLM_UNREACHABLE("Unhandled node type!"); diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index d0f4313ea..eede0c420 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -3,6 +3,7 @@ * See COPYING for terms of redistribution. */ +#include #include #include #include @@ -492,6 +493,12 @@ TopDownMemoryNodeEliminator::EliminateTopDownRootRegion(rvsdg::Region & region) { // Nothing needs to be done. } + else if ( + is(node->GetOperation()) + || is(node->GetOperation())) + { + // Nothing needs to be done. 
+ } else { JLM_UNREACHABLE("Unhandled node type!"); From 69a77ca9ded59223d1d6cef00b50310b1c956e40 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 5 Jan 2025 20:32:45 +0100 Subject: [PATCH 162/170] Remove gamma node normal form (#715) Co-authored-by: HKrogstie --- jlm/rvsdg/gamma.cpp | 110 --------------------------------- jlm/rvsdg/gamma.hpp | 57 ----------------- tests/jlm/rvsdg/test-gamma.cpp | 4 -- 3 files changed, 171 deletions(-) diff --git a/jlm/rvsdg/gamma.cpp b/jlm/rvsdg/gamma.cpp index 11c4d0a7d..fa0e8c906 100644 --- a/jlm/rvsdg/gamma.cpp +++ b/jlm/rvsdg/gamma.cpp @@ -149,99 +149,6 @@ perform_control_constant_reduction(std::unordered_set & ou } } -gamma_normal_form::~gamma_normal_form() noexcept -{} - -gamma_normal_form::gamma_normal_form( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) noexcept - : structural_normal_form(operator_class, parent, graph), - enable_predicate_reduction_(false), - enable_invariant_reduction_(false), - enable_control_constant_reduction_(false) -{ - if (auto p = dynamic_cast(parent)) - { - enable_predicate_reduction_ = p->enable_predicate_reduction_; - enable_invariant_reduction_ = p->enable_invariant_reduction_; - enable_control_constant_reduction_ = p->enable_control_constant_reduction_; - } -} - -bool -gamma_normal_form::normalize_node(Node * node_) const -{ - auto node = util::AssertedCast(node_); - - if (!get_mutable()) - return true; - - if (get_predicate_reduction() && is_predicate_reducible(node)) - { - perform_predicate_reduction(node); - return false; - } - - bool was_normalized = true; - if (get_invariant_reduction()) - was_normalized |= perform_invariant_reduction(node); - - auto outputs = is_control_constant_reducible(node); - if (get_control_constant_reduction() && !outputs.empty()) - { - perform_control_constant_reduction(outputs); - was_normalized = false; - } - - return was_normalized; -} - -void -gamma_normal_form::set_predicate_reduction(bool enable) 
-{ - if (enable_predicate_reduction_ == enable) - { - return; - } - - children_set(enable); - - enable_predicate_reduction_ = enable; - - if (enable && get_mutable()) - graph()->MarkDenormalized(); -} - -void -gamma_normal_form::set_invariant_reduction(bool enable) -{ - if (enable_invariant_reduction_ == enable) - { - return; - } - - children_set(enable); - - enable_invariant_reduction_ = enable; - - if (enable && get_mutable()) - graph()->MarkDenormalized(); -} - -void -gamma_normal_form::set_control_constant_reduction(bool enable) -{ - if (enable_control_constant_reduction_ == enable) - return; - - children_set(enable); - - enable_control_constant_reduction_ = enable; - if (enable && get_mutable()) - graph()->MarkDenormalized(); -} - bool ReduceGammaWithStaticallyKnownPredicate(Node & node) { @@ -498,20 +405,3 @@ GetGammaInvariantOrigin(const GammaNode & gamma, const GammaNode::ExitVar & exit } } - -jlm::rvsdg::node_normal_form * -gamma_node_get_default_normal_form_( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::Graph * graph) -{ - return new jlm::rvsdg::gamma_normal_form(operator_class, parent, graph); -} - -static void __attribute__((constructor)) -register_node_normal_form() -{ - jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::GammaOperation), - gamma_node_get_default_normal_form_); -} diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index e09d53bfb..93f56fed5 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -17,56 +17,6 @@ namespace jlm::rvsdg { -/* gamma normal form */ - -class gamma_normal_form final : public structural_normal_form -{ -public: - virtual ~gamma_normal_form() noexcept; - - gamma_normal_form( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) noexcept; - - virtual bool - normalize_node(Node * node) const override; - - virtual void - set_predicate_reduction(bool enable); - - inline bool - 
get_predicate_reduction() const noexcept - { - return enable_predicate_reduction_; - } - - virtual void - set_invariant_reduction(bool enable); - - inline bool - get_invariant_reduction() const noexcept - { - return enable_invariant_reduction_; - } - - virtual void - set_control_constant_reduction(bool enable); - - inline bool - get_control_constant_reduction() const noexcept - { - return enable_control_constant_reduction_; - } - -private: - bool enable_predicate_reduction_; - bool enable_invariant_reduction_; - bool enable_control_constant_reduction_; -}; - -/* gamma operation */ - class output; class Type; @@ -95,13 +45,6 @@ class GammaOperation final : public StructuralOperation virtual bool operator==(const Operation & other) const noexcept override; - static jlm::rvsdg::gamma_normal_form * - normal_form(Graph * graph) noexcept - { - return static_cast( - graph->GetNodeNormalForm(typeid(GammaOperation))); - } - private: size_t nalternatives_; }; diff --git a/tests/jlm/rvsdg/test-gamma.cpp b/tests/jlm/rvsdg/test-gamma.cpp index f56e8ed7f..63e315e10 100644 --- a/tests/jlm/rvsdg/test-gamma.cpp +++ b/tests/jlm/rvsdg/test-gamma.cpp @@ -56,8 +56,6 @@ test_predicate_reduction() // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_predicate_reduction(false); - bittype bits2(2); auto v0 = &jlm::tests::GraphImport::Create(graph, bittype::Create(32), ""); @@ -127,7 +125,6 @@ test_control_constant_reduction() // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_control_constant_reduction(false); auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(1), "x"); @@ -170,7 +167,6 @@ test_control_constant_reduction2() // Arrange Graph graph; - GammaOperation::normal_form(&graph)->set_control_constant_reduction(false); auto import = &jlm::tests::GraphImport::Create(graph, bittype::Create(2), "import"); From 4905632ecedd9566f429730c0b69142fc0dab9e3 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Sun, 5 Jan 2025 21:12:17 +0100 Subject: 
[PATCH 163/170] Remvoe structural normal form class (#718) --- jlm/rvsdg/Makefile.sub | 2 -- jlm/rvsdg/gamma.hpp | 2 -- jlm/rvsdg/operation.cpp | 8 ------ jlm/rvsdg/operation.hpp | 3 --- jlm/rvsdg/structural-normal-form.cpp | 39 ---------------------------- jlm/rvsdg/structural-normal-form.hpp | 27 ------------------- 6 files changed, 81 deletions(-) delete mode 100644 jlm/rvsdg/structural-normal-form.cpp delete mode 100644 jlm/rvsdg/structural-normal-form.hpp diff --git a/jlm/rvsdg/Makefile.sub b/jlm/rvsdg/Makefile.sub index 1b17fc55e..75269042c 100644 --- a/jlm/rvsdg/Makefile.sub +++ b/jlm/rvsdg/Makefile.sub @@ -11,7 +11,6 @@ librvsdg_SOURCES = \ jlm/rvsdg/region.cpp \ jlm/rvsdg/simple-normal-form.cpp \ jlm/rvsdg/simple-node.cpp \ - jlm/rvsdg/structural-normal-form.cpp \ jlm/rvsdg/structural-node.cpp \ jlm/rvsdg/theta.cpp \ jlm/rvsdg/tracker.cpp \ @@ -53,7 +52,6 @@ librvsdg_HEADERS = \ jlm/rvsdg/type.hpp \ jlm/rvsdg/binary.hpp \ jlm/rvsdg/theta.hpp \ - jlm/rvsdg/structural-normal-form.hpp \ jlm/rvsdg/reduction-helpers.hpp \ jlm/rvsdg/RvsdgModule.hpp \ jlm/rvsdg/bitstring.hpp \ diff --git a/jlm/rvsdg/gamma.hpp b/jlm/rvsdg/gamma.hpp index 93f56fed5..3b96d2a78 100644 --- a/jlm/rvsdg/gamma.hpp +++ b/jlm/rvsdg/gamma.hpp @@ -10,9 +10,7 @@ #include #include -#include #include -#include namespace jlm::rvsdg { diff --git a/jlm/rvsdg/operation.cpp b/jlm/rvsdg/operation.cpp index e22da1ee2..62a8443af 100644 --- a/jlm/rvsdg/operation.cpp +++ b/jlm/rvsdg/operation.cpp @@ -6,7 +6,6 @@ #include #include -#include namespace jlm::rvsdg { @@ -59,11 +58,4 @@ StructuralOperation::operator==(const Operation & other) const noexcept return typeid(*this) == typeid(other); } -jlm::rvsdg::structural_normal_form * -StructuralOperation::normal_form(Graph * graph) noexcept -{ - return static_cast( - graph->GetNodeNormalForm(typeid(StructuralOperation))); -} - } diff --git a/jlm/rvsdg/operation.hpp b/jlm/rvsdg/operation.hpp index f8c63cce5..a8f4a2c2e 100644 --- a/jlm/rvsdg/operation.hpp +++ 
b/jlm/rvsdg/operation.hpp @@ -96,9 +96,6 @@ class StructuralOperation : public Operation public: virtual bool operator==(const Operation & other) const noexcept override; - - static jlm::rvsdg::structural_normal_form * - normal_form(Graph * graph) noexcept; }; } diff --git a/jlm/rvsdg/structural-normal-form.cpp b/jlm/rvsdg/structural-normal-form.cpp deleted file mode 100644 index bf2aee5d9..000000000 --- a/jlm/rvsdg/structural-normal-form.cpp +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2017 Nico Reißmann - * See COPYING for terms of redistribution. - */ - -#include -#include - -namespace jlm::rvsdg -{ - -structural_normal_form::~structural_normal_form() noexcept -{} - -structural_normal_form::structural_normal_form( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) noexcept - : node_normal_form(operator_class, parent, graph) -{} - -} - -static jlm::rvsdg::node_normal_form * -get_default_normal_form( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::Graph * graph) -{ - return new jlm::rvsdg::structural_normal_form(operator_class, parent, graph); -} - -static void __attribute__((constructor)) -register_node_normal_form() -{ - jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::rvsdg::StructuralOperation), - get_default_normal_form); -} diff --git a/jlm/rvsdg/structural-normal-form.hpp b/jlm/rvsdg/structural-normal-form.hpp deleted file mode 100644 index 2c754f2ca..000000000 --- a/jlm/rvsdg/structural-normal-form.hpp +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2017 Nico Reißmann - * See COPYING for terms of redistribution. 
- */ - -#ifndef JLM_RVSDG_STRUCTURAL_NORMAL_FORM_HPP -#define JLM_RVSDG_STRUCTURAL_NORMAL_FORM_HPP - -#include - -namespace jlm::rvsdg -{ - -class structural_normal_form : public node_normal_form -{ -public: - virtual ~structural_normal_form() noexcept; - - structural_normal_form( - const std::type_info & operator_class, - jlm::rvsdg::node_normal_form * parent, - Graph * graph) noexcept; -}; - -} - -#endif From 7c2e404f539e03a5d8e08a3c5740ba4fe812ce6d Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Mon, 6 Jan 2025 20:10:33 +0100 Subject: [PATCH 164/170] Split up huge bitslice and bitconcat test into smaller unit tests (#719) --- tests/jlm/rvsdg/bitstring/bitstring.cpp | 297 ++++++++++++++++++------ 1 file changed, 223 insertions(+), 74 deletions(-) diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index 692c1bd11..f2f589c07 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -1075,108 +1075,258 @@ types_bitstring_test_reduction() } static int -types_bitstring_test_slice_concat() +SliceOfConstant() { using namespace jlm::rvsdg; + // Arrange & Act + const Graph graph; + const auto constant = create_bitconstant(&graph.GetRootRegion(), "00110111"); + const auto slice = bitslice(constant, 2, 6); + auto & ex = jlm::tests::GraphExport::Create(*slice, "dummy"); + + view(graph, stdout); + + // Assert + const auto node = output::GetNode(*ex.origin()); + auto & operation = dynamic_cast(node->GetOperation()); + assert(operation.value() == bitvalue_repr("1101")); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-SliceOfConstant", SliceOfConstant); + +static int +SliceOfSlice() +{ + using namespace jlm::rvsdg; + + // Arrange & Act Graph graph; + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); - auto base_const1 = create_bitconstant(&graph.GetRootRegion(), "00110111"); - auto base_const2 = create_bitconstant(&graph.GetRootRegion(), 
"11001000"); + auto slice1 = bitslice(x, 2, 6); + auto slice2 = bitslice(slice1, 1, 3); - auto base_x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); - auto base_y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); - auto base_z = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "z"); + auto & ex = jlm::tests::GraphExport::Create(*slice2, "dummy"); + view(graph, stdout); - { - /* slice of constant */ - auto a = output::GetNode(*jlm::rvsdg::bitslice(base_const1, 2, 6)); + // Assert + const auto node = output::GetNode(*ex.origin()); + const auto operation = dynamic_cast(&node->GetOperation()); + assert(operation->low() == 3 && operation->high() == 5); - auto & op = dynamic_cast(a->GetOperation()); - assert(op.value() == bitvalue_repr("1101")); - } + return 0; +} - { - /* slice of slice */ - auto a = jlm::rvsdg::bitslice(base_x, 2, 6); - auto b = output::GetNode(*jlm::rvsdg::bitslice(a, 1, 3)); +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-SliceOfSlice", SliceOfSlice); - assert(dynamic_cast(&b->GetOperation())); - const bitslice_op * attrs; - attrs = dynamic_cast(&b->GetOperation()); - assert(attrs->low() == 3 && attrs->high() == 5); - } +static int +SliceOfFullNode() +{ + using namespace jlm::rvsdg; - { - /* slice of full node */ - auto a = jlm::rvsdg::bitslice(base_x, 0, 8); + // Arrange & Act + Graph graph; + const auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); - assert(a == base_x); - } + auto sliceResult = bitslice(x, 0, 8); - { - /* slice of concat */ - auto a = jlm::rvsdg::bitconcat({ base_x, base_y }); - auto b = jlm::rvsdg::bitslice(a, 0, 8); + auto & ex = jlm::tests::GraphExport::Create(*sliceResult, "dummy"); + view(graph, stdout); - assert(static_cast(&b->type())->nbits() == 8); + // Assert + assert(ex.origin() == x); - assert(b == base_x); - } + return 0; +} - { - /* concat flattening */ - auto a = jlm::rvsdg::bitconcat({ base_x, base_y }); - auto b = 
output::GetNode(*jlm::rvsdg::bitconcat({ a, base_z })); - - assert(dynamic_cast(&b->GetOperation())); - assert(b->ninputs() == 3); - assert(b->input(0)->origin() == base_x); - assert(b->input(1)->origin() == base_y); - assert(b->input(2)->origin() == base_z); - } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-SliceOfFullNode", SliceOfFullNode); - { - /* concat of single node */ - auto a = jlm::rvsdg::bitconcat({ base_x }); +static int +SliceOfConcat() +{ + using namespace jlm::rvsdg; - assert(a == base_x); - } + // Arrange & Act + Graph graph; - { - /* concat of slices */ - auto a = jlm::rvsdg::bitslice(base_x, 0, 4); - auto b = jlm::rvsdg::bitslice(base_x, 4, 8); - auto c = jlm::rvsdg::bitconcat({ a, b }); + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + auto y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); - assert(c == base_x); - } + auto concatResult = bitconcat({ x, y }); + auto sliceResult = bitslice(concatResult, 0, 8); - { - /* concat of constants */ - auto a = output::GetNode(*jlm::rvsdg::bitconcat({ base_const1, base_const2 })); + auto & ex = jlm::tests::GraphExport::Create(*sliceResult, "dummy"); + view(graph, stdout); - auto & op = dynamic_cast(a->GetOperation()); - assert(op.value() == bitvalue_repr("0011011111001000")); - } + // Assert + const auto bitType = dynamic_cast(&ex.origin()->type()); + assert(bitType && bitType->nbits() == 8); + assert(ex.origin() == x); - { - /* CSE */ - auto b = create_bitconstant(&graph.GetRootRegion(), "00110111"); - assert(b == base_const1); + return 0; +} - auto c = jlm::rvsdg::bitslice(base_x, 2, 6); - auto d = jlm::rvsdg::bitslice(base_x, 2, 6); - assert(c == d); +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-SliceOfConcat", SliceOfConcat); - auto e = jlm::rvsdg::bitconcat({ base_x, base_y }); - auto f = jlm::rvsdg::bitconcat({ base_x, base_y }); - assert(e == f); - } +static int +ConcatFlattening() +{ + using namespace jlm::rvsdg; + + // Arrange & 
Act + Graph graph; + + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + auto y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); + auto z = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "z"); + + auto concatResult1 = bitconcat({ x, y }); + auto concatResult2 = bitconcat({ concatResult1, z }); + + auto & ex = jlm::tests::GraphExport::Create(*concatResult2, "dummy"); + view(graph, stdout); + + // Assert + auto node = output::GetNode(*ex.origin()); + assert(dynamic_cast(&node->GetOperation())); + assert(node->ninputs() == 3); + assert(node->input(0)->origin() == x); + assert(node->input(1)->origin() == y); + assert(node->input(2)->origin() == z); return 0; } +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-ConcatFlattening", ConcatFlattening); + +static int +ConcatWithSingleOperand() +{ + using namespace jlm::rvsdg; + + // Arrange & Act + Graph graph; + + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + + const auto concatResult = bitconcat({ x }); + + auto & ex = jlm::tests::GraphExport::Create(*concatResult, "dummy"); + view(graph, stdout); + + // Assert + assert(ex.origin() == x); + + return 0; +} + +JLM_UNIT_TEST_REGISTER( + "jlm/rvsdg/bitstring/bitstring-ConcatWithSingleOperand", + ConcatWithSingleOperand); + +static int +ConcatOfSlices() +{ + using namespace jlm::rvsdg; + + // Assert & Act + Graph graph; + + const auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + + auto sliceResult1 = bitslice(x, 0, 4); + auto sliceResult2 = bitslice(x, 4, 8); + const auto concatResult = bitconcat({ sliceResult1, sliceResult2 }); + + auto & ex = jlm::tests::GraphExport::Create(*concatResult, "dummy"); + view(graph, stdout); + + // Assert + assert(ex.origin() == x); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-ConcatWithOfSlices", ConcatOfSlices); + +static int +ConcatOfConstants() +{ + using namespace jlm::rvsdg; + + // Arrange & 
Act + Graph graph; + + auto c1 = create_bitconstant(&graph.GetRootRegion(), "00110111"); + auto c2 = create_bitconstant(&graph.GetRootRegion(), "11001000"); + + auto concatResult = bitconcat({ c1, c2 }); + + auto & ex = jlm::tests::GraphExport::Create(*concatResult, "dummy"); + view(graph, stdout); + + // Assert + auto node = output::GetNode(*ex.origin()); + auto operation = dynamic_cast(node->GetOperation()); + assert(operation.value() == bitvalue_repr("0011011111001000")); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-ConcatOfConstants", ConcatOfConstants); + +static int +ConcatCne() +{ + using namespace jlm::rvsdg; + + // Arrange & Act + Graph graph; + + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + auto y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); + + auto slice1 = bitslice(x, 2, 6); + auto slice2 = bitslice(x, 2, 6); + assert(slice1 == slice2); + + auto concat1 = bitconcat({ x, y }); + auto concat2 = bitconcat({ x, y }); + assert(concat1 == concat2); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-ConcatCne", ConcatCne); + +static int +SliceCne() +{ + using namespace jlm::rvsdg; + + // Arrange & Act + Graph graph; + + auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); + + auto slice1 = bitslice(x, 2, 6); + auto slice2 = bitslice(x, 2, 6); + + auto & ex1 = jlm::tests::GraphExport::Create(*slice1, "dummy"); + auto & ex2 = jlm::tests::GraphExport::Create(*slice2, "dummy"); + view(graph, stdout); + + // Assert + assert(ex1.origin() == ex2.origin()); + + return 0; +} + +JLM_UNIT_TEST_REGISTER("jlm/rvsdg/bitstring/bitstring-SliceCne", SliceCne); + static const char * bs[] = { "00000000", "11111111", "10000000", "01111111", "00001111", "XXXX0011", "XD001100", "XXXXDDDD", "10XDDX01", "0DDDDDD1" }; @@ -1704,7 +1854,6 @@ RunTests() types_bitstring_test_constant(); types_bitstring_test_normalize(); types_bitstring_test_reduction(); - 
types_bitstring_test_slice_concat(); types_bitstring_test_value_representation(); return 0; From 06c826de084de92c01d0f853991add23c24f7a2b Mon Sep 17 00:00:00 2001 From: caleridas <36173465+caleridas@users.noreply.github.com> Date: Mon, 6 Jan 2025 20:38:48 +0100 Subject: [PATCH 165/170] theta: change API for mapping loop variables (#675) Provide an API to loop nodes that allows mapping the various representation pieces of a loop variable. Remove Theta{Input|Output|Argument|Result}. --- .../backend/rvsdg2rhls/ThetaConversion.cpp | 17 +- jlm/hls/backend/rvsdg2rhls/add-prints.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/add-triggers.cpp | 2 +- .../rvsdg2rhls/distribute-constants.cpp | 24 +- jlm/hls/backend/rvsdg2rhls/mem-queue.cpp | 2 +- jlm/hls/backend/rvsdg2rhls/mem-sep.cpp | 21 +- jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp | 4 +- jlm/hls/ir/hls.cpp | 2 +- jlm/hls/ir/hls.hpp | 2 +- jlm/hls/opt/cne.cpp | 66 ++- jlm/hls/util/view.cpp | 5 +- .../InterProceduralGraphConversion.cpp | 13 +- jlm/llvm/ir/operators/call.cpp | 35 +- jlm/llvm/ir/operators/lambda.cpp | 12 +- jlm/llvm/opt/DeadNodeElimination.cpp | 25 +- jlm/llvm/opt/InvariantValueRedirection.cpp | 8 +- jlm/llvm/opt/alias-analyses/Andersen.cpp | 18 +- .../opt/alias-analyses/MemoryStateEncoder.cpp | 17 +- .../opt/alias-analyses/MemoryStateEncoder.hpp | 4 +- jlm/llvm/opt/alias-analyses/Steensgaard.cpp | 22 +- jlm/llvm/opt/cne.cpp | 63 ++- jlm/llvm/opt/inlining.cpp | 2 +- jlm/llvm/opt/inversion.cpp | 88 ++-- jlm/llvm/opt/push.cpp | 37 +- jlm/llvm/opt/unroll.cpp | 130 ++--- jlm/llvm/opt/unroll.hpp | 20 +- jlm/mlir/frontend/MlirToJlmConverter.cpp | 2 +- jlm/rvsdg/node.cpp | 14 +- jlm/rvsdg/theta.cpp | 262 +++++++--- jlm/rvsdg/theta.hpp | 471 +++++++----------- tests/TestRvsdgs.cpp | 26 +- .../rvsdg2rhls/DeadNodeEliminationTests.cpp | 4 +- .../rvsdg2rhls/MemoryConverterTests.cpp | 28 +- .../backend/rvsdg2rhls/MemoryQueueTests.cpp | 42 +- tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp | 8 +- 
.../jlm/hls/backend/rvsdg2rhls/TestTheta.cpp | 17 +- .../rvsdg2rhls/UnusedStateRemovalTests.cpp | 29 +- .../rvsdg2rhls/test-loop-passthrough.cpp | 2 +- tests/jlm/llvm/ir/operators/TestCall.cpp | 40 +- .../opt/InvariantValueRedirectionTests.cpp | 23 +- .../jlm/llvm/opt/TestDeadNodeElimination.cpp | 66 +-- .../llvm/opt/alias-analyses/TestAndersen.cpp | 2 +- .../alias-analyses/TestMemoryStateEncoder.cpp | 36 +- .../opt/alias-analyses/TestSteensgaard.cpp | 2 +- tests/jlm/llvm/opt/test-cne.cpp | 138 ++--- tests/jlm/llvm/opt/test-inversion.cpp | 24 +- tests/jlm/llvm/opt/test-push.cpp | 40 +- tests/jlm/llvm/opt/test-unroll.cpp | 74 +-- .../mlir/backend/TestJlmToMlirConverter.cpp | 4 +- .../mlir/frontend/TestMlirToJlmConverter.cpp | 2 +- tests/jlm/rvsdg/test-theta.cpp | 117 ++--- 51 files changed, 1070 insertions(+), 1044 deletions(-) diff --git a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp index 6ef9fc707..57e65889a 100644 --- a/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp +++ b/jlm/hls/backend/rvsdg2rhls/ThetaConversion.cpp @@ -22,26 +22,27 @@ ConvertThetaNode(rvsdg::ThetaNode & theta) // smap. for (size_t i = 0; i < theta.ninputs(); i++) { + auto loopvar = theta.MapInputLoopVar(*theta.input(i)); // Check if the input is a loop invariant such that a loop constant buffer should be created. // Memory state inputs are not loop variables containting a value, so we ignor these. - if (is_invariant(theta.input(i)) - && !jlm::rvsdg::is(theta.input(i)->Type())) + if (ThetaLoopVarIsInvariant(loopvar) + && !jlm::rvsdg::is(loopvar.input->Type())) { - smap.insert(theta.input(i)->argument(), loop->add_loopconst(theta.input(i)->origin())); + smap.insert(loopvar.pre, loop->add_loopconst(loopvar.input->origin())); branches.push_back(nullptr); // The HLS loop has no output for this input. The users of the theta output is // therefore redirected to the input origin, as the value is loop invariant. 
- theta.output(i)->divert_users(theta.input(i)->origin()); + loopvar.output->divert_users(loopvar.input->origin()); } else { jlm::rvsdg::output * buffer; - loop->add_loopvar(theta.input(i)->origin(), &buffer); - smap.insert(theta.input(i)->argument(), buffer); + loop->AddLoopVar(loopvar.input->origin(), &buffer); + smap.insert(loopvar.pre, buffer); // buffer out is only used by branch branches.push_back(*buffer->begin()); // divert theta outputs - theta.output(i)->divert_users(loop->output(loop->noutputs() - 1)); + loopvar.output->divert_users(loop->output(loop->noutputs() - 1)); } } @@ -54,7 +55,7 @@ ConvertThetaNode(rvsdg::ThetaNode & theta) { if (branches[i]) { - branches[i]->divert_to(smap.lookup(theta.input(i)->result()->origin())); + branches[i]->divert_to(smap.lookup(theta.MapInputLoopVar(*theta.input(i)).post->origin())); } } diff --git a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp index d4b8495fe..af3533d1c 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-prints.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-prints.cpp @@ -84,7 +84,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) } else if (auto theta = dynamic_cast(region->node())) { - output = theta->add_loopvar(output)->argument(); + output = theta->AddLoopVar(output).pre; } else if (auto lambda = dynamic_cast(region->node())) { diff --git a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp index b19624b40..a016c1b06 100644 --- a/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp +++ b/jlm/hls/backend/rvsdg2rhls/add-triggers.cpp @@ -106,7 +106,7 @@ add_triggers(rvsdg::Region * region) { JLM_ASSERT(trigger != nullptr); JLM_ASSERT(get_trigger(t->subregion()) == nullptr); - t->add_loopvar(trigger); + t->AddLoopVar(trigger); add_triggers(t->subregion()); } else if (auto gn = dynamic_cast(node)) diff --git a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp index 
6c4b64b05..7fd439520 100644 --- a/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp +++ b/jlm/hls/backend/rvsdg2rhls/distribute-constants.cpp @@ -24,29 +24,27 @@ distribute_constant(const rvsdg::SimpleOperation & op, rvsdg::simple_output * ou changed = false; for (auto user : *out) { - auto node = rvsdg::input::GetNode(*user); - if (auto ti = dynamic_cast(user)) + if (auto theta = rvsdg::TryGetOwnerNode(*user)) { - auto arg = ti->argument(); - auto res = ti->result(); - if (res->origin() == arg) + auto loopvar = theta->MapInputLoopVar(*user); + if (loopvar.post->origin() == loopvar.pre) { // pass-through auto arg_replacement = dynamic_cast( - rvsdg::SimpleNode::create_normalized(ti->node()->subregion(), op, {})[0]); - ti->argument()->divert_users(arg_replacement); - ti->output()->divert_users( + rvsdg::SimpleNode::create_normalized(theta->subregion(), op, {})[0]); + loopvar.pre->divert_users(arg_replacement); + loopvar.output->divert_users( rvsdg::SimpleNode::create_normalized(out->region(), op, {})[0]); distribute_constant(op, arg_replacement); - arg->region()->RemoveResult(res->index()); - arg->region()->RemoveArgument(arg->index()); - arg->region()->node()->RemoveInput(arg->input()->index()); - arg->region()->node()->RemoveOutput(res->output()->index()); + theta->subregion()->RemoveResult(loopvar.post->index()); + theta->subregion()->RemoveArgument(loopvar.pre->index()); + theta->RemoveInput(loopvar.input->index()); + theta->RemoveOutput(loopvar.output->index()); changed = true; break; } } - if (auto gammaNode = dynamic_cast(node)) + if (auto gammaNode = rvsdg::TryGetOwnerNode(*user)) { if (gammaNode->predicate() == user) { diff --git a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp index 67ed15714..c3b2609a9 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-queue.cpp @@ -213,7 +213,7 @@ separate_load_edge( auto loop_node = jlm::util::AssertedCast(sti->node()); jlm::rvsdg::output * 
buffer; - addr_edge = loop_node->add_loopvar(addr_edge, &buffer); + addr_edge = loop_node->AddLoopVar(addr_edge, &buffer); addr_edge_user->divert_to(addr_edge); mem_edge = find_loop_output(sti); auto sti_arg = sti->arguments.first(); diff --git a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp index 883175cf8..0b18e52f2 100644 --- a/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp +++ b/jlm/hls/backend/rvsdg2rhls/mem-sep.cpp @@ -116,9 +116,9 @@ route_through(rvsdg::Region * target, jlm::rvsdg::output * response) } else if (auto tn = dynamic_cast(target->node())) { - auto lv = tn->add_loopvar(parent_response); - parrent_user->divert_to(lv); - return lv->argument(); + auto lv = tn->AddLoopVar(parent_response); + parrent_user->divert_to(lv.output); + return lv.pre; } JLM_UNREACHABLE("THIS SHOULD NOT HAPPEN"); } @@ -183,13 +183,12 @@ trace_edge( JLM_ASSERT(new_edge->nusers() == 1); auto user = *common_edge->begin(); auto new_next = *new_edge->begin(); - auto node = rvsdg::input::GetNode(*user); if (auto res = dynamic_cast(user)) { // end of region reached return res; } - else if (auto gammaNode = dynamic_cast(node)) + else if (auto gammaNode = rvsdg::TryGetOwnerNode(*user)) { auto ip = gammaNode->AddEntryVar(new_edge); std::vector vec; @@ -208,13 +207,13 @@ trace_edge( common_edge = subres->output(); } } - else if (auto ti = dynamic_cast(user)) + else if (auto theta = rvsdg::TryGetOwnerNode(*user)) { - auto tn = ti->node(); - auto lv = tn->add_loopvar(new_edge); - trace_edge(ti->argument(), lv->argument(), load_nodes, store_nodes, decouple_nodes); - common_edge = ti->output(); - new_edge = lv; + auto olv = theta->MapInputLoopVar(*user); + auto lv = theta->AddLoopVar(new_edge); + trace_edge(olv.pre, lv.pre, load_nodes, store_nodes, decouple_nodes); + common_edge = olv.output; + new_edge = lv.output; new_next->divert_to(new_edge); } else if (auto si = dynamic_cast(user)) diff --git a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp 
b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp index 3567f468b..f5e1dfaff 100644 --- a/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp +++ b/jlm/hls/backend/rvsdg2rhls/rvsdg2rhls.cpp @@ -113,9 +113,9 @@ trace_call(jlm::rvsdg::input * input) auto argument = dynamic_cast(input->origin()); const jlm::rvsdg::output * result; - if (auto to = dynamic_cast(input->origin())) + if (auto theta = rvsdg::TryGetOwnerNode(*input->origin())) { - result = trace_call(to->input()); + result = trace_call(theta->MapOutputLoopVar(*input->origin()).input); } else if (argument == nullptr) { diff --git a/jlm/hls/ir/hls.cpp b/jlm/hls/ir/hls.cpp index afb5eb0e1..6628ce87c 100644 --- a/jlm/hls/ir/hls.cpp +++ b/jlm/hls/ir/hls.cpp @@ -66,7 +66,7 @@ ExitResult::Copy(rvsdg::output & origin, rvsdg::StructuralOutput * output) } rvsdg::StructuralOutput * -loop_node::add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) +loop_node::AddLoopVar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer) { auto input = rvsdg::StructuralInput::create(this, origin, origin->Type()); auto output = rvsdg::StructuralOutput::create(this, origin->Type()); diff --git a/jlm/hls/ir/hls.hpp b/jlm/hls/ir/hls.hpp index b67444dfc..997e8ddf4 100644 --- a/jlm/hls/ir/hls.hpp +++ b/jlm/hls/ir/hls.hpp @@ -787,7 +787,7 @@ class loop_node final : public rvsdg::StructuralNode add_backedge(std::shared_ptr type); rvsdg::StructuralOutput * - add_loopvar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer = nullptr); + AddLoopVar(jlm::rvsdg::output * origin, jlm::rvsdg::output ** buffer = nullptr); jlm::rvsdg::output * add_loopconst(jlm::rvsdg::output * origin); diff --git a/jlm/hls/opt/cne.cpp b/jlm/hls/opt/cne.cpp index 94745c79c..751b2605c 100644 --- a/jlm/hls/opt/cne.cpp +++ b/jlm/hls/opt/cne.cpp @@ -183,33 +183,43 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (o1->type() != o2->type()) return false; - if (is(o1) && is(o2)) + if (auto theta1 = 
rvsdg::TryGetRegionParentNode(*o1)) { - JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = static_cast(o2); - vs.insert(a1, a2); - auto i1 = a1->input(), i2 = a2->input(); - if (!congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx)) - return false; + if (auto theta2 = rvsdg::TryGetRegionParentNode(*o2)) + { + JLM_ASSERT(o1->region()->node() == o2->region()->node()); + auto loopvar1 = theta1->MapPreLoopVar(*o1); + auto loopvar2 = theta2->MapPreLoopVar(*o2); + vs.insert(o1, o2); + auto i1 = loopvar1.input, i2 = loopvar2.input; + if (!congruent(loopvar1.input->origin(), loopvar2.input->origin(), vs, ctx)) + return false; - auto output1 = o1->region()->node()->output(i1->index()); - auto output2 = o2->region()->node()->output(i2->index()); - return congruent(output1, output2, vs, ctx); + auto output1 = o1->region()->node()->output(i1->index()); + auto output2 = o2->region()->node()->output(i2->index()); + return congruent(output1, output2, vs, ctx); + } } - auto n1 = jlm::rvsdg::output::GetNode(*o1); - auto n2 = jlm::rvsdg::output::GetNode(*o2); - if (is(n1) && is(n2) && n1 == n2) + if (auto theta1 = rvsdg::TryGetOwnerNode(*o1)) { - auto so1 = static_cast(o1); - auto so2 = static_cast(o2); - vs.insert(o1, o2); - auto r1 = so1->results.first(); - auto r2 = so2->results.first(); - return congruent(r1->origin(), r2->origin(), vs, ctx); + if (auto theta2 = rvsdg::TryGetOwnerNode(*o2)) + { + if (theta1 == theta2) + { + vs.insert(o1, o2); + auto loopvar1 = theta1->MapOutputLoopVar(*o1); + auto loopvar2 = theta2->MapOutputLoopVar(*o2); + auto r1 = loopvar1.post; + auto r2 = loopvar2.post; + return congruent(r1->origin(), r2->origin(), vs, ctx); + } + } } + auto n1 = jlm::rvsdg::output::GetNode(*o1); + auto n2 = jlm::rvsdg::output::GetNode(*o2); + auto a1 = dynamic_cast(o1); auto a2 = dynamic_cast(o2); if (a1 && is(a1->region()->node()) && a2 && is(a2->region()->node())) @@ -331,10 +341,12 @@ mark_theta(const 
rvsdg::StructuralNode * node, cnectx & ctx) { auto input1 = theta->input(i1); auto input2 = theta->input(i2); - if (congruent(input1->argument(), input2->argument(), ctx)) + auto loopvar1 = theta->MapInputLoopVar(*input1); + auto loopvar2 = theta->MapInputLoopVar(*input2); + if (congruent(loopvar1.pre, loopvar2.pre, ctx)) { - ctx.mark(input1->argument(), input2->argument()); - ctx.mark(input1->output(), input2->output()); + ctx.mark(loopvar1.pre, loopvar2.pre); + ctx.mark(loopvar1.output, loopvar2.output); } } } @@ -530,11 +542,11 @@ divert_theta(rvsdg::StructuralNode * node, cnectx & ctx) auto theta = static_cast(node); auto subregion = node->subregion(0); - for (const auto & lv : *theta) + for (const auto & lv : theta->GetLoopVars()) { - JLM_ASSERT(ctx.set(lv->argument())->size() == ctx.set(lv)->size()); - divert_users(lv->argument(), ctx); - divert_users(lv, ctx); + JLM_ASSERT(ctx.set(lv.pre)->size() == ctx.set(lv.output)->size()); + divert_users(lv.pre, ctx); + divert_users(lv.output, ctx); } divert(subregion, ctx); diff --git a/jlm/hls/util/view.cpp b/jlm/hls/util/view.cpp index c1a7a8561..e296140dc 100644 --- a/jlm/hls/util/view.cpp +++ b/jlm/hls/util/view.cpp @@ -367,9 +367,10 @@ region_to_dot(rvsdg::Region * region) { dot << edge(be->argument(), be, true); } - else if (auto to = dynamic_cast(region->result(i)->output())) + else if (auto theta = rvsdg::TryGetOwnerNode(*region->result(i)->output())) { - dot << edge(to->argument(), to->result(), true); + auto loopvar = theta->MapOutputLoopVar(*region->result(i)->output()); + dot << edge(loopvar.pre, loopvar.post, true); } } diff --git a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp index 7301de51e..9e585664a 100644 --- a/jlm/llvm/frontend/InterProceduralGraphConversion.cpp +++ b/jlm/llvm/frontend/InterProceduralGraphConversion.cpp @@ -765,7 +765,7 @@ Convert( * Add loop variables */ auto & demandSet = demandMap.Lookup(loopAggregationNode); - 
std::unordered_map thetaOutputMap; + std::unordered_map thetaLoopVarMap; for (auto & v : demandSet.LoopVariables().Variables()) { rvsdg::output * value = nullptr; @@ -778,8 +778,9 @@ Convert( { value = outerVariableMap.lookup(&v); } - thetaOutputMap[&v] = theta->add_loopvar(value); - thetaVariableMap.insert(&v, thetaOutputMap[&v]->argument()); + auto loopvar = theta->AddLoopVar(value); + thetaLoopVarMap[&v] = loopvar; + thetaVariableMap.insert(&v, loopvar.pre); } /* @@ -797,8 +798,8 @@ Convert( */ for (auto & v : demandSet.LoopVariables().Variables()) { - JLM_ASSERT(thetaOutputMap.find(&v) != thetaOutputMap.end()); - thetaOutputMap[&v]->result()->divert_to(thetaVariableMap.lookup(&v)); + JLM_ASSERT(thetaLoopVarMap.find(&v) != thetaLoopVarMap.end()); + thetaLoopVarMap[&v].post->divert_to(thetaVariableMap.lookup(&v)); } /* @@ -820,7 +821,7 @@ Convert( for (auto & v : demandSet.LoopVariables().Variables()) { JLM_ASSERT(outerVariableMap.contains(&v)); - outerVariableMap.insert(&v, thetaOutputMap[&v]); + outerVariableMap.insert(&v, thetaLoopVarMap[&v].output); } } diff --git a/jlm/llvm/ir/operators/call.cpp b/jlm/llvm/ir/operators/call.cpp index b77882da3..eff1c8b13 100644 --- a/jlm/llvm/ir/operators/call.cpp +++ b/jlm/llvm/ir/operators/call.cpp @@ -66,17 +66,22 @@ invariantInput( return nullptr; } -static rvsdg::ThetaInput * -invariantInput(const rvsdg::ThetaOutput & output, InvariantOutputMap & invariantOutputs) +static rvsdg::input * +invariantInput( + const rvsdg::ThetaNode & theta, + const rvsdg::output & output, + InvariantOutputMap & invariantOutputs) { - auto origin = output.result()->origin(); + auto loopvar = theta.MapOutputLoopVar(output); + + auto origin = loopvar.post->origin(); while (true) { - if (origin == output.argument()) + if (origin == loopvar.pre) { - invariantOutputs[&output] = output.input(); - return output.input(); + invariantOutputs[&output] = loopvar.input; + return loopvar.input; } if (auto input = invariantInput(*origin, invariantOutputs)) 
@@ -101,13 +106,13 @@ invariantInput(const rvsdg::output & output, InvariantOutputMap & invariantOutpu if (invariantOutputs.find(&output) != invariantOutputs.end()) return invariantOutputs[&output]; - if (auto thetaOutput = dynamic_cast(&output)) - return invariantInput(*thetaOutput, invariantOutputs); + if (auto theta = rvsdg::TryGetOwnerNode(output)) + return invariantInput(*theta, output, invariantOutputs); - if (auto thetaArgument = dynamic_cast(&output)) + if (auto theta = rvsdg::TryGetRegionParentNode(output)) { - auto thetaInput = static_cast(thetaArgument->input()); - return invariantInput(*thetaInput->output(), invariantOutputs); + auto loopvar = theta->MapPreLoopVar(output); + return invariantInput(*loopvar.output, invariantOutputs); } if (auto gamma = rvsdg::TryGetOwnerNode(output)) @@ -205,9 +210,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) continue; } - if (auto thetaOutput = dynamic_cast(origin)) + if (rvsdg::TryGetOwnerNode(*origin)) { - if (auto input = invariantInput(*thetaOutput)) + if (auto input = invariantInput(*origin)) { origin = input->origin(); continue; @@ -216,9 +221,9 @@ CallNode::TraceFunctionInput(const CallNode & callNode) return origin; } - if (auto thetaArgument = dynamic_cast(origin)) + if (rvsdg::TryGetRegionParentNode(*origin)) { - if (auto input = invariantInput(*thetaArgument)) + if (auto input = invariantInput(*origin)) { origin = input->origin(); continue; diff --git a/jlm/llvm/ir/operators/lambda.cpp b/jlm/llvm/ir/operators/lambda.cpp index f0dbabc58..099e82082 100644 --- a/jlm/llvm/ir/operators/lambda.cpp +++ b/jlm/llvm/ir/operators/lambda.cpp @@ -304,17 +304,17 @@ node::ComputeCallSummary() const continue; } - if (auto theta_input = dynamic_cast(input)) + if (auto theta = rvsdg::TryGetOwnerNode(*input)) { - auto argument = theta_input->argument(); - worklist.insert(worklist.end(), argument->begin(), argument->end()); + auto loopvar = theta->MapInputLoopVar(*input); + worklist.insert(worklist.end(), 
loopvar.pre->begin(), loopvar.pre->end()); continue; } - if (auto thetaResult = dynamic_cast(input)) + if (auto theta = rvsdg::TryGetRegionParentNode(*input)) { - auto output = thetaResult->output(); - worklist.insert(worklist.end(), output->begin(), output->end()); + auto loopvar = theta->MapPostLoopVar(*input); + worklist.insert(worklist.end(), loopvar.output->begin(), loopvar.output->end()); continue; } diff --git a/jlm/llvm/opt/DeadNodeElimination.cpp b/jlm/llvm/opt/DeadNodeElimination.cpp index d672d69a3..1945f2f6d 100644 --- a/jlm/llvm/opt/DeadNodeElimination.cpp +++ b/jlm/llvm/opt/DeadNodeElimination.cpp @@ -214,19 +214,20 @@ DeadNodeElimination::MarkOutput(const jlm::rvsdg::output & output) return; } - if (auto thetaOutput = dynamic_cast(&output)) + if (auto theta = rvsdg::TryGetOwnerNode(output)) { - MarkOutput(*thetaOutput->node()->predicate()->origin()); - MarkOutput(*thetaOutput->result()->origin()); - MarkOutput(*thetaOutput->input()->origin()); + auto loopvar = theta->MapOutputLoopVar(output); + MarkOutput(*theta->predicate()->origin()); + MarkOutput(*loopvar.post->origin()); + MarkOutput(*loopvar.input->origin()); return; } - if (auto thetaArgument = dynamic_cast(&output)) + if (auto theta = rvsdg::TryGetRegionParentNode(output)) { - auto thetaInput = util::AssertedCast(thetaArgument->input()); - MarkOutput(*thetaInput->output()); - MarkOutput(*thetaInput->origin()); + auto loopvar = theta->MapPreLoopVar(output); + MarkOutput(*loopvar.output); + MarkOutput(*loopvar.input->origin()); return; } @@ -435,16 +436,16 @@ DeadNodeElimination::SweepTheta(rvsdg::ThetaNode & thetaNode) const { auto & thetaSubregion = *thetaNode.subregion(); - auto matchOutput = [&](const rvsdg::ThetaOutput & output) + auto matchOutput = [&](const rvsdg::output & output) { - auto & argument = *output.argument(); - return !Context_->IsAlive(argument) && !Context_->IsAlive(output); + auto loopvar = thetaNode.MapOutputLoopVar(output); + return !Context_->IsAlive(*loopvar.pre) && 
!Context_->IsAlive(*loopvar.output); }; auto deadInputs = thetaNode.RemoveThetaOutputsWhere(matchOutput); SweepRegion(thetaSubregion); - auto matchInput = [&](const rvsdg::ThetaInput & input) + auto matchInput = [&](const rvsdg::input & input) { return deadInputs.Contains(&input); }; diff --git a/jlm/llvm/opt/InvariantValueRedirection.cpp b/jlm/llvm/opt/InvariantValueRedirection.cpp index c6253d83a..abb9a64d9 100644 --- a/jlm/llvm/opt/InvariantValueRedirection.cpp +++ b/jlm/llvm/opt/InvariantValueRedirection.cpp @@ -159,15 +159,15 @@ InvariantValueRedirection::RedirectGammaOutputs(rvsdg::GammaNode & gammaNode) void InvariantValueRedirection::RedirectThetaOutputs(rvsdg::ThetaNode & thetaNode) { - for (const auto & thetaOutput : thetaNode) + for (const auto & loopVar : thetaNode.GetLoopVars()) { // FIXME: In order to also redirect I/O state type variables, we need to know whether a loop // terminates. - if (rvsdg::is(thetaOutput->type())) + if (rvsdg::is(loopVar.input->type())) continue; - if (rvsdg::is_invariant(thetaOutput)) - thetaOutput->divert_users(thetaOutput->input()->origin()); + if (rvsdg::ThetaLoopVarIsInvariant(loopVar)) + loopVar.output->divert_users(loopVar.input->origin()); } } diff --git a/jlm/llvm/opt/alias-analyses/Andersen.cpp b/jlm/llvm/opt/alias-analyses/Andersen.cpp index d71a33515..0c142403e 100644 --- a/jlm/llvm/opt/alias-analyses/Andersen.cpp +++ b/jlm/llvm/opt/alias-analyses/Andersen.cpp @@ -1126,13 +1126,13 @@ Andersen::AnalyzeTheta(const rvsdg::ThetaNode & theta) { // Create a PointerObject for each argument in the inner region // And make it point to a superset of the corresponding input register - for (const auto thetaOutput : theta) + for (const auto & loopVar : theta.GetLoopVars()) { - if (!IsOrContainsPointerType(thetaOutput->type())) + if (!IsOrContainsPointerType(loopVar.input->type())) continue; - auto & inputReg = *thetaOutput->input()->origin(); - auto & innerArgumentReg = *thetaOutput->argument(); + auto & inputReg = 
*loopVar.input->origin(); + auto & innerArgumentReg = *loopVar.pre; const auto inputRegPO = Set_->GetRegisterPointerObject(inputReg); const auto innerArgumentRegPO = Set_->CreateRegisterPointerObject(innerArgumentReg); @@ -1144,14 +1144,14 @@ Andersen::AnalyzeTheta(const rvsdg::ThetaNode & theta) // Iterate over loop variables again, making the inner arguments point to a superset // of what the corresponding result registers point to - for (const auto thetaOutput : theta) + for (const auto & loopVar : theta.GetLoopVars()) { - if (!IsOrContainsPointerType(thetaOutput->type())) + if (!IsOrContainsPointerType(loopVar.input->type())) continue; - auto & innerArgumentReg = *thetaOutput->argument(); - auto & innerResultReg = *thetaOutput->result()->origin(); - auto & outputReg = *thetaOutput; + auto & innerArgumentReg = *loopVar.pre; + auto & innerResultReg = *loopVar.post->origin(); + auto & outputReg = *loopVar.output; const auto innerArgumentRegPO = Set_->GetRegisterPointerObject(innerArgumentReg); const auto innerResultRegPO = Set_->GetRegisterPointerObject(innerResultReg); diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp index 81f02bb8c..29f508897 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.cpp @@ -899,20 +899,20 @@ MemoryStateEncoder::EncodeTheta(rvsdg::ThetaNode & thetaNode) Context_->GetRegionalizedStateMap().PopRegion(*thetaNode.subregion()); } -std::vector +std::vector MemoryStateEncoder::EncodeThetaEntry(rvsdg::ThetaNode & thetaNode) { auto region = thetaNode.region(); auto & stateMap = Context_->GetRegionalizedStateMap(); auto & memoryNodes = Context_->GetMemoryNodeProvisioning().GetThetaEntryExitNodes(thetaNode); - std::vector thetaStateOutputs; + std::vector thetaStateOutputs; auto memoryNodeStatePairs = stateMap.GetStates(*region, memoryNodes); for (auto & memoryNodeStatePair : memoryNodeStatePairs) { - auto 
thetaStateOutput = thetaNode.add_loopvar(&memoryNodeStatePair->State()); - stateMap.InsertState(memoryNodeStatePair->MemoryNode(), *thetaStateOutput->argument()); - thetaStateOutputs.push_back(thetaStateOutput); + auto loopvar = thetaNode.AddLoopVar(&memoryNodeStatePair->State()); + stateMap.InsertState(memoryNodeStatePair->MemoryNode(), *loopvar.pre); + thetaStateOutputs.push_back(loopvar.output); } return thetaStateOutputs; @@ -921,7 +921,7 @@ MemoryStateEncoder::EncodeThetaEntry(rvsdg::ThetaNode & thetaNode) void MemoryStateEncoder::EncodeThetaExit( rvsdg::ThetaNode & thetaNode, - const std::vector & thetaStateOutputs) + const std::vector & thetaStateOutputs) { auto subregion = thetaNode.subregion(); auto & stateMap = Context_->GetRegionalizedStateMap(); @@ -934,10 +934,11 @@ MemoryStateEncoder::EncodeThetaExit( auto thetaStateOutput = thetaStateOutputs[n]; auto & memoryNodeStatePair = memoryNodeStatePairs[n]; auto & memoryNode = memoryNodeStatePair->MemoryNode(); - JLM_ASSERT(thetaStateOutput->input()->origin() == &memoryNodeStatePair->State()); + auto loopvar = thetaNode.MapOutputLoopVar(*thetaStateOutput); + JLM_ASSERT(loopvar.input->origin() == &memoryNodeStatePair->State()); auto & subregionState = stateMap.GetState(*subregion, memoryNode)->State(); - thetaStateOutput->result()->divert_to(&subregionState); + loopvar.post->divert_to(&subregionState); memoryNodeStatePair->ReplaceState(*thetaStateOutput); } } diff --git a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp index 9929f2bb2..63833aa73 100644 --- a/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp +++ b/jlm/llvm/opt/alias-analyses/MemoryStateEncoder.hpp @@ -157,13 +157,13 @@ class MemoryStateEncoder final void EncodeTheta(rvsdg::ThetaNode & thetaNode); - std::vector + std::vector EncodeThetaEntry(rvsdg::ThetaNode & thetaNode); void EncodeThetaExit( rvsdg::ThetaNode & thetaNode, - const std::vector & thetaStateOutputs); + const std::vector & 
thetaStateOutputs); /** * Replace \p loadNode with a new copy that takes the provided \p memoryStates. All users of the diff --git a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp index ebf3ff311..9ec56963d 100644 --- a/jlm/llvm/opt/alias-analyses/Steensgaard.cpp +++ b/jlm/llvm/opt/alias-analyses/Steensgaard.cpp @@ -236,13 +236,13 @@ class RegisterLocation final : public Location return jlm::util::strfmt(dbgstr, ":arg", index); } - if (is(Output_)) + if (rvsdg::TryGetRegionParentNode(*Output_)) { auto dbgstr = Output_->region()->node()->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":arg", index); } - if (is(Output_)) + if (rvsdg::TryGetOwnerNode(*Output_)) { auto dbgstr = jlm::rvsdg::output::GetNode(*Output_)->GetOperation().debug_string(); return jlm::util::strfmt(dbgstr, ":out", index); @@ -1658,12 +1658,12 @@ Steensgaard::AnalyzeGamma(const rvsdg::GammaNode & node) void Steensgaard::AnalyzeTheta(const rvsdg::ThetaNode & theta) { - for (auto thetaOutput : theta) + for (const auto & loopVar : theta.GetLoopVars()) { - if (HasOrContainsPointerType(*thetaOutput)) + if (HasOrContainsPointerType(*loopVar.output)) { - auto & originLocation = Context_->GetLocation(*thetaOutput->input()->origin()); - auto & argumentLocation = Context_->GetOrInsertRegisterLocation(*thetaOutput->argument()); + auto & originLocation = Context_->GetLocation(*loopVar.input->origin()); + auto & argumentLocation = Context_->GetOrInsertRegisterLocation(*loopVar.pre); Context_->Join(argumentLocation, originLocation); } @@ -1671,13 +1671,13 @@ Steensgaard::AnalyzeTheta(const rvsdg::ThetaNode & theta) AnalyzeRegion(*theta.subregion()); - for (auto thetaOutput : theta) + for (const auto & loopVar : theta.GetLoopVars()) { - if (HasOrContainsPointerType(*thetaOutput)) + if (HasOrContainsPointerType(*loopVar.output)) { - auto & originLocation = Context_->GetLocation(*thetaOutput->result()->origin()); - auto & argumentLocation = 
Context_->GetLocation(*thetaOutput->argument()); - auto & outputLocation = Context_->GetOrInsertRegisterLocation(*thetaOutput); + auto & originLocation = Context_->GetLocation(*loopVar.post->origin()); + auto & argumentLocation = Context_->GetLocation(*loopVar.pre); + auto & outputLocation = Context_->GetOrInsertRegisterLocation(*loopVar.output); Context_->Join(originLocation, argumentLocation); Context_->Join(originLocation, outputLocation); diff --git a/jlm/llvm/opt/cne.cpp b/jlm/llvm/opt/cne.cpp index def2530aa..96c364792 100644 --- a/jlm/llvm/opt/cne.cpp +++ b/jlm/llvm/opt/cne.cpp @@ -180,33 +180,40 @@ congruent(jlm::rvsdg::output * o1, jlm::rvsdg::output * o2, vset & vs, cnectx & if (o1->type() != o2->type()) return false; - if (is(o1) && is(o2)) + if (auto theta1 = rvsdg::TryGetRegionParentNode(*o1)) { - JLM_ASSERT(o1->region()->node() == o2->region()->node()); - auto a1 = static_cast(o1); - auto a2 = static_cast(o2); - vs.insert(a1, a2); - auto i1 = a1->input(), i2 = a2->input(); - if (!congruent(a1->input()->origin(), a2->input()->origin(), vs, ctx)) - return false; + if (auto theta2 = rvsdg::TryGetRegionParentNode(*o2)) + { + JLM_ASSERT(o1->region()->node() == o2->region()->node()); + auto loopvar1 = theta1->MapPreLoopVar(*o1); + auto loopvar2 = theta2->MapPreLoopVar(*o2); + vs.insert(o1, o2); + auto i1 = loopvar1.input, i2 = loopvar2.input; + if (!congruent(loopvar1.input->origin(), loopvar2.input->origin(), vs, ctx)) + return false; - auto output1 = o1->region()->node()->output(i1->index()); - auto output2 = o2->region()->node()->output(i2->index()); - return congruent(output1, output2, vs, ctx); + auto output1 = o1->region()->node()->output(i1->index()); + auto output2 = o2->region()->node()->output(i2->index()); + return congruent(output1, output2, vs, ctx); + } } - auto n1 = jlm::rvsdg::output::GetNode(*o1); - auto n2 = jlm::rvsdg::output::GetNode(*o2); - if (is(n1) && is(n2) && n1 == n2) + if (auto theta1 = rvsdg::TryGetOwnerNode(*o1)) { - auto so1 = 
static_cast(o1); - auto so2 = static_cast(o2); - vs.insert(o1, o2); - auto r1 = so1->results.first(); - auto r2 = so2->results.first(); - return congruent(r1->origin(), r2->origin(), vs, ctx); + if (auto theta2 = rvsdg::TryGetOwnerNode(*o2)) + { + vs.insert(o1, o2); + auto loopvar1 = theta1->MapOutputLoopVar(*o1); + auto loopvar2 = theta2->MapOutputLoopVar(*o2); + auto r1 = loopvar1.post; + auto r2 = loopvar2.post; + return congruent(r1->origin(), r2->origin(), vs, ctx); + } } + auto n1 = jlm::rvsdg::output::GetNode(*o1); + auto n2 = jlm::rvsdg::output::GetNode(*o2); + if (rvsdg::is(n1) && n1 == n2) { auto so1 = static_cast(o1); @@ -315,10 +322,12 @@ mark_theta(const rvsdg::StructuralNode * node, cnectx & ctx) { auto input1 = theta->input(i1); auto input2 = theta->input(i2); - if (congruent(input1->argument(), input2->argument(), ctx)) + auto loopvar1 = theta->MapInputLoopVar(*input1); + auto loopvar2 = theta->MapInputLoopVar(*input2); + if (congruent(loopvar1.pre, loopvar2.pre, ctx)) { - ctx.mark(input1->argument(), input2->argument()); - ctx.mark(input1->output(), input2->output()); + ctx.mark(loopvar1.pre, loopvar2.pre); + ctx.mark(loopvar1.output, loopvar2.output); } } } @@ -491,11 +500,11 @@ divert_theta(rvsdg::StructuralNode * node, cnectx & ctx) auto theta = static_cast(node); auto subregion = node->subregion(0); - for (const auto & lv : *theta) + for (const auto & lv : theta->GetLoopVars()) { - JLM_ASSERT(ctx.set(lv->argument())->size() == ctx.set(lv)->size()); - divert_users(lv->argument(), ctx); - divert_users(lv, ctx); + JLM_ASSERT(ctx.set(lv.pre)->size() == ctx.set(lv.output)->size()); + divert_users(lv.pre, ctx); + divert_users(lv.output, ctx); } divert(subregion, ctx); diff --git a/jlm/llvm/opt/inlining.cpp b/jlm/llvm/opt/inlining.cpp index e24406333..ec9aba8b0 100644 --- a/jlm/llvm/opt/inlining.cpp +++ b/jlm/llvm/opt/inlining.cpp @@ -78,7 +78,7 @@ route_to_region(jlm::rvsdg::output * output, rvsdg::Region * region) } else if (auto theta = 
dynamic_cast(region->node())) { - output = theta->add_loopvar(output)->argument(); + output = theta->AddLoopVar(output).pre; } else if (auto lambda = dynamic_cast(region->node())) { diff --git a/jlm/llvm/opt/inversion.cpp b/jlm/llvm/opt/inversion.cpp index e75d0cb7a..a0cbce507 100644 --- a/jlm/llvm/opt/inversion.cpp +++ b/jlm/llvm/opt/inversion.cpp @@ -77,14 +77,14 @@ static void pullin(rvsdg::GammaNode * gamma, rvsdg::ThetaNode * theta) { pullin_bottom(gamma); - for (const auto & lv : *theta) + for (const auto & lv : theta->GetLoopVars()) { - if (jlm::rvsdg::output::GetNode(*lv->result()->origin()) != gamma) + if (jlm::rvsdg::output::GetNode(*lv.post->origin()) != gamma) { - auto ev = gamma->AddEntryVar(lv->result()->origin()); + auto ev = gamma->AddEntryVar(lv.post->origin()); JLM_ASSERT(ev.branchArgument.size() == 2); auto xv = gamma->AddExitVar({ ev.branchArgument[0], ev.branchArgument[1] }).output; - lv->result()->divert_to(xv); + lv.post->divert_to(xv); } } pullin_top(gamma); @@ -124,16 +124,16 @@ copy_condition_nodes( } } -static rvsdg::RegionArgument * -to_argument(jlm::rvsdg::output * output) +static jlm::rvsdg::StructuralOutput * +to_structural_output(jlm::rvsdg::output * output) { - return dynamic_cast(output); + return dynamic_cast(output); } -static rvsdg::StructuralOutput * -to_structural_output(jlm::rvsdg::output * output) +static rvsdg::RegionArgument * +to_argument(jlm::rvsdg::output * output) { - return dynamic_cast(output); + return dynamic_cast(output); } static void @@ -148,8 +148,8 @@ invert(rvsdg::ThetaNode * otheta) /* copy condition nodes for new gamma node */ rvsdg::SubstitutionMap smap; auto cnodes = collect_condition_nodes(otheta, ogamma); - for (const auto & olv : *otheta) - smap.insert(olv->argument(), olv->input()->origin()); + for (const auto & olv : otheta->GetLoopVars()) + smap.insert(olv.pre, olv.input->origin()); copy_condition_nodes(otheta->region(), smap, cnodes); auto ngamma = @@ -179,11 +179,11 @@ invert(rvsdg::ThetaNode * 
otheta) osubregion0->copy(ngamma->subregion(0), r0map, false, false); /* update substitution map for insertion of exit variables */ - for (const auto & olv : *otheta) + for (const auto & olv : otheta->GetLoopVars()) { - auto output = to_structural_output(olv->result()->origin()); + auto output = to_structural_output(olv.post->origin()); auto substitute = r0map.lookup(osubregion0->result(output->index())->origin()); - r0map.insert(olv->result()->origin(), substitute); + r0map.insert(olv.post->origin(), substitute); } } @@ -195,25 +195,25 @@ invert(rvsdg::ThetaNode * otheta) /* add loop variables to new theta node and setup substitution map */ auto osubregion0 = ogamma->subregion(0); auto osubregion1 = ogamma->subregion(1); - std::unordered_map nlvs; - for (const auto & olv : *otheta) + std::unordered_map nlvs; + for (const auto & olv : otheta->GetLoopVars()) { - auto ev = ngamma->AddEntryVar(olv->input()->origin()); - auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); - r1map.insert(olv->argument(), nlv->argument()); - nlvs[olv->input()] = nlv; + auto ev = ngamma->AddEntryVar(olv.input->origin()); + auto nlv = ntheta->AddLoopVar(ev.branchArgument[1]); + r1map.insert(olv.pre, nlv.pre); + nlvs[olv.input] = nlv; } for (const auto & oev : ogamma->GetEntryVars()) { if (auto argument = to_argument(oev.input->origin())) { - r1map.insert(oev.branchArgument[1], nlvs[argument->input()]->argument()); + r1map.insert(oev.branchArgument[1], nlvs[argument->input()].pre); } else { auto ev = ngamma->AddEntryVar(smap.lookup(oev.input->origin())); - auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); - r1map.insert(oev.branchArgument[1], nlv->argument()); + auto nlv = ntheta->AddLoopVar(ev.branchArgument[1]); + r1map.insert(oev.branchArgument[1], nlv.pre); nlvs[oev.input] = nlv; } } @@ -222,11 +222,11 @@ invert(rvsdg::ThetaNode * otheta) osubregion1->copy(ntheta->subregion(), r1map, false, false); /* adjust values in substitution map for condition node copying */ - for (const 
auto & olv : *otheta) + for (const auto & olv : otheta->GetLoopVars()) { - auto output = to_structural_output(olv->result()->origin()); + auto output = to_structural_output(olv.post->origin()); auto substitute = r1map.lookup(osubregion1->result(output->index())->origin()); - r1map.insert(olv->argument(), substitute); + r1map.insert(olv.pre, substitute); } /* copy condition nodes */ @@ -234,24 +234,24 @@ invert(rvsdg::ThetaNode * otheta) auto predicate = r1map.lookup(ogamma->predicate()->origin()); /* redirect results of loop variables and adjust substitution map for exit region copying */ - for (const auto & olv : *otheta) + for (const auto & olv : otheta->GetLoopVars()) { - auto output = to_structural_output(olv->result()->origin()); + auto output = to_structural_output(olv.post->origin()); auto substitute = r1map.lookup(osubregion1->result(output->index())->origin()); - nlvs[olv->input()]->result()->divert_to(substitute); - r1map.insert(olv->result()->origin(), nlvs[olv->input()]); + nlvs[olv.input].post->divert_to(substitute); + r1map.insert(olv.post->origin(), nlvs[olv.input].output); } for (const auto & oev : ogamma->GetEntryVars()) { if (auto argument = to_argument(oev.input->origin())) { - r1map.insert(oev.branchArgument[0], nlvs[argument->input()]); + r1map.insert(oev.branchArgument[0], nlvs[argument->input()].output); } else { auto substitute = r1map.lookup(oev.input->origin()); - nlvs[oev.input]->result()->divert_to(substitute); - r1map.insert(oev.branchArgument[0], nlvs[oev.input]); + nlvs[oev.input].post->divert_to(substitute); + r1map.insert(oev.branchArgument[0], nlvs[oev.input].output); } } @@ -261,26 +261,26 @@ invert(rvsdg::ThetaNode * otheta) osubregion0->copy(ngamma->subregion(1), r1map, false, false); /* adjust values in substitution map for exit variable creation */ - for (const auto & olv : *otheta) + for (const auto & olv : otheta->GetLoopVars()) { - auto output = to_structural_output(olv->result()->origin()); + auto output = 
to_structural_output(olv.post->origin()); auto substitute = r1map.lookup(osubregion0->result(output->index())->origin()); - r1map.insert(olv->result()->origin(), substitute); + r1map.insert(olv.post->origin(), substitute); } } /* add exit variables to new gamma */ - for (const auto & olv : *otheta) + for (const auto & olv : otheta->GetLoopVars()) { - auto o0 = r0map.lookup(olv->result()->origin()); - auto o1 = r1map.lookup(olv->result()->origin()); - auto ex = ngamma->AddExitVar({ o0, o1 }).output; - smap.insert(olv, ex); + auto o0 = r0map.lookup(olv.post->origin()); + auto o1 = r1map.lookup(olv.post->origin()); + auto ex = ngamma->AddExitVar({ o0, o1 }); + smap.insert(olv.output, ex.output); } /* replace outputs */ - for (const auto & olv : *otheta) - olv->divert_users(smap.lookup(olv)); + for (const auto & olv : otheta->GetLoopVars()) + olv.output->divert_users(smap.lookup(olv.output)); remove(otheta); } diff --git a/jlm/llvm/opt/push.cpp b/jlm/llvm/opt/push.cpp index aedb52ac8..abba1f5ec 100644 --- a/jlm/llvm/opt/push.cpp +++ b/jlm/llvm/opt/push.cpp @@ -123,7 +123,7 @@ copy_from_gamma(rvsdg::Node * node, size_t r) return arguments; } -static std::vector +static std::vector copy_from_theta(rvsdg::Node * node) { JLM_ASSERT(is(node->region()->node())); @@ -140,13 +140,13 @@ copy_from_theta(rvsdg::Node * node) operands.push_back(argument->input()->origin()); } - std::vector arguments; + std::vector arguments; auto copy = node->copy(target, operands); for (size_t n = 0; n < copy->noutputs(); n++) { - auto lv = theta->add_loopvar(copy->output(n)); - node->output(n)->divert_users(lv->argument()); - arguments.push_back(lv->argument()); + auto lv = theta->AddLoopVar(copy->output(n)); + node->output(n)->divert_users(lv.pre); + arguments.push_back(lv.pre); } return arguments; @@ -210,9 +210,7 @@ push(rvsdg::GammaNode * gamma) } static bool -is_theta_invariant( - const rvsdg::Node * node, - const std::unordered_set & invariants) +is_theta_invariant(const rvsdg::Node * node, 
const std::unordered_set & invariants) { JLM_ASSERT(is(node->region()->node())); JLM_ASSERT(node->depth() == 0); @@ -241,18 +239,18 @@ push_top(rvsdg::ThetaNode * theta) } /* collect loop invariant arguments */ - std::unordered_set invariants; - for (const auto & lv : *theta) + std::unordered_set invariants; + for (const auto & lv : theta->GetLoopVars()) { - if (lv->result()->origin() == lv->argument()) - invariants.insert(lv->argument()); + if (lv.post->origin() == lv.pre) + invariants.insert(lv.pre); } /* initialize worklist */ worklist wl; - for (const auto & lv : *theta) + for (const auto & lv : theta->GetLoopVars()) { - auto argument = lv->argument(); + auto argument = lv.pre; for (const auto & user : *argument) { auto tmp = jlm::rvsdg::input::GetNode(*user); @@ -334,8 +332,8 @@ pushout_store(rvsdg::Node * storenode) auto ovalue = storenode->input(1)->origin(); /* insert new value for store */ - auto nvalue = theta->add_loopvar(UndefValueOperation::Create(*theta->region(), ovalue->Type())); - nvalue->result()->divert_to(ovalue); + auto nvalue = theta->AddLoopVar(UndefValueOperation::Create(*theta->region(), ovalue->Type())); + nvalue.post->divert_to(ovalue); /* collect store operands */ std::vector states; @@ -349,7 +347,8 @@ pushout_store(rvsdg::Node * storenode) } /* create new store and redirect theta output users */ - auto nstates = StoreNonVolatileNode::Create(address, nvalue, states, storeop->GetAlignment()); + auto nstates = + StoreNonVolatileNode::Create(address, nvalue.output, states, storeop->GetAlignment()); for (size_t n = 0; n < states.size(); n++) { std::unordered_set users; @@ -369,9 +368,9 @@ pushout_store(rvsdg::Node * storenode) void push_bottom(rvsdg::ThetaNode * theta) { - for (const auto & lv : *theta) + for (const auto & lv : theta->GetLoopVars()) { - auto storenode = jlm::rvsdg::output::GetNode(*lv->result()->origin()); + auto storenode = jlm::rvsdg::output::GetNode(*lv.post->origin()); if (jlm::rvsdg::is(storenode) && 
is_movable_store(storenode)) { pushout_store(storenode); diff --git a/jlm/llvm/opt/unroll.cpp b/jlm/llvm/opt/unroll.cpp index f5a7b62c9..32bb7ec63 100644 --- a/jlm/llvm/opt/unroll.cpp +++ b/jlm/llvm/opt/unroll.cpp @@ -65,14 +65,15 @@ is_theta_invariant(const jlm::rvsdg::output * output) if (jlm::rvsdg::is(jlm::rvsdg::output::GetNode(*output))) return true; - auto argument = dynamic_cast(output); - if (!argument) + auto theta = rvsdg::TryGetRegionParentNode(*output); + if (!theta) return false; - return is_invariant(static_cast(argument->input())); + auto loopVar = theta->MapPreLoopVar(*output); + return ThetaLoopVarIsInvariant(loopVar); } -static rvsdg::RegionArgument * +static rvsdg::output * push_from_theta(jlm::rvsdg::output * output) { auto argument = dynamic_cast(output); @@ -85,10 +86,10 @@ push_from_theta(jlm::rvsdg::output * output) auto theta = static_cast(tmp->region()->node()); auto node = tmp->copy(theta->region(), {}); - auto lv = theta->add_loopvar(node->output(0)); - output->divert_users(lv->argument()); + auto lv = theta->AddLoopVar(node->output(0)); + output->divert_users(lv.pre); - return lv->argument(); + return lv.pre; } static bool @@ -99,12 +100,13 @@ is_idv(jlm::rvsdg::input * input) auto node = rvsdg::input::GetNode(*input); JLM_ASSERT(is(node) || is(node)); - auto a = dynamic_cast(input->origin()); - if (!a) - return false; + if (auto theta = rvsdg::TryGetRegionParentNode(*input->origin())) + { + auto loopvar = theta->MapPreLoopVar(*input->origin()); + return jlm::rvsdg::output::GetNode(*loopvar.post->origin()) == node; + } - auto tinput = static_cast(a->input()); - return jlm::rvsdg::output::GetNode(*tinput->result()->origin()) == node; + return false; } std::unique_ptr @@ -184,8 +186,8 @@ unroll_body( { theta->subregion()->copy(target, smap, false, false); rvsdg::SubstitutionMap tmap; - for (const auto & olv : *theta) - tmap.insert(olv->argument(), smap.lookup(olv->result()->origin())); + for (const auto & olv : theta->GetLoopVars()) + 
tmap.insert(olv.pre, smap.lookup(olv.post->origin())); smap = tmap; } theta->subregion()->copy(target, smap, false, false); @@ -200,13 +202,13 @@ static void copy_body_and_unroll(const rvsdg::ThetaNode * theta, size_t factor) { rvsdg::SubstitutionMap smap; - for (const auto & olv : *theta) - smap.insert(olv->argument(), olv->input()->origin()); + for (const auto & olv : theta->GetLoopVars()) + smap.insert(olv.pre, olv.input->origin()); unroll_body(theta, theta->region(), smap, factor); - for (const auto & olv : *theta) - olv->divert_users(smap.lookup(olv->result()->origin())); + for (const auto & olv : theta->GetLoopVars()) + olv.output->divert_users(smap.lookup(olv.post->origin())); } /* @@ -219,20 +221,24 @@ unroll_theta(const unrollinfo & ui, rvsdg::SubstitutionMap & smap, size_t factor auto remainder = ui.remainder(factor); auto unrolled_theta = rvsdg::ThetaNode::create(theta->region()); - for (const auto & olv : *theta) + auto oldLoopVars = theta->GetLoopVars(); + for (const auto & olv : oldLoopVars) { - auto nlv = unrolled_theta->add_loopvar(olv->input()->origin()); - smap.insert(olv->argument(), nlv->argument()); + auto nlv = unrolled_theta->AddLoopVar(olv.input->origin()); + smap.insert(olv.pre, nlv.pre); } unroll_body(theta, unrolled_theta->subregion(), smap, factor); unrolled_theta->set_predicate(smap.lookup(theta->predicate()->origin())); - for (auto olv = theta->begin(), nlv = unrolled_theta->begin(); olv != theta->end(); olv++, nlv++) + auto newLoopVars = unrolled_theta->GetLoopVars(); + for (size_t i = 0; i < oldLoopVars.size(); ++i) { - auto origin = smap.lookup((*olv)->result()->origin()); - (*nlv)->result()->divert_to(origin); - smap.insert(*olv, *nlv); + const auto & olv = oldLoopVars[i]; + const auto & nlv = newLoopVars[i]; + auto origin = smap.lookup(olv.post->origin()); + nlv.post->divert_to(origin); + smap.insert(olv.output, nlv.output); } if (remainder != 0) @@ -270,8 +276,8 @@ add_remainder(const unrollinfo & ui, rvsdg::SubstitutionMap & 
smap, size_t facto We only need to redirect the users of the outputs of the old theta node to the outputs of the new theta node, as there are no residual iterations. */ - for (const auto & olv : *theta) - olv->divert_users(smap.lookup(olv)); + for (const auto & olv : theta->GetLoopVars()) + olv.output->divert_users(smap.lookup(olv.output)); return remove(theta); } @@ -280,8 +286,8 @@ add_remainder(const unrollinfo & ui, rvsdg::SubstitutionMap & smap, size_t facto redirecting the inputs of the old theta to the outputs of the unrolled theta. */ - for (const auto & olv : *theta) - olv->input()->divert_to(smap.lookup(olv)); + for (const auto & olv : theta->GetLoopVars()) + olv.input->divert_to(smap.lookup(olv.output)); if (remainder == 1) { @@ -332,8 +338,8 @@ create_unrolled_gamma_predicate(const unrollinfo & ui, size_t factor) { auto region = ui.theta()->region(); auto nbits = ui.nbits(); - auto step = ui.step()->input()->origin(); - auto end = ui.end()->input()->origin(); + auto step = ui.theta()->MapPreLoopVar(*ui.step()).input->origin(); + auto end = ui.theta()->MapPreLoopVar(*ui.end()).input->origin(); auto uf = jlm::rvsdg::create_bitconstant(region, nbits, factor); auto mul = jlm::rvsdg::bitmul_op::create(nbits, step, uf); @@ -380,8 +386,8 @@ static jlm::rvsdg::output * create_residual_gamma_predicate(const rvsdg::SubstitutionMap & smap, const unrollinfo & ui) { auto region = ui.theta()->region(); - auto idv = smap.lookup(ui.theta()->output(ui.idv()->input()->index())); - auto end = ui.end()->input()->origin(); + auto idv = smap.lookup(ui.theta()->MapPreLoopVar(*ui.idv()).output); + auto end = ui.theta()->MapPreLoopVar(*ui.end()).input->origin(); /* FIXME: order of operands */ auto cmp = jlm::rvsdg::SimpleNode::create_normalized(region, ui.cmpoperation(), { idv, end })[0]; @@ -403,29 +409,34 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); rvsdg::SubstitutionMap rmap[2]; - for (const auto 
& olv : *otheta) + for (const auto & olv : otheta->GetLoopVars()) { - auto ev = ngamma->AddEntryVar(olv->input()->origin()); - auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); - rmap[0].insert(olv, ev.branchArgument[0]); - rmap[1].insert(olv->argument(), nlv->argument()); + auto ev = ngamma->AddEntryVar(olv.input->origin()); + auto nlv = ntheta->AddLoopVar(ev.branchArgument[1]); + rmap[0].insert(olv.output, ev.branchArgument[0]); + rmap[1].insert(olv.pre, nlv.pre); } unroll_body(otheta, ntheta->subregion(), rmap[1], factor); pred = create_unrolled_theta_predicate(ntheta->subregion(), rmap[1], ui, factor); ntheta->set_predicate(pred); - for (auto olv = otheta->begin(), nlv = ntheta->begin(); olv != otheta->end(); olv++, nlv++) + auto oldLoopVars = otheta->GetLoopVars(); + auto newLoopVars = ntheta->GetLoopVars(); + for (std::size_t n = 0; n < oldLoopVars.size(); ++n) { - auto origin = rmap[1].lookup((*olv)->result()->origin()); - (*nlv)->result()->divert_to(origin); - rmap[1].insert(*olv, *nlv); + auto & olv = oldLoopVars[n]; + auto & nlv = newLoopVars[n]; + auto origin = rmap[1].lookup(olv.post->origin()); + nlv.post->divert_to(origin); + rmap[1].insert(olv.output, nlv.output); } - for (const auto & olv : *otheta) + for (const auto & olv : oldLoopVars) { - auto xv = ngamma->AddExitVar({ rmap[0].lookup(olv), rmap[1].lookup(olv) }).output; - smap.insert(olv, xv); + auto xv = + ngamma->AddExitVar({ rmap[0].lookup(olv.output), rmap[1].lookup(olv.output) }).output; + smap.insert(olv.output, xv); } } @@ -436,27 +447,32 @@ unroll_unknown_theta(const unrollinfo & ui, size_t factor) auto ntheta = rvsdg::ThetaNode::create(ngamma->subregion(1)); rvsdg::SubstitutionMap rmap[2]; - for (const auto & olv : *otheta) + auto oldLoopVars = otheta->GetLoopVars(); + for (const auto & olv : oldLoopVars) { - auto ev = ngamma->AddEntryVar(smap.lookup(olv)); - auto nlv = ntheta->add_loopvar(ev.branchArgument[1]); - rmap[0].insert(olv, ev.branchArgument[0]); - 
rmap[1].insert(olv->argument(), nlv->argument()); + auto ev = ngamma->AddEntryVar(smap.lookup(olv.output)); + auto nlv = ntheta->AddLoopVar(ev.branchArgument[1]); + rmap[0].insert(olv.output, ev.branchArgument[0]); + rmap[1].insert(olv.pre, nlv.pre); } otheta->subregion()->copy(ntheta->subregion(), rmap[1], false, false); ntheta->set_predicate(rmap[1].lookup(otheta->predicate()->origin())); - for (auto olv = otheta->begin(), nlv = ntheta->begin(); olv != otheta->end(); olv++, nlv++) + auto newLoopVars = ntheta->GetLoopVars(); + + for (std::size_t n = 0; n < oldLoopVars.size(); ++n) { - auto origin = rmap[1].lookup((*olv)->result()->origin()); - (*nlv)->result()->divert_to(origin); - auto xv = ngamma->AddExitVar({ rmap[0].lookup(*olv), *nlv }).output; - smap.insert(*olv, xv); + auto & olv = oldLoopVars[n]; + auto & nlv = newLoopVars[n]; + auto origin = rmap[1].lookup(olv.post->origin()); + nlv.post->divert_to(origin); + auto xv = ngamma->AddExitVar({ rmap[0].lookup(olv.output), nlv.output }).output; + smap.insert(olv.output, xv); } } - for (const auto & olv : *otheta) - olv->divert_users(smap.lookup(olv)); + for (const auto & olv : otheta->GetLoopVars()) + olv.output->divert_users(smap.lookup(olv.output)); remove(otheta); } diff --git a/jlm/llvm/opt/unroll.hpp b/jlm/llvm/opt/unroll.hpp index 5550743ba..7fa5d3244 100644 --- a/jlm/llvm/opt/unroll.hpp +++ b/jlm/llvm/opt/unroll.hpp @@ -53,9 +53,9 @@ class unrollinfo final inline unrollinfo( rvsdg::Node * cmpnode, rvsdg::Node * armnode, - rvsdg::RegionArgument * idv, - rvsdg::RegionArgument * step, - rvsdg::RegionArgument * end) + rvsdg::output * idv, + rvsdg::output * step, + rvsdg::output * end) : end_(end), step_(step), cmpnode_(cmpnode), @@ -132,7 +132,7 @@ class unrollinfo final return *static_cast(&armnode()->GetOperation()); } - inline rvsdg::RegionArgument * + inline rvsdg::output * idv() const noexcept { return idv_; @@ -141,7 +141,7 @@ class unrollinfo final inline jlm::rvsdg::output * init() const noexcept { - 
return idv()->input()->origin(); + return theta()->MapPreLoopVar(*idv()).input->origin(); } inline const jlm::rvsdg::bitvalue_repr * @@ -150,7 +150,7 @@ class unrollinfo final return value(init()); } - inline rvsdg::RegionArgument * + inline rvsdg::output * step() const noexcept { return step_; @@ -162,7 +162,7 @@ class unrollinfo final return value(step()); } - inline rvsdg::RegionArgument * + inline rvsdg::output * end() const noexcept { return end_; @@ -224,11 +224,11 @@ class unrollinfo final return &static_cast(&p->GetOperation())->value(); } - rvsdg::RegionArgument * end_; - rvsdg::RegionArgument * step_; + rvsdg::output * end_; + rvsdg::output * step_; rvsdg::Node * cmpnode_; rvsdg::Node * armnode_; - rvsdg::RegionArgument * idv_; + rvsdg::output * idv_; }; /** diff --git a/jlm/mlir/frontend/MlirToJlmConverter.cpp b/jlm/mlir/frontend/MlirToJlmConverter.cpp index a7005e0f2..4306968ad 100644 --- a/jlm/mlir/frontend/MlirToJlmConverter.cpp +++ b/jlm/mlir/frontend/MlirToJlmConverter.cpp @@ -367,7 +367,7 @@ MlirToJlmConverter::ConvertOperation( // Add loop vars to the theta node for (size_t i = 0; i < inputs.size(); i++) { - rvsdgThetaNode->add_loopvar(inputs[i]); + rvsdgThetaNode->AddLoopVar(inputs[i]); } auto regionResults = ConvertRegion(mlirThetaNode.getRegion(), *rvsdgThetaNode->subregion()); diff --git a/jlm/rvsdg/node.cpp b/jlm/rvsdg/node.cpp index 05e43c70a..4b6be734d 100644 --- a/jlm/rvsdg/node.cpp +++ b/jlm/rvsdg/node.cpp @@ -340,16 +340,22 @@ producer(const jlm::rvsdg::output * output) noexcept if (auto node = output::GetNode(*output)) return node; + if (auto theta = TryGetRegionParentNode(*output)) + { + auto loopvar = theta->MapPreLoopVar(*output); + if (loopvar.post->origin() != output) + { + return nullptr; + } + return producer(loopvar.input->origin()); + } + JLM_ASSERT(dynamic_cast(output)); auto argument = static_cast(output); if (!argument->input()) return nullptr; - if (is(argument->region()->node()) - && 
(argument->region()->result(argument->index() + 1)->origin() != argument)) - return nullptr; - return producer(argument->input()->origin()); } diff --git a/jlm/rvsdg/theta.cpp b/jlm/rvsdg/theta.cpp index d9001654c..1f665dcdf 100644 --- a/jlm/rvsdg/theta.cpp +++ b/jlm/rvsdg/theta.cpp @@ -24,122 +24,230 @@ ThetaOperation::copy() const return std::make_unique(*this); } +ThetaNode::~ThetaNode() noexcept = default; + ThetaNode::ThetaNode(rvsdg::Region & parent) : StructuralNode(ThetaOperation(), &parent, 1) { auto predicate = control_false(subregion()); - ThetaPredicateResult::Create(*predicate); + RegionResult::Create(*subregion(), *predicate, nullptr, ControlType::Create(2)); } -ThetaInput::~ThetaInput() noexcept +ThetaNode::LoopVar +ThetaNode::AddLoopVar(rvsdg::output * origin) { - if (output_) - output_->input_ = nullptr; -} + Node::add_input(std::make_unique(this, origin, origin->Type())); + Node::add_output(std::make_unique(this, origin->Type())); -/* theta output */ + auto input = ThetaNode::input(ninputs() - 1); + auto output = ThetaNode::output(noutputs() - 1); + auto & thetaArgument = RegionArgument::Create(*subregion(), input, input->Type()); + auto & thetaResult = RegionResult::Create(*subregion(), thetaArgument, output, output->Type()); -ThetaOutput::~ThetaOutput() noexcept -{ - if (input_) - input_->output_ = nullptr; + return LoopVar{ input, &thetaArgument, &thetaResult, output }; } -ThetaArgument::~ThetaArgument() noexcept = default; - -ThetaArgument & -ThetaArgument::Copy(rvsdg::Region & region, StructuralInput * input) +ThetaNode * +ThetaNode::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const { - auto thetaInput = util::AssertedCast(input); - return ThetaArgument::Create(region, *thetaInput); -} + auto nf = graph()->GetNodeNormalForm(typeid(Operation)); + nf->set_mutable(false); -ThetaResult::~ThetaResult() noexcept = default; + rvsdg::SubstitutionMap rmap; + auto theta = create(region); -ThetaResult & -ThetaResult::Copy(rvsdg::output & 
origin, StructuralOutput * output) -{ - auto thetaOutput = util::AssertedCast(output); - return ThetaResult::Create(origin, *thetaOutput); -} + /* add loop variables */ + std::vector oldLoopVars = GetLoopVars(); + std::vector newLoopVars; + for (auto olv : oldLoopVars) + { + auto nlv = theta->AddLoopVar(smap.lookup(olv.input->origin())); + newLoopVars.push_back(nlv); + rmap.insert(olv.pre, nlv.pre); + } -ThetaPredicateResult::~ThetaPredicateResult() noexcept = default; + /* copy subregion */ + subregion()->copy(theta->subregion(), rmap, false, false); + theta->set_predicate(rmap.lookup(predicate()->origin())); -ThetaPredicateResult & -ThetaPredicateResult::Copy(rvsdg::output & origin, StructuralOutput * output) -{ - JLM_ASSERT(output == nullptr); - return ThetaPredicateResult::Create(origin); -} + /* redirect loop variables */ + for (size_t i = 0; i < oldLoopVars.size(); ++i) + { + newLoopVars[i].post->divert_to(rmap.lookup(oldLoopVars[i].post->origin())); + smap.insert(oldLoopVars[i].output, newLoopVars[i].output); + } -/* theta node */ + nf->set_mutable(true); + return theta; +} -ThetaNode::~ThetaNode() noexcept = default; +[[nodiscard]] ThetaNode::LoopVar +ThetaNode::MapInputLoopVar(const rvsdg::input & input) const +{ + JLM_ASSERT(rvsdg::TryGetOwnerNode(input) == this); + auto peer = MapInputToOutputIndex(input.index()); + return LoopVar{ const_cast(&input), + subregion()->argument(input.index()), + peer ? subregion()->result(*peer + 1) : nullptr, + peer ? output(*peer) : nullptr }; +} -const ThetaNode::loopvar_iterator & -ThetaNode::loopvar_iterator::operator++() noexcept +[[nodiscard]] ThetaNode::LoopVar +ThetaNode::MapPreLoopVar(const rvsdg::output & argument) const { - if (output_ == nullptr) - return *this; + JLM_ASSERT(rvsdg::TryGetRegionParentNode(argument) == this); + auto peer = MapInputToOutputIndex(argument.index()); + return LoopVar{ input(argument.index()), + const_cast(&argument), + peer ? subregion()->result(*peer + 1) : nullptr, + peer ? 
output(*peer) : nullptr }; +} - auto node = output_->node(); - auto index = output_->index(); - if (index == node->noutputs() - 1) +[[nodiscard]] ThetaNode::LoopVar +ThetaNode::MapPostLoopVar(const rvsdg::input & result) const +{ + JLM_ASSERT(rvsdg::TryGetRegionParentNode(result) == this); + if (result.index() == 0) { - output_ = nullptr; - return *this; + // This is the loop continuation predicate. + // There is nothing sensible to return here. + throw std::logic_error("cannot map loop continuation predicate to loop variable"); } - - index++; - output_ = node->output(index); - return *this; + auto peer = MapOutputToInputIndex(result.index() - 1); + return LoopVar{ peer ? input(*peer) : nullptr, + peer ? subregion()->argument(*peer) : nullptr, + const_cast(&result), + output(result.index() - 1) }; } -ThetaOutput * -ThetaNode::add_loopvar(jlm::rvsdg::output * origin) +[[nodiscard]] ThetaNode::LoopVar +ThetaNode::MapOutputLoopVar(const rvsdg::output & output) const { - Node::add_input(std::make_unique(this, origin, origin->Type())); - Node::add_output(std::make_unique(this, origin->Type())); - - auto input = ThetaNode::input(ninputs() - 1); - auto output = ThetaNode::output(noutputs() - 1); - input->output_ = output; - output->input_ = input; + JLM_ASSERT(rvsdg::TryGetOwnerNode(output) == this); + auto peer = MapOutputToInputIndex(output.index()); + return LoopVar{ peer ? input(*peer) : nullptr, + peer ? subregion()->argument(*peer) : nullptr, + subregion()->result(output.index() + 1), + const_cast(&output) }; +} - auto & thetaArgument = ThetaArgument::Create(*subregion(), *input); - ThetaResult::Create(thetaArgument, *output); - return output; +[[nodiscard]] std::vector +ThetaNode::GetLoopVars() const +{ + std::vector loopvars; + for (size_t input_index = 0; input_index < ninputs(); ++input_index) + { + // Check if there is a matching input/output -- if we are in + // the process of deleting a loop variable, inputs and outputs + // might be unmatched. 
+ auto output_index = MapInputToOutputIndex(input_index); + if (output_index) + { + loopvars.push_back(LoopVar{ input(input_index), + subregion()->argument(input_index), + subregion()->result(*output_index + 1), + output(*output_index) }); + } + } + return loopvars; } -ThetaNode * -ThetaNode::copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const +std::optional +ThetaNode::MapInputToOutputIndex(std::size_t index) const noexcept { - auto nf = graph()->GetNodeNormalForm(typeid(Operation)); - nf->set_mutable(false); + std::size_t offset = 0; + for (std::size_t unmatched : unmatchedInputs) + { + if (unmatched == index) + { + return std::nullopt; + } + if (unmatched < index) + { + ++offset; + } + } - rvsdg::SubstitutionMap rmap; - auto theta = create(region); + index -= offset; + offset = 0; + for (std::size_t unmatched : unmatchedOutputs) + { + if (unmatched <= index) + { + ++offset; + } + } + return index + offset; +} - /* add loop variables */ - for (auto olv : *this) +std::optional +ThetaNode::MapOutputToInputIndex(std::size_t index) const noexcept +{ + std::size_t offset = 0; + for (std::size_t unmatched : unmatchedOutputs) { - auto nlv = theta->add_loopvar(smap.lookup(olv->input()->origin())); - rmap.insert(olv->argument(), nlv->argument()); + if (unmatched == index) + { + return std::nullopt; + } + if (unmatched < index) + { + ++offset; + } } - /* copy subregion */ - subregion()->copy(theta->subregion(), rmap, false, false); - theta->set_predicate(rmap.lookup(predicate()->origin())); + index -= offset; + offset = 0; + for (std::size_t unmatched : unmatchedInputs) + { + if (unmatched <= index) + { + ++offset; + } + } + return index + offset; +} - /* redirect loop variables */ - for (auto olv = begin(), nlv = theta->begin(); olv != end(); olv++, nlv++) +void +ThetaNode::MarkInputIndexErased(std::size_t index) noexcept +{ + if (auto peer = MapInputToOutputIndex(index)) { - (*nlv)->result()->divert_to(rmap.lookup((*olv)->result()->origin())); - 
smap.insert(olv.output(), nlv.output()); + unmatchedOutputs.push_back(*peer); } + else + { + auto i = std::remove(unmatchedInputs.begin(), unmatchedInputs.end(), index); + unmatchedInputs.erase(i, unmatchedInputs.end()); + } + for (auto & unmatched : unmatchedInputs) + { + if (unmatched > index) + { + unmatched -= 1; + } + } +} - nf->set_mutable(true); - return theta; +void +ThetaNode::MarkOutputIndexErased(std::size_t index) noexcept +{ + if (auto peer = MapOutputToInputIndex(index)) + { + unmatchedInputs.push_back(*peer); + } + else + { + auto i = std::remove(unmatchedOutputs.begin(), unmatchedOutputs.end(), index); + unmatchedOutputs.erase(i, unmatchedOutputs.end()); + } + for (auto & unmatched : unmatchedOutputs) + { + if (unmatched > index) + { + unmatched -= 1; + } + } } } diff --git a/jlm/rvsdg/theta.hpp b/jlm/rvsdg/theta.hpp index 4265e052f..fb67bada5 100644 --- a/jlm/rvsdg/theta.hpp +++ b/jlm/rvsdg/theta.hpp @@ -12,6 +12,8 @@ #include #include +#include + namespace jlm::rvsdg { @@ -27,70 +29,43 @@ class ThetaOperation final : public StructuralOperation copy() const override; }; -class ThetaInput; -class ThetaOutput; - class ThetaNode final : public StructuralNode { public: - class loopvar_iterator - { - public: - constexpr loopvar_iterator(ThetaOutput * output) noexcept - : output_(output) - {} - - const loopvar_iterator & - operator++() noexcept; - - inline const loopvar_iterator - operator++(int) noexcept - { - loopvar_iterator it(*this); - ++(*this); - return it; - } - - inline bool - operator==(const loopvar_iterator & other) const noexcept - { - return output_ == other.output_; - } - - inline bool - operator!=(const loopvar_iterator & other) const noexcept - { - return !(*this == other); - } - - ThetaOutput * - operator*() noexcept - { - return output_; - } - - ThetaOutput ** - operator->() noexcept - { - return &output_; - } - - ThetaOutput * - output() const noexcept - { - return output_; - } - - private: - ThetaOutput * output_; - }; - ~ThetaNode() 
noexcept override; private: explicit ThetaNode(rvsdg::Region & parent); public: + /** + * \brief Description of a loop-carried variable. + * + * A loop-carried variable from the POV of a theta node has + * multiple representations (entry, pre-iteration, + * post-iteration, exit). This structure bundles + * all representations of a single loop-carried variable. + */ + struct LoopVar + { + /** + * \brief Variable at loop entry (input to theta). + */ + rvsdg::input * input; + /** + * \brief Variable before iteration (input argument to subregion). + */ + rvsdg::output * pre; + /** + * \brief Variable after iteration (output result from subregion). + */ + rvsdg::input * post; + /** + * \brief Variable at loop exit (output of theta). + */ + rvsdg::output * output; + }; + static ThetaNode * create(rvsdg::Region * parent) { @@ -121,28 +96,6 @@ class ThetaNode final : public StructuralNode remove(node); } - inline size_t - nloopvars() const noexcept - { - JLM_ASSERT(ninputs() == noutputs()); - return ninputs(); - } - - inline ThetaNode::loopvar_iterator - begin() const - { - if (ninputs() == 0) - return loopvar_iterator(nullptr); - - return loopvar_iterator(output(0)); - } - - inline ThetaNode::loopvar_iterator - end() const - { - return loopvar_iterator(nullptr); - } - /** * Remove theta outputs and their respective results. * @@ -161,7 +114,7 @@ class ThetaNode final : public StructuralNode * \see ThetaOutput#IsDead() */ template - util::HashSet + util::HashSet RemoveThetaOutputsWhere(const F & match); /** @@ -177,10 +130,10 @@ class ThetaNode final : public StructuralNode * \see RemoveThetaOutputsWhere() * \see ThetaOutput#IsDead() */ - util::HashSet + util::HashSet PruneThetaOutputs() { - auto match = [](const ThetaOutput &) + auto match = [](const rvsdg::output &) { return true; }; @@ -194,7 +147,7 @@ class ThetaNode final : public StructuralNode * An input must match the condition specified by \p match and its respective argument must be * dead. 
* - * @tparam F A type that supports the function call operator: bool operator(const ThetaInput&) + * @tparam F A type that supports the function call operator: bool operator(const jlm::input&) * @param match Defines the condition of the elements to remove. * @return The outputs corresponding to the removed outputs. * @@ -207,7 +160,7 @@ class ThetaNode final : public StructuralNode * \see RegionArgument#IsDead() */ template - util::HashSet + util::HashSet RemoveThetaInputsWhere(const F & match); /** @@ -223,10 +176,10 @@ class ThetaNode final : public StructuralNode * \see RemoveThetaInputsWhere() * \see RegionArgument#IsDead() */ - util::HashSet + util::HashSet PruneThetaInputs() { - auto match = [](const ThetaInput &) + auto match = [](const rvsdg::input &) { return true; }; @@ -234,230 +187,161 @@ class ThetaNode final : public StructuralNode return RemoveThetaInputsWhere(match); } - ThetaInput * - input(size_t index) const noexcept; - - ThetaOutput * - output(size_t index) const noexcept; - - ThetaOutput * - add_loopvar(jlm::rvsdg::output * origin); + /** + * \brief Creates a new loop-carried variable. + * + * \param origin + * Input value at start of loop. + * + * \returns + * Loop variable description. + * + * Creates a new variable that is routed through the loop. The variable + * is set up such that the post-iteration value is the same as the + * pre-iteration value (i.e. the value remains unchanged through + * the loop). 
Caller can redirect edges inside the loop to turn this + * into a variable changed by the loop + */ + LoopVar + AddLoopVar(rvsdg::output * origin); virtual ThetaNode * copy(rvsdg::Region * region, rvsdg::SubstitutionMap & smap) const override; -}; - -class ThetaInput final : public StructuralInput -{ - friend ThetaNode; - friend ThetaOutput; - -public: - ~ThetaInput() noexcept override; - - ThetaInput(ThetaNode * node, jlm::rvsdg::output * origin, std::shared_ptr type) - : StructuralInput(node, origin, std::move(type)), - output_(nullptr) - {} - - ThetaNode * - node() const noexcept - { - return static_cast(StructuralInput::node()); - } - - ThetaOutput * - output() const noexcept - { - return output_; - } - - inline RegionArgument * - argument() const noexcept - { - JLM_ASSERT(arguments.size() == 1); - return arguments.first(); - } - - [[nodiscard]] inline RegionResult * - result() const noexcept; - -private: - ThetaOutput * output_; -}; - -static inline bool -is_invariant(const ThetaInput * input) noexcept -{ - return input->result()->origin() == input->argument(); -} - -class ThetaOutput final : public StructuralOutput -{ - friend ThetaNode; - friend ThetaInput; - -public: - ~ThetaOutput() noexcept override; - - ThetaOutput(ThetaNode * node, const std::shared_ptr type) - : StructuralOutput(node, std::move(type)), - input_(nullptr) - {} - - ThetaNode * - node() const noexcept - { - return static_cast(StructuralOutput::node()); - } - - [[nodiscard]] ThetaInput * - input() const noexcept - { - return input_; - } - inline RegionArgument * - argument() const noexcept - { - return input_->argument(); - } - - [[nodiscard]] RegionResult * - result() const noexcept - { - JLM_ASSERT(results.size() == 1); - return results.first(); - } - -private: - ThetaInput * input_; -}; - -/** - * Represents a region argument in a theta subregion. 
- */ -class ThetaArgument final : public RegionArgument -{ - friend ThetaNode; - -public: - ~ThetaArgument() noexcept override; - - ThetaArgument & - Copy(rvsdg::Region & region, StructuralInput * input) override; - -private: - ThetaArgument(rvsdg::Region & region, ThetaInput & input) - : RegionArgument(®ion, &input, input.Type()) - { - JLM_ASSERT(is(region.node())); - } - - static ThetaArgument & - Create(rvsdg::Region & region, ThetaInput & input) - { - auto thetaArgument = new ThetaArgument(region, input); - region.append_argument(thetaArgument); - return *thetaArgument; - } -}; - -/** - * Represents a region result in a theta subregion. - */ -class ThetaResult final : public RegionResult -{ - friend ThetaNode; - -public: - ~ThetaResult() noexcept override; - - ThetaResult & - Copy(rvsdg::output & origin, StructuralOutput * output) override; - -private: - ThetaResult(rvsdg::output & origin, ThetaOutput & thetaOutput) - : RegionResult(origin.region(), &origin, &thetaOutput, origin.Type()) - { - JLM_ASSERT(is(origin.region()->node())); - } + /** + * \brief Maps variable at entry to full varibale description. + * + * \param input + * Input to the theta node. + * + * \returns + * The loop variable description. + * + * \pre + * \p input must be an input to this node. + * + * Returns the full description of the loop variable corresponding + * to this entry into the theta node. + */ + [[nodiscard]] LoopVar + MapInputLoopVar(const rvsdg::input & input) const; - static ThetaResult & - Create(rvsdg::output & origin, ThetaOutput & thetaOutput) - { - auto thetaResult = new ThetaResult(origin, thetaOutput); - origin.region()->append_result(thetaResult); - return *thetaResult; - } -}; + /** + * \brief Maps variable at start of loop iteration to full varibale description. + * + * \param argument + * Argument of theta region. + * + * \returns + * The loop variable description. + * + * \pre + * \p argument must be an argument to the subregion of this node. 
+ * + * Returns the full description of the loop variable corresponding + * to this variable at the start of each loop iteration. + */ + [[nodiscard]] LoopVar + MapPreLoopVar(const rvsdg::output & argument) const; -/** - * Represents the predicate result of a theta subregion. - */ -class ThetaPredicateResult final : public RegionResult -{ - friend ThetaNode; + /** + * \brief Maps variable at end of loop iteration to full varibale description. + * + * \param result + * Result of theta region. + * + * \returns + * The loop variable description. + * + * \pre + * \p result must be a result to the subregion of this node. + * + * Returns the full description of the loop variable corresponding + * to this variable at the end of each loop iteration. + */ + [[nodiscard]] LoopVar + MapPostLoopVar(const rvsdg::input & result) const; -public: - ~ThetaPredicateResult() noexcept override; + /** + * \brief Maps variable at exit to full varibale description. + * + * \param output + * Output of this theta node + * + * \returns + * The loop variable description + * + * \pre + * \p output must be an output of this node + * + * Returns the full description of the loop variable corresponding + * to this loop exit value. + */ + [[nodiscard]] LoopVar + MapOutputLoopVar(const rvsdg::output & output) const; - ThetaPredicateResult & - Copy(rvsdg::output & origin, StructuralOutput * output) override; + /** + * \brief Returns all loop variables. + * + * \returns + * List of loop variable descriptions. 
+ */ + [[nodiscard]] std::vector + GetLoopVars() const; private: - explicit ThetaPredicateResult(rvsdg::output & origin) - : RegionResult(origin.region(), &origin, nullptr, ControlType::Create(2)) - { - JLM_ASSERT(is(origin.region()->node())); - } - - static ThetaPredicateResult & - Create(rvsdg::output & origin) - { - auto thetaResult = new ThetaPredicateResult(origin); - origin.region()->append_result(thetaResult); - return *thetaResult; - } + // Calling RemoveThetaInputsWhere/RemoveThetaOutputsWhere can result + // in inputs (and pre-loop arguments) and outputs (and post-loop results) + // to become unmatched. In this case, the theta node itself has + // "invalid" shape until fixed properly. + // The indices of unmatched inputs/outputs are tracked here to + // detect this situation, and also to provide correct mapping. + // Computing the mapping is a bit fiddly as it requires adjusting + // indices accordingly, should seriously consider whether this + // is really necessary or things can rather be reformulated such that + // inputs/outputs are always consistent. 
+ + std::optional + MapInputToOutputIndex(std::size_t index) const noexcept; + + std::optional + MapOutputToInputIndex(std::size_t index) const noexcept; + + void + MarkInputIndexErased(std::size_t index) noexcept; + + void + MarkOutputIndexErased(std::size_t index) noexcept; + + std::vector unmatchedInputs; + std::vector unmatchedOutputs; }; static inline bool -is_invariant(const ThetaOutput * output) noexcept +ThetaLoopVarIsInvariant(const ThetaNode::LoopVar & loopVar) noexcept { - return output->result()->origin() == output->argument(); + return loopVar.post->origin() == loopVar.pre; } /* theta node method definitions */ -inline ThetaInput * -ThetaNode::input(size_t index) const noexcept -{ - return static_cast(Node::input(index)); -} - -inline ThetaOutput * -ThetaNode::output(size_t index) const noexcept -{ - return static_cast(Node::output(index)); -} - template -util::HashSet +util::HashSet ThetaNode::RemoveThetaOutputsWhere(const F & match) { - util::HashSet deadInputs; + util::HashSet deadInputs; + auto loopvars = GetLoopVars(); // iterate backwards to avoid the invalidation of 'n' by RemoveOutput() - for (size_t n = noutputs() - 1; n != static_cast(-1); n--) + for (size_t n = loopvars.size(); n > 0; --n) { - auto & thetaOutput = *output(n); - auto & thetaResult = *thetaOutput.result(); - - if (thetaOutput.IsDead() && match(thetaOutput)) + auto & loopvar = loopvars[n - 1]; + if (loopvar.output->IsDead() && match(*loopvar.output)) { - deadInputs.Insert(thetaOutput.input()); - subregion()->RemoveResult(thetaResult.index()); - RemoveOutput(thetaOutput.index()); + deadInputs.Insert(loopvar.input); + subregion()->RemoveResult(loopvar.post->index()); + MarkOutputIndexErased(loopvar.output->index()); + RemoveOutput(loopvar.output->index()); } } @@ -465,21 +349,22 @@ ThetaNode::RemoveThetaOutputsWhere(const F & match) } template -util::HashSet +util::HashSet ThetaNode::RemoveThetaInputsWhere(const F & match) { - util::HashSet deadOutputs; + util::HashSet 
deadOutputs; // iterate backwards to avoid the invalidation of 'n' by RemoveInput() for (size_t n = ninputs() - 1; n != static_cast(-1); n--) { auto & thetaInput = *input(n); - auto & thetaArgument = *thetaInput.argument(); + auto loopvar = MapInputLoopVar(thetaInput); - if (thetaArgument.IsDead() && match(thetaInput)) + if (loopvar.pre->IsDead() && match(thetaInput)) { - deadOutputs.Insert(thetaInput.output()); - subregion()->RemoveArgument(thetaArgument.index()); + deadOutputs.Insert(loopvar.output); + subregion()->RemoveArgument(loopvar.pre->index()); + MarkInputIndexErased(thetaInput.index()); RemoveInput(thetaInput.index()); } } @@ -487,14 +372,6 @@ ThetaNode::RemoveThetaInputsWhere(const F & match) return deadOutputs; } -/* theta input method definitions */ - -[[nodiscard]] inline RegionResult * -ThetaInput::result() const noexcept -{ - return output_->result(); -} - } #endif diff --git a/tests/TestRvsdgs.cpp b/tests/TestRvsdgs.cpp index 7a94a119d..2e2e7ca81 100644 --- a/tests/TestRvsdgs.cpp +++ b/tests/TestRvsdgs.cpp @@ -1622,29 +1622,29 @@ ThetaTest::SetupRvsdg() auto thetanode = jlm::rvsdg::ThetaNode::create(fct->subregion()); - auto n = thetanode->add_loopvar(zero); - auto l = thetanode->add_loopvar(fct->GetFunctionArguments()[0]); - auto a = thetanode->add_loopvar(fct->GetFunctionArguments()[1]); - auto c = thetanode->add_loopvar(fct->GetFunctionArguments()[2]); - auto s = thetanode->add_loopvar(fct->GetFunctionArguments()[3]); + auto n = thetanode->AddLoopVar(zero); + auto l = thetanode->AddLoopVar(fct->GetFunctionArguments()[0]); + auto a = thetanode->AddLoopVar(fct->GetFunctionArguments()[1]); + auto c = thetanode->AddLoopVar(fct->GetFunctionArguments()[2]); + auto s = thetanode->AddLoopVar(fct->GetFunctionArguments()[3]); auto gepnode = GetElementPtrOperation::Create( - a->argument(), - { n->argument() }, + a.pre, + { n.pre }, jlm::rvsdg::bittype::Create(32), pointerType); - auto store = StoreNonVolatileNode::Create(gepnode, c->argument(), { 
s->argument() }, 4); + auto store = StoreNonVolatileNode::Create(gepnode, c.pre, { s.pre }, 4); auto one = jlm::rvsdg::create_bitconstant(thetanode->subregion(), 32, 1); - auto sum = jlm::rvsdg::bitadd_op::create(32, n->argument(), one); - auto cmp = jlm::rvsdg::bitult_op::create(32, sum, l->argument()); + auto sum = jlm::rvsdg::bitadd_op::create(32, n.pre, one); + auto cmp = jlm::rvsdg::bitult_op::create(32, sum, l.pre); auto predicate = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); - n->result()->divert_to(sum); - s->result()->divert_to(store[0]); + n.post->divert_to(sum); + s.post->divert_to(store[0]); thetanode->set_predicate(predicate); - fct->finalize({ s }); + fct->finalize({ s.output }); GraphExport::Create(*fct->output(), "f"); /* diff --git a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp index 67cdcb9f8..ffb18ee61 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/DeadNodeEliminationTests.cpp @@ -66,8 +66,8 @@ TestDeadLoopNodeOutput() auto loopNode = loop_node::create(lambdaNode->subregion()); jlm::rvsdg::output * buffer; - auto output0 = loopNode->add_loopvar(p, &buffer); - loopNode->add_loopvar(x); + auto output0 = loopNode->AddLoopVar(p, &buffer); + loopNode->AddLoopVar(x); loopNode->set_predicate(buffer); auto lambdaOutput = lambdaNode->finalize({ output0 }); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp index 24bc435bb..33cb51fe4 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryConverterTests.cpp @@ -274,33 +274,29 @@ TestThetaLoad() auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); auto thetaRegion = theta->subregion(); // Predicate - auto idv = theta->add_loopvar(lambda->GetFunctionArguments()[0]); - auto lvs = 
theta->add_loopvar(lambda->GetFunctionArguments()[1]); - auto lve = theta->add_loopvar(lambda->GetFunctionArguments()[2]); + auto idv = theta->AddLoopVar(lambda->GetFunctionArguments()[0]); + auto lvs = theta->AddLoopVar(lambda->GetFunctionArguments()[1]); + auto lve = theta->AddLoopVar(lambda->GetFunctionArguments()[2]); jlm::rvsdg::bitult_op ult(32); jlm::rvsdg::bitsgt_op sgt(32); jlm::rvsdg::bitadd_op add(32); jlm::rvsdg::bitsub_op sub(32); - auto arm = jlm::rvsdg::SimpleNode::create_normalized( - thetaRegion, - add, - { idv->argument(), lvs->argument() })[0]; - auto cmp = - jlm::rvsdg::SimpleNode::create_normalized(thetaRegion, ult, { arm, lve->argument() })[0]; + auto arm = jlm::rvsdg::SimpleNode::create_normalized(thetaRegion, add, { idv.pre, lvs.pre })[0]; + auto cmp = jlm::rvsdg::SimpleNode::create_normalized(thetaRegion, ult, { arm, lve.pre })[0]; auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); - idv->result()->divert_to(arm); + idv.post->divert_to(arm); theta->set_predicate(match); // Load node - auto loadAddress = theta->add_loopvar(lambda->GetFunctionArguments()[3]); - auto memoryStateArgument = theta->add_loopvar(lambda->GetFunctionArguments()[4]); + auto loadAddress = theta->AddLoopVar(lambda->GetFunctionArguments()[3]); + auto memoryStateArgument = theta->AddLoopVar(lambda->GetFunctionArguments()[4]); auto loadOutput = LoadNonVolatileNode::Create( - loadAddress->argument(), - { memoryStateArgument->argument() }, + loadAddress.pre, + { memoryStateArgument.pre }, PointerType::Create(), 32); - loadAddress->result()->divert_to(loadOutput[0]); - memoryStateArgument->result()->divert_to(loadOutput[1]); + loadAddress.post->divert_to(loadOutput[0]); + memoryStateArgument.post->divert_to(loadOutput[1]); auto lambdaOutput = lambda->finalize({ theta->output(3), theta->output(4) }); GraphExport::Create(*lambdaOutput, "f"); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp index 
6a522ea7d..5fbec9e50 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/MemoryQueueTests.cpp @@ -44,15 +44,15 @@ TestSingleLoad() // Load node auto functionArguments = lambda->GetFunctionArguments(); - auto loadAddress = theta->add_loopvar(functionArguments[0]); - auto memoryStateArgument = theta->add_loopvar(functionArguments[1]); + auto loadAddress = theta->AddLoopVar(functionArguments[0]); + auto memoryStateArgument = theta->AddLoopVar(functionArguments[1]); auto loadOutput = LoadNonVolatileNode::Create( - loadAddress->argument(), - { memoryStateArgument->argument() }, + loadAddress.pre, + { memoryStateArgument.pre }, PointerType::Create(), 32); - loadAddress->result()->divert_to(loadOutput[0]); - memoryStateArgument->result()->divert_to(loadOutput[1]); + loadAddress.post->divert_to(loadOutput[0]); + memoryStateArgument.post->divert_to(loadOutput[1]); auto lambdaOutput = lambda->finalize({ theta->output(0), theta->output(1) }); GraphExport::Create(*lambdaOutput, "f"); @@ -122,22 +122,22 @@ TestLoadStore() // Load node auto functionArguments = lambda->GetFunctionArguments(); - auto loadAddress = theta->add_loopvar(functionArguments[0]); - auto storeAddress = theta->add_loopvar(functionArguments[1]); - auto memoryStateArgument = theta->add_loopvar(functionArguments[2]); + auto loadAddress = theta->AddLoopVar(functionArguments[0]); + auto storeAddress = theta->AddLoopVar(functionArguments[1]); + auto memoryStateArgument = theta->AddLoopVar(functionArguments[2]); auto loadOutput = LoadNonVolatileNode::Create( - loadAddress->argument(), - { memoryStateArgument->argument() }, + loadAddress.pre, + { memoryStateArgument.pre }, PointerType::Create(), 32); auto storeOutput = StoreNonVolatileNode::Create( - storeAddress->argument(), + storeAddress.pre, jlm::rvsdg::create_bitconstant(theta->subregion(), 32, 1), { loadOutput[1] }, 32); - loadAddress->result()->divert_to(loadOutput[0]); - 
memoryStateArgument->result()->divert_to(storeOutput[0]); + loadAddress.post->divert_to(loadOutput[0]); + memoryStateArgument.post->divert_to(storeOutput[0]); auto lambdaOutput = lambda->finalize({ theta->output(0), theta->output(2) }); GraphExport::Create(*lambdaOutput, "f"); @@ -205,18 +205,18 @@ TestAddrQueue() // Load node auto functionArguments = lambda->GetFunctionArguments(); - auto address = theta->add_loopvar(functionArguments[0]); - auto memoryStateArgument = theta->add_loopvar(functionArguments[1]); + auto address = theta->AddLoopVar(functionArguments[0]); + auto memoryStateArgument = theta->AddLoopVar(functionArguments[1]); auto loadOutput = LoadNonVolatileNode::Create( - address->argument(), - { memoryStateArgument->argument() }, + address.pre, + { memoryStateArgument.pre }, PointerType::Create(), 32); auto storeOutput = - StoreNonVolatileNode::Create(address->argument(), loadOutput[0], { loadOutput[1] }, 32); + StoreNonVolatileNode::Create(address.pre, loadOutput[0], { loadOutput[1] }, 32); - address->result()->divert_to(loadOutput[0]); - memoryStateArgument->result()->divert_to(storeOutput[0]); + address.post->divert_to(loadOutput[0]); + memoryStateArgument.post->divert_to(storeOutput[0]); auto lambdaOutput = lambda->finalize({ theta->output(0), theta->output(1) }); GraphExport::Create(*lambdaOutput, "f"); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp index 6c53a3f76..2392fe28b 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestFork.cpp @@ -34,11 +34,11 @@ TestFork() auto loop = hls::loop_node::create(lambda->subregion()); auto subregion = loop->subregion(); rvsdg::output * idvBuffer; - loop->add_loopvar(lambda->GetFunctionArguments()[0], &idvBuffer); + loop->AddLoopVar(lambda->GetFunctionArguments()[0], &idvBuffer); rvsdg::output * lvsBuffer; - loop->add_loopvar(lambda->GetFunctionArguments()[1], &lvsBuffer); + 
loop->AddLoopVar(lambda->GetFunctionArguments()[1], &lvsBuffer); rvsdg::output * lveBuffer; - loop->add_loopvar(lambda->GetFunctionArguments()[2], &lveBuffer); + loop->AddLoopVar(lambda->GetFunctionArguments()[2], &lveBuffer); auto arm = rvsdg::SimpleNode::create_normalized(subregion, add, { idvBuffer, lvsBuffer })[0]; auto cmp = rvsdg::SimpleNode::create_normalized(subregion, ult, { arm, lveBuffer })[0]; @@ -104,7 +104,7 @@ TestConstantFork() auto loop = hls::loop_node::create(lambdaRegion); auto subregion = loop->subregion(); rvsdg::output * idvBuffer; - loop->add_loopvar(lambda->GetFunctionArguments()[0], &idvBuffer); + loop->AddLoopVar(lambda->GetFunctionArguments()[0], &idvBuffer); auto bitConstant1 = rvsdg::create_bitconstant(subregion, 32, 1); auto arm = rvsdg::SimpleNode::create_normalized(subregion, add, { idvBuffer, bitConstant1 })[0]; diff --git a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp index 1b1191645..e82f91588 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/TestTheta.cpp @@ -34,18 +34,15 @@ TestUnknownBoundaries() auto theta = jlm::rvsdg::ThetaNode::create(lambda->subregion()); auto subregion = theta->subregion(); - auto idv = theta->add_loopvar(lambda->GetFunctionArguments()[0]); - auto lvs = theta->add_loopvar(lambda->GetFunctionArguments()[1]); - auto lve = theta->add_loopvar(lambda->GetFunctionArguments()[2]); - - auto arm = jlm::rvsdg::SimpleNode::create_normalized( - subregion, - add, - { idv->argument(), lvs->argument() })[0]; - auto cmp = jlm::rvsdg::SimpleNode::create_normalized(subregion, ult, { arm, lve->argument() })[0]; + auto idv = theta->AddLoopVar(lambda->GetFunctionArguments()[0]); + auto lvs = theta->AddLoopVar(lambda->GetFunctionArguments()[1]); + auto lve = theta->AddLoopVar(lambda->GetFunctionArguments()[2]); + + auto arm = jlm::rvsdg::SimpleNode::create_normalized(subregion, add, { idv.pre, lvs.pre })[0]; + auto cmp = 
jlm::rvsdg::SimpleNode::create_normalized(subregion, ult, { arm, lve.pre })[0]; auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); - idv->result()->divert_to(arm); + idv.post->divert_to(arm); theta->set_predicate(match); auto f = lambda->finalize({ theta->output(0), theta->output(1), theta->output(2) }); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp index 8ca61d7ba..8320c0d4c 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/UnusedStateRemovalTests.cpp @@ -90,20 +90,21 @@ TestTheta() auto thetaNode = jlm::rvsdg::ThetaNode::create(&rvsdg.GetRootRegion()); - auto thetaOutput0 = thetaNode->add_loopvar(p); - auto thetaOutput1 = thetaNode->add_loopvar(x); - auto thetaOutput2 = thetaNode->add_loopvar(y); - auto thetaOutput3 = thetaNode->add_loopvar(z); - - thetaOutput2->result()->divert_to(thetaOutput3->argument()); - thetaOutput3->result()->divert_to(thetaOutput2->argument()); - thetaNode->set_predicate(thetaOutput0->argument()); - - auto result = jlm::tests::SimpleNode::Create( - rvsdg.GetRootRegion(), - { thetaOutput0, thetaOutput1, thetaOutput2, thetaOutput3 }, - { valueType }) - .output(0); + auto thetaOutput0 = thetaNode->AddLoopVar(p); + auto thetaOutput1 = thetaNode->AddLoopVar(x); + auto thetaOutput2 = thetaNode->AddLoopVar(y); + auto thetaOutput3 = thetaNode->AddLoopVar(z); + + thetaOutput2.post->divert_to(thetaOutput3.pre); + thetaOutput3.post->divert_to(thetaOutput2.pre); + thetaNode->set_predicate(thetaOutput0.pre); + + auto result = + jlm::tests::SimpleNode::Create( + rvsdg.GetRootRegion(), + { thetaOutput0.output, thetaOutput1.output, thetaOutput2.output, thetaOutput3.output }, + { valueType }) + .output(0); GraphExport::Create(*result, "f"); diff --git a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp index 
d3b2011f4..c81269da1 100644 --- a/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp +++ b/tests/jlm/hls/backend/rvsdg2rhls/test-loop-passthrough.cpp @@ -52,7 +52,7 @@ test() auto loop = hls::loop_node::create(lambda->subregion()); - auto loop_out = loop->add_loopvar(lambda->GetFunctionArguments()[1]); + auto loop_out = loop->AddLoopVar(lambda->GetFunctionArguments()[1]); auto f = lambda->finalize({ loop_out }); jlm::llvm::GraphExport::Create(*f, ""); diff --git a/tests/jlm/llvm/ir/operators/TestCall.cpp b/tests/jlm/llvm/ir/operators/TestCall.cpp index 8308196e0..1042741fc 100644 --- a/tests/jlm/llvm/ir/operators/TestCall.cpp +++ b/tests/jlm/llvm/ir/operators/TestCall.cpp @@ -200,18 +200,18 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto SetupOuterTheta = [](jlm::rvsdg::Region * region, jlm::rvsdg::output * functionG) { auto outerTheta = jlm::rvsdg::ThetaNode::create(region); - auto otf = outerTheta->add_loopvar(functionG); + auto otf = outerTheta->AddLoopVar(functionG); auto innerTheta = jlm::rvsdg::ThetaNode::create(outerTheta->subregion()); - auto itf = innerTheta->add_loopvar(otf->argument()); + auto itf = innerTheta->AddLoopVar(otf.pre); auto predicate = jlm::rvsdg::control_false(innerTheta->subregion()); auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); - auto ev = gamma->AddEntryVar(itf->argument()); + auto ev = gamma->AddEntryVar(itf.pre); auto xv = gamma->AddExitVar(ev.branchArgument); - itf->result()->divert_to(xv.output); - otf->result()->divert_to(itf); + itf.post->divert_to(xv.output); + otf.post->divert_to(itf.output); return otf; }; @@ -233,7 +233,7 @@ TestCallTypeClassifierNonRecursiveDirectCall() auto functionG = SetupOuterTheta(lambda->subregion(), functionGArgument); auto callResults = - CallNode::Create(functionG, functionTypeG, { iOStateArgument, memoryStateArgument }); + CallNode::Create(functionG.output, functionTypeG, { iOStateArgument, memoryStateArgument }); lambda->finalize(callResults); @@ -300,31 +300,31 @@ 
TestCallTypeClassifierNonRecursiveDirectCallTheta() jlm::rvsdg::output * iOState, jlm::rvsdg::output * memoryState) { - auto SetupInnerTheta = [&](jlm::rvsdg::Region * region, jlm::rvsdg::RegionArgument * g) + auto SetupInnerTheta = [&](jlm::rvsdg::Region * region, jlm::rvsdg::output * g) { auto innerTheta = jlm::rvsdg::ThetaNode::create(region); - auto thetaOutputG = innerTheta->add_loopvar(g); + auto thetaOutputG = innerTheta->AddLoopVar(g); return thetaOutputG; }; auto outerTheta = jlm::rvsdg::ThetaNode::create(region); - auto thetaOutputG = outerTheta->add_loopvar(g); - auto thetaOutputValue = outerTheta->add_loopvar(value); - auto thetaOutputIoState = outerTheta->add_loopvar(iOState); - auto thetaOutputMemoryState = outerTheta->add_loopvar(memoryState); + auto thetaOutputG = outerTheta->AddLoopVar(g); + auto thetaOutputValue = outerTheta->AddLoopVar(value); + auto thetaOutputIoState = outerTheta->AddLoopVar(iOState); + auto thetaOutputMemoryState = outerTheta->AddLoopVar(memoryState); - auto functionG = SetupInnerTheta(outerTheta->subregion(), thetaOutputG->argument()); + auto functionG = SetupInnerTheta(outerTheta->subregion(), thetaOutputG.pre); auto callResults = CallNode::Create( - functionG, + functionG.output, functionTypeG, - { thetaOutputIoState->argument(), thetaOutputMemoryState->argument() }); + { thetaOutputIoState.pre, thetaOutputMemoryState.pre }); - thetaOutputG->result()->divert_to(functionG); - thetaOutputValue->result()->divert_to(callResults[0]); - thetaOutputIoState->result()->divert_to(callResults[1]); - thetaOutputMemoryState->result()->divert_to(callResults[2]); + thetaOutputG.post->divert_to(functionG.output); + thetaOutputValue.post->divert_to(callResults[0]); + thetaOutputIoState.post->divert_to(callResults[1]); + thetaOutputMemoryState.post->divert_to(callResults[2]); return std::make_tuple( thetaOutputValue, @@ -356,7 +356,7 @@ TestCallTypeClassifierNonRecursiveDirectCallTheta() iOStateArgument, memoryStateArgument); - auto 
lambdaOutput = lambda->finalize({ loopValue, iOState, memoryState }); + auto lambdaOutput = lambda->finalize({ loopValue.output, iOState.output, memoryState.output }); return std::make_tuple(lambdaOutput, callNode); }; diff --git a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp index a58238d9f..d3c0b587c 100644 --- a/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp +++ b/tests/jlm/llvm/opt/InvariantValueRedirectionTests.cpp @@ -104,20 +104,21 @@ TestTheta() auto l = lambdaNode->GetFunctionArguments()[2]; auto thetaNode1 = jlm::rvsdg::ThetaNode::create(lambdaNode->subregion()); - auto thetaOutput1 = thetaNode1->add_loopvar(c); - auto thetaOutput2 = thetaNode1->add_loopvar(x); - auto thetaOutput3 = thetaNode1->add_loopvar(l); + auto thetaVar1 = thetaNode1->AddLoopVar(c); + auto thetaVar2 = thetaNode1->AddLoopVar(x); + auto thetaVar3 = thetaNode1->AddLoopVar(l); auto thetaNode2 = jlm::rvsdg::ThetaNode::create(thetaNode1->subregion()); - auto thetaOutput4 = thetaNode2->add_loopvar(thetaOutput1->argument()); - thetaNode2->add_loopvar(thetaOutput2->argument()); - auto thetaOutput5 = thetaNode2->add_loopvar(thetaOutput3->argument()); - thetaNode2->set_predicate(thetaOutput4->argument()); + auto thetaVar4 = thetaNode2->AddLoopVar(thetaVar1.pre); + thetaNode2->AddLoopVar(thetaVar2.pre); + auto thetaVar5 = thetaNode2->AddLoopVar(thetaVar3.pre); + thetaNode2->set_predicate(thetaVar4.pre); - thetaOutput3->result()->divert_to(thetaOutput5); - thetaNode1->set_predicate(thetaOutput1->argument()); + thetaVar3.post->divert_to(thetaVar5.output); + thetaNode1->set_predicate(thetaVar1.pre); - auto lambdaOutput = lambdaNode->finalize({ thetaOutput1, thetaOutput2, thetaOutput3 }); + auto lambdaOutput = + lambdaNode->finalize({ thetaVar1.output, thetaVar2.output, thetaVar3.output }); GraphExport::Create(*lambdaOutput, "test"); @@ -127,7 +128,7 @@ TestTheta() // Assert assert(lambdaNode->GetFunctionResults()[0]->origin() 
== c); assert(lambdaNode->GetFunctionResults()[1]->origin() == x); - assert(lambdaNode->GetFunctionResults()[2]->origin() == thetaOutput3); + assert(lambdaNode->GetFunctionResults()[2]->origin() == thetaVar3.output); return 0; } diff --git a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp index 4a4be8c84..dbabb95be 100644 --- a/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp +++ b/tests/jlm/llvm/opt/TestDeadNodeElimination.cpp @@ -130,23 +130,23 @@ TestTheta() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lv1 = theta->add_loopvar(x); - auto lv2 = theta->add_loopvar(y); - auto lv3 = theta->add_loopvar(z); - auto lv4 = theta->add_loopvar(y); + auto lv1 = theta->AddLoopVar(x); + auto lv2 = theta->AddLoopVar(y); + auto lv3 = theta->AddLoopVar(z); + auto lv4 = theta->AddLoopVar(y); - lv1->result()->divert_to(lv2->argument()); - lv2->result()->divert_to(lv1->argument()); + lv1.post->divert_to(lv2.pre); + lv2.post->divert_to(lv1.pre); - auto t = jlm::tests::create_testop(theta->subregion(), { lv3->argument() }, { vt })[0]; - lv3->result()->divert_to(t); - lv4->result()->divert_to(lv2->argument()); + auto t = jlm::tests::create_testop(theta->subregion(), { lv3.pre }, { vt })[0]; + lv3.post->divert_to(t); + lv4.post->divert_to(lv2.pre); auto c = jlm::tests::create_testop(theta->subregion(), {}, { ct })[0]; theta->set_predicate(c); - GraphExport::Create(*theta->output(0), "a"); - GraphExport::Create(*theta->output(3), "b"); + GraphExport::Create(*lv1.output, "a"); + GraphExport::Create(*lv4.output, "b"); // jlm::rvsdg::view(graph.GetRootRegion(), stdout); RunDeadNodeElimination(rm); @@ -173,26 +173,26 @@ TestNestedTheta() auto otheta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lvo1 = otheta->add_loopvar(c); - auto lvo2 = otheta->add_loopvar(x); - auto lvo3 = otheta->add_loopvar(y); + auto lvo1 = otheta->AddLoopVar(c); + auto lvo2 = otheta->AddLoopVar(x); + auto lvo3 = 
otheta->AddLoopVar(y); auto itheta = jlm::rvsdg::ThetaNode::create(otheta->subregion()); - auto lvi1 = itheta->add_loopvar(lvo1->argument()); - auto lvi2 = itheta->add_loopvar(lvo2->argument()); - auto lvi3 = itheta->add_loopvar(lvo3->argument()); + auto lvi1 = itheta->AddLoopVar(lvo1.pre); + auto lvi2 = itheta->AddLoopVar(lvo2.pre); + auto lvi3 = itheta->AddLoopVar(lvo3.pre); - lvi2->result()->divert_to(lvi3->argument()); + lvi2.post->divert_to(lvi3.pre); - itheta->set_predicate(lvi1->argument()); + itheta->set_predicate(lvi1.pre); - lvo2->result()->divert_to(itheta->output(1)); - lvo3->result()->divert_to(itheta->output(1)); + lvo2.post->divert_to(lvi2.output); + lvo3.post->divert_to(lvi2.output); - otheta->set_predicate(lvo1->argument()); + otheta->set_predicate(lvo1.pre); - GraphExport::Create(*otheta->output(2), "y"); + GraphExport::Create(*lvo3.output, "y"); // jlm::rvsdg::view(graph, stdout); RunDeadNodeElimination(rm); @@ -219,19 +219,19 @@ TestEvolvingTheta() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lv0 = theta->add_loopvar(c); - auto lv1 = theta->add_loopvar(x1); - auto lv2 = theta->add_loopvar(x2); - auto lv3 = theta->add_loopvar(x3); - auto lv4 = theta->add_loopvar(x4); + auto lv0 = theta->AddLoopVar(c); + auto lv1 = theta->AddLoopVar(x1); + auto lv2 = theta->AddLoopVar(x2); + auto lv3 = theta->AddLoopVar(x3); + auto lv4 = theta->AddLoopVar(x4); - lv1->result()->divert_to(lv2->argument()); - lv2->result()->divert_to(lv3->argument()); - lv3->result()->divert_to(lv4->argument()); + lv1.post->divert_to(lv2.pre); + lv2.post->divert_to(lv3.pre); + lv3.post->divert_to(lv4.pre); - theta->set_predicate(lv0->argument()); + theta->set_predicate(lv0.pre); - GraphExport::Create(*lv1, "x1"); + GraphExport::Create(*lv1.output, "x1"); // jlm::rvsdg::view(graph, stdout); RunDeadNodeElimination(rm); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp index 
4237dde43..f3ae6d7a1 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestAndersen.cpp @@ -595,7 +595,7 @@ TestTheta() auto & gepOutput = ptg->GetRegisterNode(*test.gep->output(0)); - auto & thetaArgument2 = ptg->GetRegisterNode(*test.theta->output(2)->argument()); + auto & thetaArgument2 = ptg->GetRegisterNode(*test.theta->GetLoopVars()[2].pre); auto & thetaOutput2 = ptg->GetRegisterNode(*test.theta->output(2)); assert(TargetsExactly(lambdaArgument1, { &lambda, &ptg->GetExternalMemoryNode() })); diff --git a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp index f55d5e5a6..21bc00d3c 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestMemoryStateEncoder.cpp @@ -1420,17 +1420,17 @@ ValidateThetaTestSteensgaardAgnostic(const jlm::tests::ThetaTest & test) jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambda_exit_mux, 2, 1)); - auto thetaOutput = - jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); - auto theta = jlm::rvsdg::output::GetNode(*thetaOutput); + auto thetaOutput = lambda_exit_mux->input(0)->origin(); + auto theta = jlm::rvsdg::TryGetOwnerNode(*thetaOutput); assert(theta == test.theta); - auto storeStateOutput = thetaOutput->result()->origin(); + auto loopvar = theta->MapOutputLoopVar(*thetaOutput); + auto storeStateOutput = loopvar.post->origin(); auto store = jlm::rvsdg::output::GetNode(*storeStateOutput); assert(is(*store, 4, 2)); - assert(store->input(storeStateOutput->index() + 2)->origin() == thetaOutput->argument()); + assert(store->input(storeStateOutput->index() + 2)->origin() == loopvar.pre); - auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*thetaOutput->input()->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*loopvar.input->origin()); assert(is(*lambda_entry_mux, 1, 2)); } @@ 
-1445,17 +1445,17 @@ ValidateThetaTestSteensgaardRegionAware(const jlm::tests::ThetaTest & test) jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambdaExitMerge, 2, 1)); - auto thetaOutput = - jlm::util::AssertedCast(lambdaExitMerge->input(0)->origin()); - auto theta = jlm::rvsdg::output::GetNode(*thetaOutput); + auto thetaOutput = lambdaExitMerge->input(0)->origin(); + auto theta = jlm::rvsdg::TryGetOwnerNode(*thetaOutput); assert(theta == test.theta); + auto loopvar = theta->MapOutputLoopVar(*thetaOutput); - auto storeStateOutput = thetaOutput->result()->origin(); + auto storeStateOutput = loopvar.post->origin(); auto store = jlm::rvsdg::output::GetNode(*storeStateOutput); assert(is(*store, 4, 2)); - assert(store->input(storeStateOutput->index() + 2)->origin() == thetaOutput->argument()); + assert(store->input(storeStateOutput->index() + 2)->origin() == loopvar.pre); - auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*thetaOutput->input()->origin()); + auto lambdaEntrySplit = jlm::rvsdg::output::GetNode(*loopvar.input->origin()); assert(is(*lambdaEntrySplit, 1, 2)); } @@ -1470,17 +1470,17 @@ ValidateThetaTestSteensgaardAgnosticTopDown(const jlm::tests::ThetaTest & test) jlm::rvsdg::output::GetNode(*test.lambda->GetFunctionResults()[0]->origin()); assert(is(*lambda_exit_mux, 2, 1)); - auto thetaOutput = - jlm::util::AssertedCast(lambda_exit_mux->input(0)->origin()); - auto theta = jlm::rvsdg::output::GetNode(*thetaOutput); + auto thetaOutput = lambda_exit_mux->input(0)->origin(); + auto theta = jlm::rvsdg::TryGetOwnerNode(*thetaOutput); assert(theta == test.theta); + auto loopvar = theta->MapOutputLoopVar(*thetaOutput); - auto storeStateOutput = thetaOutput->result()->origin(); + auto storeStateOutput = loopvar.post->origin(); auto store = jlm::rvsdg::output::GetNode(*storeStateOutput); assert(is(*store, 4, 2)); - assert(store->input(storeStateOutput->index() + 2)->origin() == thetaOutput->argument()); + 
assert(store->input(storeStateOutput->index() + 2)->origin() == loopvar.pre); - auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*thetaOutput->input()->origin()); + auto lambda_entry_mux = jlm::rvsdg::output::GetNode(*loopvar.input->origin()); assert(is(*lambda_entry_mux, 1, 2)); } diff --git a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp index 06bc14d98..f80b5c8b8 100644 --- a/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp +++ b/tests/jlm/llvm/opt/alias-analyses/TestSteensgaard.cpp @@ -702,7 +702,7 @@ TestTheta() auto & gepOutput = pointsToGraph.GetRegisterNode(*test.gep->output(0)); - auto & thetaArgument2 = pointsToGraph.GetRegisterNode(*test.theta->output(2)->argument()); + auto & thetaArgument2 = pointsToGraph.GetRegisterNode(*test.theta->GetLoopVars()[2].pre); auto & thetaOutput2 = pointsToGraph.GetRegisterNode(*test.theta->output(2)); assertTargets(lambdaArgument1, { &lambda, &pointsToGraph.GetExternalMemoryNode() }); diff --git a/tests/jlm/llvm/opt/test-cne.cpp b/tests/jlm/llvm/opt/test-cne.cpp index 446307e9b..6df22e575 100644 --- a/tests/jlm/llvm/opt/test-cne.cpp +++ b/tests/jlm/llvm/opt/test-cne.cpp @@ -148,24 +148,24 @@ test_theta() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); - auto lv1 = theta->add_loopvar(c); - auto lv2 = theta->add_loopvar(x); - auto lv3 = theta->add_loopvar(x); - auto lv4 = theta->add_loopvar(x); + auto lv1 = theta->AddLoopVar(c); + auto lv2 = theta->AddLoopVar(x); + auto lv3 = theta->AddLoopVar(x); + auto lv4 = theta->AddLoopVar(x); - auto u1 = jlm::tests::create_testop(region, { lv2->argument() }, { vt })[0]; - auto u2 = jlm::tests::create_testop(region, { lv3->argument() }, { vt })[0]; - auto b1 = jlm::tests::create_testop(region, { lv3->argument(), lv4->argument() }, { vt })[0]; + auto u1 = jlm::tests::create_testop(region, { lv2.pre }, { vt })[0]; + auto u2 = jlm::tests::create_testop(region, { 
lv3.pre }, { vt })[0]; + auto b1 = jlm::tests::create_testop(region, { lv3.pre, lv4.pre }, { vt })[0]; - lv2->result()->divert_to(u1); - lv3->result()->divert_to(u2); - lv4->result()->divert_to(b1); + lv2.post->divert_to(u1); + lv3.post->divert_to(u2); + lv4.post->divert_to(b1); - theta->set_predicate(lv1->argument()); + theta->set_predicate(lv1.pre); - GraphExport::Create(*theta->output(1), "lv2"); - GraphExport::Create(*theta->output(2), "lv3"); - GraphExport::Create(*theta->output(3), "lv4"); + GraphExport::Create(*lv2.output, "lv2"); + GraphExport::Create(*lv3.output, "lv3"); + GraphExport::Create(*lv4.output, "lv4"); // jlm::rvsdg::view(graph.GetRootRegion(), stdout); jlm::llvm::cne cne; @@ -201,29 +201,29 @@ test_theta2() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); - auto lv1 = theta->add_loopvar(c); - auto lv2 = theta->add_loopvar(x); - auto lv3 = theta->add_loopvar(x); + auto lv1 = theta->AddLoopVar(c); + auto lv2 = theta->AddLoopVar(x); + auto lv3 = theta->AddLoopVar(x); - auto u1 = jlm::tests::create_testop(region, { lv2->argument() }, { vt })[0]; - auto u2 = jlm::tests::create_testop(region, { lv3->argument() }, { vt })[0]; + auto u1 = jlm::tests::create_testop(region, { lv2.pre }, { vt })[0]; + auto u2 = jlm::tests::create_testop(region, { lv3.pre }, { vt })[0]; auto b1 = jlm::tests::create_testop(region, { u2, u2 }, { vt })[0]; - lv2->result()->divert_to(u1); - lv3->result()->divert_to(b1); + lv2.post->divert_to(u1); + lv3.post->divert_to(b1); - theta->set_predicate(lv1->argument()); + theta->set_predicate(lv1.pre); - GraphExport::Create(*theta->output(1), "lv2"); - GraphExport::Create(*theta->output(2), "lv3"); + GraphExport::Create(*lv2.output, "lv2"); + GraphExport::Create(*lv3.output, "lv3"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::cne cne; cne.run(rm, statisticsCollector); // jlm::rvsdg::view(graph, stdout); - assert(lv2->result()->origin() == u1); - assert(lv2->argument()->nusers() 
!= 0 && lv3->argument()->nusers() != 0); + assert(lv2.post->origin() == u1); + assert(lv2.pre->nusers() != 0 && lv3.pre->nusers() != 0); } static inline void @@ -245,32 +245,32 @@ test_theta3() auto theta1 = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto r1 = theta1->subregion(); - auto lv1 = theta1->add_loopvar(c); - auto lv2 = theta1->add_loopvar(x); - auto lv3 = theta1->add_loopvar(x); - auto lv4 = theta1->add_loopvar(x); + auto lv1 = theta1->AddLoopVar(c); + auto lv2 = theta1->AddLoopVar(x); + auto lv3 = theta1->AddLoopVar(x); + auto lv4 = theta1->AddLoopVar(x); auto theta2 = jlm::rvsdg::ThetaNode::create(r1); auto r2 = theta2->subregion(); - auto p = theta2->add_loopvar(lv1->argument()); - theta2->add_loopvar(lv2->argument()); - theta2->add_loopvar(lv3->argument()); - theta2->add_loopvar(lv4->argument()); - theta2->set_predicate(p->argument()); + auto p = theta2->AddLoopVar(lv1.pre); + auto p2 = theta2->AddLoopVar(lv2.pre); + auto p3 = theta2->AddLoopVar(lv3.pre); + auto p4 = theta2->AddLoopVar(lv4.pre); + theta2->set_predicate(p.pre); - auto u1 = jlm::tests::test_op::create(r1, { theta2->output(1) }, { vt }); - auto b1 = jlm::tests::test_op::create(r1, { theta2->output(2), theta2->output(2) }, { vt }); - auto u2 = jlm::tests::test_op::create(r1, { theta2->output(3) }, { vt }); + auto u1 = jlm::tests::test_op::create(r1, { p2.output }, { vt }); + auto b1 = jlm::tests::test_op::create(r1, { p3.output, p3.output }, { vt }); + auto u2 = jlm::tests::test_op::create(r1, { p4.output }, { vt }); - lv2->result()->divert_to(u1->output(0)); - lv3->result()->divert_to(b1->output(0)); - lv4->result()->divert_to(u1->output(0)); + lv2.post->divert_to(u1->output(0)); + lv3.post->divert_to(b1->output(0)); + lv4.post->divert_to(u1->output(0)); - theta1->set_predicate(lv1->argument()); + theta1->set_predicate(lv1.pre); - GraphExport::Create(*theta1->output(1), "lv2"); - GraphExport::Create(*theta1->output(2), "lv3"); - GraphExport::Create(*theta1->output(3), 
"lv4"); + GraphExport::Create(*lv2.output, "lv2"); + GraphExport::Create(*lv3.output, "lv3"); + GraphExport::Create(*lv4.output, "lv4"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::cne cne; @@ -305,23 +305,23 @@ test_theta4() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); - auto lv1 = theta->add_loopvar(c); - auto lv2 = theta->add_loopvar(x); - auto lv3 = theta->add_loopvar(x); - auto lv4 = theta->add_loopvar(y); - auto lv5 = theta->add_loopvar(y); - auto lv6 = theta->add_loopvar(x); - auto lv7 = theta->add_loopvar(x); + auto lv1 = theta->AddLoopVar(c); + auto lv2 = theta->AddLoopVar(x); + auto lv3 = theta->AddLoopVar(x); + auto lv4 = theta->AddLoopVar(y); + auto lv5 = theta->AddLoopVar(y); + auto lv6 = theta->AddLoopVar(x); + auto lv7 = theta->AddLoopVar(x); - auto u1 = jlm::tests::test_op::create(region, { lv2->argument() }, { vt }); - auto b1 = jlm::tests::test_op::create(region, { lv3->argument(), lv3->argument() }, { vt }); + auto u1 = jlm::tests::test_op::create(region, { lv2.pre }, { vt }); + auto b1 = jlm::tests::test_op::create(region, { lv3.pre, lv3.pre }, { vt }); - lv2->result()->divert_to(lv4->argument()); - lv3->result()->divert_to(lv5->argument()); - lv4->result()->divert_to(u1->output(0)); - lv5->result()->divert_to(b1->output(0)); + lv2.post->divert_to(lv4.pre); + lv3.post->divert_to(lv5.pre); + lv4.post->divert_to(u1->output(0)); + lv5.post->divert_to(b1->output(0)); - theta->set_predicate(lv1->argument()); + theta->set_predicate(lv1.pre); auto & ex1 = GraphExport::Create(*theta->output(1), "lv2"); auto & ex2 = GraphExport::Create(*theta->output(2), "lv3"); @@ -334,8 +334,8 @@ test_theta4() // jlm::rvsdg::view(graph, stdout); assert(ex1.origin() != ex2.origin()); - assert(lv2->argument()->nusers() != 0 && lv3->argument()->nusers() != 0); - assert(lv6->result()->origin() == lv7->result()->origin()); + assert(lv2.pre->nusers() != 0 && lv3.pre->nusers() != 0); + assert(lv6.post->origin() == 
lv7.post->origin()); } static inline void @@ -358,16 +358,16 @@ test_theta5() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); auto region = theta->subregion(); - auto lv0 = theta->add_loopvar(c); - auto lv1 = theta->add_loopvar(x); - auto lv2 = theta->add_loopvar(x); - auto lv3 = theta->add_loopvar(y); - auto lv4 = theta->add_loopvar(y); + auto lv0 = theta->AddLoopVar(c); + auto lv1 = theta->AddLoopVar(x); + auto lv2 = theta->AddLoopVar(x); + auto lv3 = theta->AddLoopVar(y); + auto lv4 = theta->AddLoopVar(y); - lv1->result()->divert_to(lv3->argument()); - lv2->result()->divert_to(lv4->argument()); + lv1.post->divert_to(lv3.pre); + lv2.post->divert_to(lv4.pre); - theta->set_predicate(lv0->argument()); + theta->set_predicate(lv0.pre); auto & ex1 = GraphExport::Create(*theta->output(1), "lv1"); auto & ex2 = GraphExport::Create(*theta->output(2), "lv2"); diff --git a/tests/jlm/llvm/opt/test-inversion.cpp b/tests/jlm/llvm/opt/test-inversion.cpp index e37700d68..f87c7ba2a 100644 --- a/tests/jlm/llvm/opt/test-inversion.cpp +++ b/tests/jlm/llvm/opt/test-inversion.cpp @@ -31,20 +31,20 @@ test1() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lvx = theta->add_loopvar(x); - auto lvy = theta->add_loopvar(y); - theta->add_loopvar(z); + auto lvx = theta->AddLoopVar(x); + auto lvy = theta->AddLoopVar(y); + theta->AddLoopVar(z); auto a = jlm::tests::create_testop( theta->subregion(), - { lvx->argument(), lvy->argument() }, + { lvx.pre, lvy.pre }, { jlm::rvsdg::bittype::Create(1) })[0]; auto predicate = jlm::rvsdg::match(1, { { 1, 0 } }, 1, 2, a); auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); - auto evx = gamma->AddEntryVar(lvx->argument()); - auto evy = gamma->AddEntryVar(lvy->argument()); + auto evx = gamma->AddEntryVar(lvx.pre); + auto evy = gamma->AddEntryVar(lvy.pre); auto b = jlm::tests::create_testop( gamma->subregion(0), @@ -57,7 +57,7 @@ test1() auto xvy = gamma->AddExitVar({ b, c }); - 
lvy->result()->divert_to(xvy.output); + lvy.post->divert_to(xvy.output); theta->set_predicate(predicate); @@ -87,26 +87,26 @@ test2() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lv1 = theta->add_loopvar(x); + auto lv1 = theta->AddLoopVar(x); auto n1 = jlm::tests::create_testop( theta->subregion(), - { lv1->argument() }, + { lv1.pre }, { jlm::rvsdg::bittype::Create(1) })[0]; - auto n2 = jlm::tests::create_testop(theta->subregion(), { lv1->argument() }, { vt })[0]; + auto n2 = jlm::tests::create_testop(theta->subregion(), { lv1.pre }, { vt })[0]; auto predicate = jlm::rvsdg::match(1, { { 1, 0 } }, 1, 2, n1); auto gamma = jlm::rvsdg::GammaNode::create(predicate, 2); auto ev1 = gamma->AddEntryVar(n1); - auto ev2 = gamma->AddEntryVar(lv1->argument()); + auto ev2 = gamma->AddEntryVar(lv1.pre); auto ev3 = gamma->AddEntryVar(n2); gamma->AddExitVar(ev1.branchArgument); gamma->AddExitVar(ev2.branchArgument); gamma->AddExitVar(ev3.branchArgument); - lv1->result()->divert_to(gamma->output(1)); + lv1.post->divert_to(gamma->output(1)); theta->set_predicate(predicate); diff --git a/tests/jlm/llvm/opt/test-push.cpp b/tests/jlm/llvm/opt/test-push.cpp index ab4a5f39d..7cca44259 100644 --- a/tests/jlm/llvm/opt/test-push.cpp +++ b/tests/jlm/llvm/opt/test-push.cpp @@ -76,23 +76,20 @@ test_theta() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lv1 = theta->add_loopvar(c); - auto lv2 = theta->add_loopvar(x); - auto lv3 = theta->add_loopvar(x); - auto lv4 = theta->add_loopvar(s); + auto lv1 = theta->AddLoopVar(c); + auto lv2 = theta->AddLoopVar(x); + auto lv3 = theta->AddLoopVar(x); + auto lv4 = theta->AddLoopVar(s); auto o1 = jlm::tests::create_testop(theta->subregion(), {}, { vt })[0]; - auto o2 = jlm::tests::create_testop(theta->subregion(), { o1, lv3->argument() }, { vt })[0]; - auto o3 = jlm::tests::create_testop(theta->subregion(), { lv2->argument(), o2 }, { vt })[0]; - auto o4 = jlm::tests::create_testop( - 
theta->subregion(), - { lv3->argument(), lv4->argument() }, - { st })[0]; + auto o2 = jlm::tests::create_testop(theta->subregion(), { o1, lv3.pre }, { vt })[0]; + auto o3 = jlm::tests::create_testop(theta->subregion(), { lv2.pre, o2 }, { vt })[0]; + auto o4 = jlm::tests::create_testop(theta->subregion(), { lv3.pre, lv4.pre }, { st })[0]; - lv2->result()->divert_to(o3); - lv4->result()->divert_to(o4); + lv2.post->divert_to(o3); + lv4.post->divert_to(o4); - theta->set_predicate(lv1->argument()); + theta->set_predicate(lv1.pre); GraphExport::Create(*theta->output(0), "c"); @@ -121,18 +118,17 @@ test_push_theta_bottom() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lvc = theta->add_loopvar(c); - auto lva = theta->add_loopvar(a); - auto lvv = theta->add_loopvar(v); - auto lvs = theta->add_loopvar(s); + auto lvc = theta->AddLoopVar(c); + auto lva = theta->AddLoopVar(a); + auto lvv = theta->AddLoopVar(v); + auto lvs = theta->AddLoopVar(s); - auto s1 = - StoreNonVolatileNode::Create(lva->argument(), lvv->argument(), { lvs->argument() }, 4)[0]; + auto s1 = StoreNonVolatileNode::Create(lva.pre, lvv.pre, { lvs.pre }, 4)[0]; - lvs->result()->divert_to(s1); - theta->set_predicate(lvc->argument()); + lvs.post->divert_to(s1); + theta->set_predicate(lvc.pre); - auto & ex = GraphExport::Create(*lvs, "s"); + auto & ex = GraphExport::Create(*lvs.output, "s"); jlm::rvsdg::view(graph, stdout); jlm::llvm::push_bottom(theta); diff --git a/tests/jlm/llvm/opt/test-unroll.cpp b/tests/jlm/llvm/opt/test-unroll.cpp index f901dd3e4..c6677b524 100644 --- a/tests/jlm/llvm/opt/test-unroll.cpp +++ b/tests/jlm/llvm/opt/test-unroll.cpp @@ -47,15 +47,15 @@ create_theta( auto theta = ThetaNode::create(&graph->GetRootRegion()); auto subregion = theta->subregion(); - auto idv = theta->add_loopvar(init); - auto lvs = theta->add_loopvar(step); - auto lve = theta->add_loopvar(end); + auto idv = theta->AddLoopVar(init); + auto lvs = theta->AddLoopVar(step); + auto lve = 
theta->AddLoopVar(end); - auto arm = SimpleNode::create_normalized(subregion, aop, { idv->argument(), lvs->argument() })[0]; - auto cmp = SimpleNode::create_normalized(subregion, cop, { arm, lve->argument() })[0]; + auto arm = SimpleNode::create_normalized(subregion, aop, { idv.pre, lvs.pre })[0]; + auto cmp = SimpleNode::create_normalized(subregion, cop, { arm, lve.pre })[0]; auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, cmp); - idv->result()->divert_to(arm); + idv.post->divert_to(arm); theta->set_predicate(match); return theta; @@ -87,7 +87,7 @@ test_unrollinfo() assert(!ui->is_known()); assert(!ui->niterations()); assert(ui->theta() == theta); - assert(ui->idv()->input()->origin() == x); + assert(theta->MapPreLoopVar(*ui->idv()).input->origin() == x); } { @@ -242,19 +242,19 @@ test_unknown_boundaries() auto y = &jlm::tests::GraphImport::Create(graph, bt, "y"); auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lv1 = theta->add_loopvar(x); - auto lv2 = theta->add_loopvar(y); + auto lv1 = theta->AddLoopVar(x); + auto lv2 = theta->AddLoopVar(y); auto one = jlm::rvsdg::create_bitconstant(theta->subregion(), 32, 1); - auto add = jlm::rvsdg::bitadd_op::create(32, lv1->argument(), one); - auto cmp = jlm::rvsdg::bitult_op::create(32, add, lv2->argument()); + auto add = jlm::rvsdg::bitadd_op::create(32, lv1.pre, one); + auto cmp = jlm::rvsdg::bitult_op::create(32, add, lv2.pre); auto match = jlm::rvsdg::match(1, { { 1, 0 } }, 1, 2, cmp); - lv1->result()->divert_to(add); + lv1.post->divert_to(add); theta->set_predicate(match); - auto & ex1 = GraphExport::Create(*lv1, "x"); + auto & ex1 = GraphExport::Create(*lv1.output, "x"); // jlm::rvsdg::view(graph, stdout); jlm::llvm::loopunroll loopunroll(2); @@ -301,59 +301,59 @@ test_nested_theta() /* Outer loop */ auto otheta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lvo_init = otheta->add_loopvar(init); - auto lvo_step = otheta->add_loopvar(step); - auto lvo_end = 
otheta->add_loopvar(end); + auto lvo_init = otheta->AddLoopVar(init); + auto lvo_step = otheta->AddLoopVar(step); + auto lvo_end = otheta->AddLoopVar(end); - auto add = jlm::rvsdg::bitadd_op::create(32, lvo_init->argument(), lvo_step->argument()); - auto compare = jlm::rvsdg::bitult_op::create(32, add, lvo_end->argument()); + auto add = jlm::rvsdg::bitadd_op::create(32, lvo_init.pre, lvo_step.pre); + auto compare = jlm::rvsdg::bitult_op::create(32, add, lvo_end.pre); auto match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, compare); otheta->set_predicate(match); - lvo_init->result()->divert_to(add); + lvo_init.post->divert_to(add); /* First inner loop in the original loop */ auto inner_theta = jlm::rvsdg::ThetaNode::create(otheta->subregion()); auto inner_init = jlm::rvsdg::create_bitconstant(otheta->subregion(), 32, 0); - auto lvi_init = inner_theta->add_loopvar(inner_init); - auto lvi_step = inner_theta->add_loopvar(lvo_step->argument()); - auto lvi_end = inner_theta->add_loopvar(lvo_end->argument()); + auto lvi_init = inner_theta->AddLoopVar(inner_init); + auto lvi_step = inner_theta->AddLoopVar(lvo_step.pre); + auto lvi_end = inner_theta->AddLoopVar(lvo_end.pre); - auto inner_add = jlm::rvsdg::bitadd_op::create(32, lvi_init->argument(), lvi_step->argument()); - auto inner_compare = jlm::rvsdg::bitult_op::create(32, inner_add, lvi_end->argument()); + auto inner_add = jlm::rvsdg::bitadd_op::create(32, lvi_init.pre, lvi_step.pre); + auto inner_compare = jlm::rvsdg::bitult_op::create(32, inner_add, lvi_end.pre); auto inner_match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, inner_compare); inner_theta->set_predicate(inner_match); - lvi_init->result()->divert_to(inner_add); + lvi_init.post->divert_to(inner_add); /* Nested inner loop */ auto inner_nested_theta = jlm::rvsdg::ThetaNode::create(inner_theta->subregion()); auto inner_nested_init = jlm::rvsdg::create_bitconstant(inner_theta->subregion(), 32, 0); - auto lvi_nested_init = 
inner_nested_theta->add_loopvar(inner_nested_init); - auto lvi_nested_step = inner_nested_theta->add_loopvar(lvi_step->argument()); - auto lvi_nested_end = inner_nested_theta->add_loopvar(lvi_end->argument()); + auto lvi_nested_init = inner_nested_theta->AddLoopVar(inner_nested_init); + auto lvi_nested_step = inner_nested_theta->AddLoopVar(lvi_step.pre); + auto lvi_nested_end = inner_nested_theta->AddLoopVar(lvi_end.pre); auto inner_nested_add = - jlm::rvsdg::bitadd_op::create(32, lvi_nested_init->argument(), lvi_nested_step->argument()); + jlm::rvsdg::bitadd_op::create(32, lvi_nested_init.pre, lvi_nested_step.pre); auto inner_nested_compare = - jlm::rvsdg::bitult_op::create(32, inner_nested_add, lvi_nested_end->argument()); + jlm::rvsdg::bitult_op::create(32, inner_nested_add, lvi_nested_end.pre); auto inner_nested_match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, inner_nested_compare); inner_nested_theta->set_predicate(inner_nested_match); - lvi_nested_init->result()->divert_to(inner_nested_add); + lvi_nested_init.post->divert_to(inner_nested_add); /* Second inner loop in the original loop */ auto inner2_theta = jlm::rvsdg::ThetaNode::create(otheta->subregion()); auto inner2_init = jlm::rvsdg::create_bitconstant(otheta->subregion(), 32, 0); - auto lvi2_init = inner2_theta->add_loopvar(inner2_init); - auto lvi2_step = inner2_theta->add_loopvar(lvo_step->argument()); - auto lvi2_end = inner2_theta->add_loopvar(lvo_end->argument()); + auto lvi2_init = inner2_theta->AddLoopVar(inner2_init); + auto lvi2_step = inner2_theta->AddLoopVar(lvo_step.pre); + auto lvi2_end = inner2_theta->AddLoopVar(lvo_end.pre); - auto inner2_add = jlm::rvsdg::bitadd_op::create(32, lvi2_init->argument(), lvi2_step->argument()); - auto inner2_compare = jlm::rvsdg::bitult_op::create(32, inner2_add, lvi2_end->argument()); + auto inner2_add = jlm::rvsdg::bitadd_op::create(32, lvi2_init.pre, lvi2_step.pre); + auto inner2_compare = jlm::rvsdg::bitult_op::create(32, inner2_add, lvi2_end.pre); auto 
inner2_match = jlm::rvsdg::match(1, { { 1, 1 } }, 0, 2, inner2_compare); inner2_theta->set_predicate(inner2_match); - lvi2_init->result()->divert_to(inner2_add); + lvi2_init.post->divert_to(inner2_add); // jlm::rvsdg::view(graph, stdout); jlm::llvm::loopunroll loopunroll(4); diff --git a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp index 4dc2d775d..d92e4599a 100644 --- a/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp +++ b/tests/jlm/mlir/backend/TestJlmToMlirConverter.cpp @@ -634,8 +634,8 @@ TestTheta() auto predicate = jlm::rvsdg::control_constant(rvsdgThetaNode->subregion(), 2, 0); - rvsdgThetaNode->add_loopvar(entryvar1); - rvsdgThetaNode->add_loopvar(entryvar2); + rvsdgThetaNode->AddLoopVar(entryvar1); + rvsdgThetaNode->AddLoopVar(entryvar2); rvsdgThetaNode->set_predicate(predicate); // Convert the RVSDG to MLIR diff --git a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp index 348bf174b..1d9348e6d 100644 --- a/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp +++ b/tests/jlm/mlir/frontend/TestMlirToJlmConverter.cpp @@ -998,7 +998,7 @@ TestThetaOp() std::cout << "Checking theta node" << std::endl; assert(thetaNode->ninputs() == 2); - assert(thetaNode->nloopvars() == 2); + assert(thetaNode->GetLoopVars().size() == 2); assert(thetaNode->noutputs() == 2); assert(thetaNode->nsubregions() == 1); assert(is(thetaNode->predicate()->type())); diff --git a/tests/jlm/rvsdg/test-theta.cpp b/tests/jlm/rvsdg/test-theta.cpp index e2b190122..674caef9f 100644 --- a/tests/jlm/rvsdg/test-theta.cpp +++ b/tests/jlm/rvsdg/test-theta.cpp @@ -24,13 +24,13 @@ TestThetaCreation() auto theta = jlm::rvsdg::ThetaNode::create(&graph.GetRootRegion()); - auto lv1 = theta->add_loopvar(imp1); - auto lv2 = theta->add_loopvar(imp2); - auto lv3 = theta->add_loopvar(imp3); + auto lv1 = theta->AddLoopVar(imp1); + auto lv2 = theta->AddLoopVar(imp2); + auto lv3 = 
theta->AddLoopVar(imp3); - lv2->result()->divert_to(lv3->argument()); - lv3->result()->divert_to(lv3->argument()); - theta->set_predicate(lv1->argument()); + lv2.post->divert_to(lv3.pre); + lv3.post->divert_to(lv3.pre); + theta->set_predicate(lv1.pre); jlm::tests::GraphExport::Create(*theta->output(0), "exp"); auto theta2 = static_cast(theta)->copy( @@ -38,13 +38,13 @@ TestThetaCreation() { imp1, imp2, imp3 }); jlm::rvsdg::view(&graph.GetRootRegion(), stdout); - assert(lv1->node() == theta); - assert(lv2->node() == theta); - assert(lv3->node() == theta); + assert(jlm::rvsdg::TryGetOwnerNode(*lv1.output) == theta); + assert(jlm::rvsdg::TryGetOwnerNode(*lv2.output) == theta); + assert(jlm::rvsdg::TryGetOwnerNode(*lv3.output) == theta); assert(theta->predicate() == theta->subregion()->result(0)); - assert(theta->nloopvars() == 3); - assert((*theta->begin())->result() == theta->subregion()->result(1)); + assert(theta->GetLoopVars().size() == 3); + assert(theta->GetLoopVars()[0].post == theta->subregion()->result(1)); assert(dynamic_cast(theta2)); } @@ -64,30 +64,30 @@ TestRemoveThetaOutputsWhere() auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); - auto thetaOutput0 = thetaNode->add_loopvar(ctl); - auto thetaOutput1 = thetaNode->add_loopvar(x); - auto thetaOutput2 = thetaNode->add_loopvar(y); - thetaNode->set_predicate(thetaOutput0->argument()); + auto thetaOutput0 = thetaNode->AddLoopVar(ctl); + auto thetaOutput1 = thetaNode->AddLoopVar(x); + auto thetaOutput2 = thetaNode->AddLoopVar(y); + thetaNode->set_predicate(thetaOutput0.pre); - jlm::tests::GraphExport::Create(*thetaOutput0, ""); + jlm::tests::GraphExport::Create(*thetaOutput0.output, ""); // Act & Assert auto deadInputs = thetaNode->RemoveThetaOutputsWhere( - [&](const ThetaOutput & output) + [&](const jlm::rvsdg::output & output) { - return output.index() == thetaOutput1->index(); + return output.index() == thetaOutput1.output->index(); }); assert(deadInputs.Size() == 1); 
assert(deadInputs.Contains(thetaNode->input(1))); assert(thetaNode->noutputs() == 2); assert(thetaNode->subregion()->nresults() == 3); - assert(thetaOutput0->index() == 0); - assert(thetaOutput0->result()->index() == 1); - assert(thetaOutput2->index() == 1); - assert(thetaOutput2->result()->index() == 2); + assert(thetaOutput0.output->index() == 0); + assert(thetaOutput0.post->index() == 1); + assert(thetaOutput2.output->index() == 1); + assert(thetaOutput2.post->index() == 2); deadInputs = thetaNode->RemoveThetaOutputsWhere( - [](const ThetaOutput &) + [](const jlm::rvsdg::output &) { return true; }); @@ -95,8 +95,8 @@ TestRemoveThetaOutputsWhere() assert(deadInputs.Contains(thetaNode->input(2))); assert(thetaNode->noutputs() == 1); assert(thetaNode->subregion()->nresults() == 2); - assert(thetaOutput0->index() == 0); - assert(thetaOutput0->result()->index() == 1); + assert(thetaOutput0.output->index() == 0); + assert(thetaOutput0.post->index() == 1); } static void @@ -114,12 +114,12 @@ TestPruneThetaOutputs() auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); - auto thetaOutput0 = thetaNode->add_loopvar(ctl); - thetaNode->add_loopvar(x); - thetaNode->add_loopvar(y); - thetaNode->set_predicate(thetaOutput0->argument()); + auto thetaOutput0 = thetaNode->AddLoopVar(ctl); + thetaNode->AddLoopVar(x); + thetaNode->AddLoopVar(y); + thetaNode->set_predicate(thetaOutput0.pre); - jlm::tests::GraphExport::Create(*thetaOutput0, ""); + jlm::tests::GraphExport::Create(*thetaOutput0.output, ""); // Act auto deadInputs = thetaNode->PruneThetaOutputs(); @@ -130,8 +130,8 @@ TestPruneThetaOutputs() assert(deadInputs.Contains(thetaNode->input(2))); assert(thetaNode->noutputs() == 1); assert(thetaNode->subregion()->nresults() == 2); - assert(thetaOutput0->index() == 0); - assert(thetaOutput0->result()->index() == 1); + assert(thetaOutput0.output->index() == 0); + assert(thetaOutput0.post->index() == 1); } static void @@ -149,45 +149,46 @@ TestRemoveThetaInputsWhere() auto 
thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); - auto thetaOutput0 = thetaNode->add_loopvar(ctl); - auto thetaOutput1 = thetaNode->add_loopvar(x); - auto thetaOutput2 = thetaNode->add_loopvar(y); - thetaNode->set_predicate(thetaOutput0->argument()); + auto thetaOutput0 = thetaNode->AddLoopVar(ctl); + auto thetaOutput1 = thetaNode->AddLoopVar(x); + auto thetaOutput2 = thetaNode->AddLoopVar(y); + thetaNode->set_predicate(thetaOutput0.pre); auto result = jlm::tests::SimpleNode::Create(*thetaNode->subregion(), {}, { valueType }).output(0); - thetaOutput1->result()->divert_to(result); - thetaOutput2->result()->divert_to(result); + thetaOutput1.post->divert_to(result); + thetaOutput2.post->divert_to(result); - jlm::tests::GraphExport::Create(*thetaOutput0, ""); + jlm::tests::GraphExport::Create(*thetaOutput0.output, ""); // Act & Assert auto deadOutputs = thetaNode->RemoveThetaInputsWhere( - [&](const ThetaInput & input) + [&](const jlm::rvsdg::input & input) { - return input.index() == thetaOutput1->input()->index(); + return input.index() == thetaOutput1.input->index(); }); assert(deadOutputs.Size() == 1); assert(deadOutputs.Contains(thetaNode->output(1))); assert(thetaNode->ninputs() == 2); assert(thetaNode->subregion()->narguments() == 2); - assert(thetaOutput0->input()->index() == 0); - assert(thetaOutput0->argument()->index() == 0); - assert(thetaOutput2->input()->index() == 1); - assert(thetaOutput2->argument()->index() == 1); + assert(thetaOutput0.input->index() == 0); + assert(thetaOutput0.pre->index() == 0); + assert(thetaOutput2.input->index() == 1); + assert(thetaOutput2.pre->index() == 1); + auto expectDeadOutput = thetaNode->output(2); deadOutputs = thetaNode->RemoveThetaInputsWhere( - [](const ThetaInput &) + [](const jlm::rvsdg::input & /* input */) { return true; }); assert(deadOutputs.Size() == 1); - assert(deadOutputs.Contains(thetaNode->output(2))); + assert(deadOutputs.Contains(expectDeadOutput)); assert(thetaNode->ninputs() == 1); 
assert(thetaNode->subregion()->narguments() == 1); - assert(thetaOutput0->input()->index() == 0); - assert(thetaOutput0->argument()->index() == 0); + assert(thetaOutput0.input->index() == 0); + assert(thetaOutput0.pre->index() == 0); } static void @@ -205,18 +206,18 @@ TestPruneThetaInputs() auto thetaNode = ThetaNode::create(&rvsdg.GetRootRegion()); - auto thetaOutput0 = thetaNode->add_loopvar(ctl); - auto thetaOutput1 = thetaNode->add_loopvar(x); - auto thetaOutput2 = thetaNode->add_loopvar(y); - thetaNode->set_predicate(thetaOutput0->argument()); + auto thetaOutput0 = thetaNode->AddLoopVar(ctl); + auto thetaOutput1 = thetaNode->AddLoopVar(x); + auto thetaOutput2 = thetaNode->AddLoopVar(y); + thetaNode->set_predicate(thetaOutput0.pre); auto result = jlm::tests::SimpleNode::Create(*thetaNode->subregion(), {}, { valueType }).output(0); - thetaOutput1->result()->divert_to(result); - thetaOutput2->result()->divert_to(result); + thetaOutput1.post->divert_to(result); + thetaOutput2.post->divert_to(result); - jlm::tests::GraphExport::Create(*thetaOutput0, ""); + jlm::tests::GraphExport::Create(*thetaOutput0.output, ""); // Act auto deadOutputs = thetaNode->PruneThetaInputs(); @@ -227,8 +228,8 @@ TestPruneThetaInputs() assert(deadOutputs.Contains(thetaNode->output(2))); assert(thetaNode->ninputs() == 1); assert(thetaNode->subregion()->narguments() == 1); - assert(thetaOutput0->input()->index() == 0); - assert(thetaOutput0->argument()->index() == 0); + assert(thetaOutput0.input->index() == 0); + assert(thetaOutput0.pre->index() == 0); } static int From acd463fb075bfa54848579705eb61c2ad51ee720 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felix=20Rei=C3=9Fmann?= Date: Tue, 7 Jan 2025 18:27:23 +0100 Subject: [PATCH 166/170] Generate indirect call nodes set only once per indirect call. (#706) `AddIndirectCallNodes` may be called multiple times for a single `indirectCall`. 
The assertion inside `CreateIndirectCallNodesSet` would then fail on subsequent calls, as the set was already created. This PR exchanges `CreateIndirectCallNodesSet` for `GetOrCreateIndirectCallNodesSet`, a pattern which already exists for other sets, in order to avoid this issue. --- .../alias-analyses/TopDownMemoryNodeEliminator.cpp | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp index eede0c420..52041340a 100644 --- a/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp +++ b/jlm/llvm/opt/alias-analyses/TopDownMemoryNodeEliminator.cpp @@ -182,7 +182,7 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi const util::HashSet & memoryNodes) { JLM_ASSERT(CallNode::ClassifyCall(indirectCall)->IsIndirectCall()); - auto & set = CreateIndirectCallNodesSet(indirectCall); + auto & set = GetOrCreateIndirectCallNodesSet(indirectCall); set.UnionWith(memoryNodes); } @@ -251,10 +251,13 @@ class TopDownMemoryNodeEliminator::Provisioning final : public MemoryNodeProvisi } util::HashSet & - CreateIndirectCallNodesSet(const CallNode & indirectCall) + GetOrCreateIndirectCallNodesSet(const CallNode & indirectCall) { - JLM_ASSERT(!HasIndirectCallNodesSet(indirectCall)); - IndirectCallNodes_[&indirectCall] = {}; + if (!HasIndirectCallNodesSet(indirectCall)) + { + IndirectCallNodes_[&indirectCall] = {}; + } + return IndirectCallNodes_[&indirectCall]; } From 7a0ae398ddf6a112a12435b825eb7946896b0f17 Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Tue, 7 Jan 2025 21:23:58 +0100 Subject: [PATCH 167/170] Generate comment in PR from CI if cycles differ (#721) If the cycle count for one or more of the hls test suite benchmarks results in a different cycle count than expected, then a comment is generated that appears in the PR. 
--------- Co-authored-by: HKrogstie --- .github/workflows/hls.yml | 14 ++++++++++++++ scripts/run-hls-test.sh | 2 +- 2 files changed, 15 insertions(+), 1 deletion(-) diff --git a/.github/workflows/hls.yml b/.github/workflows/hls.yml index 72c4a85d7..d9890c58d 100644 --- a/.github/workflows/hls.yml +++ b/.github/workflows/hls.yml @@ -26,3 +26,17 @@ jobs: install-verilator: true - name: "Run hls-test-suite" run: ./scripts/run-hls-test.sh + - name: "Create comment if cycles differ" + if: ${{ hashFiles('./usr/hls-test-suite/build/cycle-diff.log') != '' }} + uses: actions/github-script@v7 + with: + github-token: ${{secrets.GITHUB_TOKEN}} + script: | + const fs = require("fs"); + const comment = fs.readFileSync("./usr/hls-test-suite/build/cycle-diff.log", { encoding: "utf8" }); + github.rest.issues.createComment({ + issue_number: context.issue.number, + owner: context.repo.owner, + repo: context.repo.repo, + body: "```\n" + comment + "```" + }); \ No newline at end of file diff --git a/scripts/run-hls-test.sh b/scripts/run-hls-test.sh index e6a6e226c..edfde5aad 100755 --- a/scripts/run-hls-test.sh +++ b/scripts/run-hls-test.sh @@ -3,7 +3,7 @@ set -eu # URL to the benchmark git repository and the commit to be used GIT_REPOSITORY=https://github.com/phate/hls-test-suite.git -GIT_COMMIT=8ff67e118ab25ce7cbbdc8adfefb19340c54ce83 +GIT_COMMIT=1365c30074f921733dec6c5fc949bd1cb64ae001 # Get the absolute path to this script and set default JLM paths SCRIPT_DIR="$(dirname "$(realpath "$0")")" From fd724ecd2c9c219c6c7d5c232df94c1c444f56d2 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Tue, 7 Jan 2025 21:48:08 +0100 Subject: [PATCH 168/170] Remove load normal form (#723) --- jlm/llvm/ir/operators/Load.cpp | 126 ---------------------- jlm/llvm/ir/operators/Load.hpp | 109 ------------------- tests/jlm/llvm/ir/operators/LoadTests.cpp | 35 ------ 3 files changed, 270 deletions(-) diff --git a/jlm/llvm/ir/operators/Load.cpp b/jlm/llvm/ir/operators/Load.cpp index 1ae3b98d7..8d36fd7ae 
100644 --- a/jlm/llvm/ir/operators/Load.cpp +++ b/jlm/llvm/ir/operators/Load.cpp @@ -566,110 +566,6 @@ perform_load_load_state_reduction( return ld; } -load_normal_form::~load_normal_form() -{} - -load_normal_form::load_normal_form( - const std::type_info & opclass, - rvsdg::node_normal_form * parent, - rvsdg::Graph * graph) noexcept - : simple_normal_form(opclass, parent, graph), - enable_load_mux_(false), - enable_load_store_(false), - enable_load_alloca_(false), - enable_load_load_state_(false), - enable_multiple_origin_(false), - enable_load_store_state_(false) -{} - -bool -load_normal_form::normalize_node(rvsdg::Node * node) const -{ - JLM_ASSERT(is(node->GetOperation())); - auto op = static_cast(&node->GetOperation()); - auto operands = rvsdg::operands(node); - - if (!get_mutable()) - return true; - - if (get_load_mux_reducible() && is_load_mux_reducible(operands)) - { - divert_users(node, perform_load_mux_reduction(*op, operands)); - remove(node); - return false; - } - - if (get_load_store_reducible() && is_load_store_reducible(*op, operands)) - { - divert_users(node, perform_load_store_reduction(*op, operands)); - remove(node); - return false; - } - - if (get_load_alloca_reducible() && is_load_alloca_reducible(operands)) - { - divert_users(node, perform_load_alloca_reduction(*op, operands)); - remove(node); - return false; - } - - if (get_load_store_state_reducible() && is_load_store_state_reducible(*op, operands)) - { - divert_users(node, perform_load_store_state_reduction(*op, operands)); - remove(node); - return false; - } - - if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) - { - divert_users(node, perform_multiple_origin_reduction(*op, operands)); - remove(node); - return false; - } - - if (get_load_load_state_reducible() && is_load_load_state_reducible(operands)) - { - divert_users(node, perform_load_load_state_reduction(*op, operands)); - remove(node); - return false; - } - - return 
simple_normal_form::normalize_node(node); -} - -std::vector -load_normal_form::normalized_create( - rvsdg::Region * region, - const rvsdg::SimpleOperation & op, - const std::vector & operands) const -{ - JLM_ASSERT(is(op)); - auto lop = static_cast(&op); - - if (!get_mutable()) - return simple_normal_form::normalized_create(region, op, operands); - - if (get_load_mux_reducible() && is_load_mux_reducible(operands)) - return perform_load_mux_reduction(*lop, operands); - - if (get_load_store_reducible() && is_load_store_reducible(*lop, operands)) - return perform_load_store_reduction(*lop, operands); - - if (get_load_alloca_reducible() && is_load_alloca_reducible(operands)) - return perform_load_alloca_reduction(*lop, operands); - - if (get_load_store_state_reducible() && is_load_store_state_reducible(*lop, operands)) - return perform_load_store_state_reduction(*lop, operands); - - if (get_multiple_origin_reducible() && is_multiple_origin_reducible(operands)) - return perform_multiple_origin_reduction(*lop, operands); - - if (get_load_load_state_reducible() && is_load_load_state_reducible(operands)) - return perform_load_load_state_reduction(*lop, operands); - - return simple_normal_form::normalized_create(region, op, operands); -} - std::optional> NormalizeLoadMux( const LoadNonVolatileOperation & operation, @@ -737,25 +633,3 @@ NormalizeLoadLoadState( } } - -namespace -{ - -static jlm::rvsdg::node_normal_form * -create_load_normal_form( - const std::type_info & opclass, - jlm::rvsdg::node_normal_form * parent, - jlm::rvsdg::Graph * graph) -{ - return new jlm::llvm::load_normal_form(opclass, parent, graph); -} - -static void __attribute__((constructor)) -register_normal_form() -{ - jlm::rvsdg::node_normal_form::register_factory( - typeid(jlm::llvm::LoadNonVolatileOperation), - create_load_normal_form); -} - -} diff --git a/jlm/llvm/ir/operators/Load.hpp b/jlm/llvm/ir/operators/Load.hpp index d27554ada..7e533d0e2 100644 --- a/jlm/llvm/ir/operators/Load.hpp +++ 
b/jlm/llvm/ir/operators/Load.hpp @@ -17,108 +17,6 @@ namespace jlm::llvm { -/* load normal form */ - -class load_normal_form final : public rvsdg::simple_normal_form -{ -public: - virtual ~load_normal_form(); - - load_normal_form( - const std::type_info & opclass, - rvsdg::node_normal_form * parent, - rvsdg::Graph * graph) noexcept; - - virtual bool - normalize_node(rvsdg::Node * node) const override; - - virtual std::vector - normalized_create( - rvsdg::Region * region, - const rvsdg::SimpleOperation & op, - const std::vector & operands) const override; - - inline void - set_load_mux_reducible(bool enable) noexcept - { - enable_load_mux_ = enable; - } - - inline bool - get_load_mux_reducible() const noexcept - { - return enable_load_mux_; - } - - inline void - set_load_alloca_reducible(bool enable) noexcept - { - enable_load_alloca_ = enable; - } - - inline bool - get_load_alloca_reducible() const noexcept - { - return enable_load_alloca_; - } - - inline void - set_multiple_origin_reducible(bool enable) noexcept - { - enable_multiple_origin_ = enable; - } - - inline bool - get_multiple_origin_reducible() const noexcept - { - return enable_multiple_origin_; - } - - inline void - set_load_store_state_reducible(bool enable) noexcept - { - enable_load_store_state_ = enable; - } - - inline bool - get_load_store_state_reducible() const noexcept - { - return enable_load_store_state_; - } - - inline void - set_load_store_reducible(bool enable) noexcept - { - enable_load_store_ = enable; - } - - inline bool - get_load_store_reducible() const noexcept - { - return enable_load_store_; - } - - void - set_load_load_state_reducible(bool enable) noexcept - { - enable_load_load_state_ = enable; - } - - bool - get_load_load_state_reducible() const noexcept - { - return enable_load_load_state_; - } - -private: - bool enable_load_mux_; - bool enable_load_store_; - bool enable_load_alloca_; - bool enable_load_load_state_; - bool enable_multiple_origin_; - bool 
enable_load_store_state_; -}; - /** * Abstract base class for load operations. * @@ -463,13 +361,6 @@ class LoadNonVolatileOperation final : public LoadOperation [[nodiscard]] size_t NumMemoryStates() const noexcept override; - static load_normal_form * - GetNormalForm(rvsdg::Graph * graph) noexcept - { - return jlm::util::AssertedCast( - graph->GetNodeNormalForm(typeid(LoadNonVolatileOperation))); - } - static std::unique_ptr Create( const variable * address, diff --git a/tests/jlm/llvm/ir/operators/LoadTests.cpp b/tests/jlm/llvm/ir/operators/LoadTests.cpp index ec26c09fe..4d151c6dd 100644 --- a/tests/jlm/llvm/ir/operators/LoadTests.cpp +++ b/tests/jlm/llvm/ir/operators/LoadTests.cpp @@ -90,10 +90,6 @@ TestLoadAllocaReduction() auto bt = jlm::rvsdg::bittype::Create(32); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_alloca_reducible(false); - auto size = &jlm::tests::GraphImport::Create(graph, bt, "v"); auto alloca1 = alloca_op::create(bt, size, 4); @@ -137,10 +133,6 @@ LoadMuxReduction_Success() const auto bitstringType = jlm::rvsdg::bittype::Create(32); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_mux_reducible(false); - const auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); auto s1 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state1"); auto s2 = &jlm::tests::GraphImport::Create(graph, memoryStateType, "state2"); @@ -197,10 +189,6 @@ LoadMuxReduction_WrongNumberOfOperands() const auto mt = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_mux_reducible(false); - const auto a = &jlm::tests::GraphImport::Create(graph, pt, "a"); const auto s1 = &jlm::tests::GraphImport::Create(graph, mt, "s1"); const auto s2 = &jlm::tests::GraphImport::Create(graph, mt, 
"s2"); @@ -246,10 +234,6 @@ LoadMuxReduction_LoadWithoutStates() const auto pointerType = PointerType::Create(); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_mux_reducible(false); - const auto address = &jlm::tests::GraphImport::Create(graph, pointerType, "address"); auto & loadNode = LoadNonVolatileNode::CreateNode(*address, {}, valueType, 4); @@ -289,10 +273,6 @@ TestDuplicateStateReduction() const auto pointerType = PointerType::Create(); jlm::rvsdg::Graph graph; - const auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_multiple_origin_reducible(false); - const auto a = &jlm::tests::GraphImport::Create(graph, pointerType, "a"); auto s1 = &jlm::tests::GraphImport::Create(graph, memoryType, "s1"); auto s2 = &jlm::tests::GraphImport::Create(graph, memoryType, "s2"); @@ -345,10 +325,6 @@ TestLoadStoreStateReduction() auto bt = jlm::rvsdg::bittype::Create(32); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_store_state_reducible(false); - auto size = &jlm::tests::GraphImport::Create(graph, bt, "v"); auto alloca1 = alloca_op::create(bt, size, 4); @@ -402,10 +378,6 @@ TestLoadStoreReduction_Success() auto mt = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_store_reducible(false); - auto a = &jlm::tests::GraphImport::Create(graph, pt, "address"); auto v = &jlm::tests::GraphImport::Create(graph, vt, "value"); auto s = &jlm::tests::GraphImport::Create(graph, mt, "state"); @@ -451,10 +423,6 @@ LoadStoreReduction_DifferentValueOperandType() const auto memoryStateType = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - nf->set_load_store_reducible(false); - auto & address = 
jlm::tests::GraphImport::Create(graph, pointerType, "address"); auto & value = jlm::tests::GraphImport::Create(graph, jlm::rvsdg::bittype::Create(32), "value"); auto memoryState = &jlm::tests::GraphImport::Create(graph, memoryStateType, "memoryState"); @@ -506,9 +474,6 @@ TestLoadLoadReduction() auto mt = MemoryStateType::Create(); jlm::rvsdg::Graph graph; - auto nf = LoadNonVolatileOperation::GetNormalForm(&graph); - nf->set_mutable(false); - auto a1 = &jlm::tests::GraphImport::Create(graph, pt, "a1"); auto a2 = &jlm::tests::GraphImport::Create(graph, pt, "a2"); auto a3 = &jlm::tests::GraphImport::Create(graph, pt, "a3"); From 55160baae3a31948fb81f31ebea327ec9b277aae Mon Sep 17 00:00:00 2001 From: Magnus Sjalander Date: Wed, 8 Jan 2025 06:21:21 +0100 Subject: [PATCH 169/170] Avoid deprecated save-always for CI cache (#722) --- .github/actions/BuildMlirDialect/action.yml | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/.github/actions/BuildMlirDialect/action.yml b/.github/actions/BuildMlirDialect/action.yml index 2110878a2..3f259d77c 100644 --- a/.github/actions/BuildMlirDialect/action.yml +++ b/.github/actions/BuildMlirDialect/action.yml @@ -19,7 +19,6 @@ runs: id: cache-mlir uses: actions/cache@v4 with: - save-always: true path: | ${{ github.workspace }}/lib/mlir-rvsdg key: ${{ runner.os }}-mlir-${{ steps.get-mlir-hash.outputs.hash }} @@ -38,3 +37,11 @@ runs: --install-path ${{ github.workspace }}/lib/mlir-rvsdg shell: bash + - name: "Save MLIR to the cache" + if: steps.cache-mlir.outputs.cache-hit != 'true' + id: save-cache-circt + uses: actions/cache/save@v4 + with: + path: | + ${{ github.workspace }}/lib/mlir-rvsdg + key: ${{ runner.os }}-mlir-${{ steps.get-mlir-hash.outputs.hash }} From 14268ac5e147f1de86e2de2b9ee40cd433225748 Mon Sep 17 00:00:00 2001 From: Nico Reissmann Date: Wed, 8 Jan 2025 06:45:20 +0100 Subject: [PATCH 170/170] Add FlattenBitConcatOperation normalization (#724) --- jlm/rvsdg/bitstring/concat.cpp | 35 
+++++++++++++++++++++++++ jlm/rvsdg/bitstring/concat.hpp | 5 ++++ tests/jlm/rvsdg/bitstring/bitstring.cpp | 11 +++++++- 3 files changed, 50 insertions(+), 1 deletion(-) diff --git a/jlm/rvsdg/bitstring/concat.cpp b/jlm/rvsdg/bitstring/concat.cpp index 156b0e201..4187078b2 100644 --- a/jlm/rvsdg/bitstring/concat.cpp +++ b/jlm/rvsdg/bitstring/concat.cpp @@ -368,4 +368,39 @@ bitconcat_op::copy() const return std::make_unique(*this); } +static std::vector> +GetTypesFromOperands(const std::vector & args) +{ + std::vector> types; + for (const auto arg : args) + { + types.push_back(std::dynamic_pointer_cast(arg->Type())); + } + return types; +} + +std::optional> +FlattenBitConcatOperation(const bitconcat_op &, const std::vector & operands) +{ + JLM_ASSERT(!operands.empty()); + + const auto newOperands = base::detail::associative_flatten( + operands, + [](jlm::rvsdg::output * arg) + { + // FIXME: switch to comparing operator, not just typeid, after + // converting "concat" to not be a binary operator anymore + return is(output::GetNode(*arg)); + }); + + if (operands == newOperands) + { + JLM_ASSERT(newOperands.size() == 2); + return std::nullopt; + } + + JLM_ASSERT(newOperands.size() > 2); + return outputs(&CreateOpNode(newOperands, GetTypesFromOperands(newOperands))); +} + } diff --git a/jlm/rvsdg/bitstring/concat.hpp b/jlm/rvsdg/bitstring/concat.hpp index ae5ccb20d..df1198350 100644 --- a/jlm/rvsdg/bitstring/concat.hpp +++ b/jlm/rvsdg/bitstring/concat.hpp @@ -55,6 +55,11 @@ class bitconcat_op final : public BinaryOperation jlm::rvsdg::output * bitconcat(const std::vector & operands); +std::optional> +FlattenBitConcatOperation( + const bitconcat_op & operation, + const std::vector & operands); + } #endif diff --git a/tests/jlm/rvsdg/bitstring/bitstring.cpp b/tests/jlm/rvsdg/bitstring/bitstring.cpp index f2f589c07..6736be078 100644 --- a/tests/jlm/rvsdg/bitstring/bitstring.cpp +++ b/tests/jlm/rvsdg/bitstring/bitstring.cpp @@ -8,6 +8,7 @@ #include #include +#include 
#include static int @@ -1176,8 +1177,10 @@ ConcatFlattening() { using namespace jlm::rvsdg; - // Arrange & Act + // Arrange Graph graph; + const auto nf = graph.GetNodeNormalForm(typeid(bitconcat_op)); + nf->set_mutable(false); auto x = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "x"); auto y = &jlm::tests::GraphImport::Create(graph, bittype::Create(8), "y"); @@ -1189,6 +1192,12 @@ ConcatFlattening() auto & ex = jlm::tests::GraphExport::Create(*concatResult2, "dummy"); view(graph, stdout); + // Act + const auto concatNode = output::GetNode(*ex.origin()); + ReduceNode(FlattenBitConcatOperation, *concatNode); + + view(graph, stdout); + // Assert auto node = output::GetNode(*ex.origin()); assert(dynamic_cast(&node->GetOperation()));