diff --git a/numba_rvsdg/core/datastructures/basic_block.py b/numba_rvsdg/core/datastructures/basic_block.py index 758d932..a89d16e 100644 --- a/numba_rvsdg/core/datastructures/basic_block.py +++ b/numba_rvsdg/core/datastructures/basic_block.py @@ -1,6 +1,6 @@ import dis -from typing import Tuple, Dict, List, Optional -from dataclasses import dataclass, replace, field +from typing import Dict, List, Optional +from dataclasses import dataclass, field from numba_rvsdg.core.utils import _next_inst_offset from numba_rvsdg.core.datastructures import block_names @@ -26,9 +26,12 @@ class BasicBlock: name: str - _jump_targets: Tuple[str, ...] = tuple() + _jump_targets: list[str] = field(default_factory=list) + backedges: list[str] = field(default_factory=list) - backedges: Tuple[str, ...] = tuple() + def __post_init__(self) -> None: + assert isinstance(self._jump_targets, list) + assert isinstance(self.backedges, list) @property def is_exiting(self) -> bool: @@ -58,7 +61,7 @@ def fallthrough(self) -> bool: return len(self._jump_targets) == 1 @property - def jump_targets(self) -> Tuple[str, ...]: + def jump_targets(self) -> list[str]: """Retrieves the jump targets for this block, excluding any jump targets that are also backedges. @@ -73,31 +76,22 @@ def jump_targets(self) -> Tuple[str, ...]: for j in self._jump_targets: if j not in self.backedges: acc.append(j) - return tuple(acc) + return acc - def declare_backedge(self, target: str) -> "BasicBlock": + def declare_backedge(self, target: str) -> None: """Declare one of the jump targets as a backedge of this block. Parameters ---------- target: str The jump target that is to be declared as a backedge. - - Returns - ------- - basic_block: BasicBlock - The resulting block. - """ if target in self.jump_targets: assert not self.backedges - return replace(self, backedges=(target,)) - return self + self.backedges.append(target) - def replace_jump_targets( - self, jump_targets: Tuple[str, ...] 
- ) -> "BasicBlock": - """Replaces jump targets of this block by the given tuple. + def replace_jump_targets(self, jump_targets: list[str]) -> None: + """Replaces jump targets of this block by the given list. This method replaces the jump targets of the current BasicBlock. The provided jump targets must be in the same order as their @@ -109,19 +103,14 @@ def replace_jump_targets( Parameters ---------- - jump_targets: Tuple - The new jump target tuple. Must be ordered. - - Returns - ------- - basic_block: BasicBlock - The resulting BasicBlock. - + jump_targets: List + The new jump target list. Must be ordered. """ - return replace(self, _jump_targets=jump_targets) + self._jump_targets.clear() + self._jump_targets.extend(jump_targets) - def replace_backedges(self, backedges: Tuple[str, ...]) -> "BasicBlock": - """Replaces back edges of this block by the given tuple. + def replace_backedges(self, backedges: list[str]) -> None: + """Replaces back edges of this block by the given list. This method replaces the back edges of the current BasicBlock. The provided back edges must be in the same order as their @@ -129,16 +118,11 @@ def replace_backedges(self, backedges: Tuple[str, ...]) -> "BasicBlock": Parameters ---------- - backedges: Tuple - The new back edges tuple. Must be ordered. - - Returns - ------- - basic_block: BasicBlock - The resulting BasicBlock. - + backedges: List + The new back edges list. Must be ordered. """ - return replace(self, backedges=backedges) + self.backedges.clear() + self.backedges.extend(backedges) @dataclass(frozen=True) @@ -271,10 +255,8 @@ class SyntheticBranch(SyntheticBlock): variable: str = "" branch_value_table: Dict[int, str] = field(default_factory=lambda: {}) - def replace_jump_targets( - self, jump_targets: Tuple[str, ...] - ) -> "BasicBlock": - """Replaces jump targets of this block by the given tuple. + def replace_jump_targets(self, jump_targets: list[str]) -> None: + """Replaces jump targets of this block by the given list. 
This method replaces the jump targets of the current BasicBlock. The provided jump targets must be in the same order as their @@ -287,18 +269,12 @@ def replace_jump_targets( Parameters ---------- - jump_targets: Tuple - The new jump target tuple. Must be ordered. - - Returns - ------- - basic_block: BasicBlock - The resulting BasicBlock. - + jump_targets: List + The new jump target list. Must be ordered. """ - old_branch_value_table = self.branch_value_table - new_branch_value_table = {} + old_branch_value_table = self.branch_value_table.copy() + self.branch_value_table.clear() for target in self._jump_targets: if target not in jump_targets: # ASSUMPTION: only one jump_target is being updated @@ -307,18 +283,15 @@ def replace_jump_targets( new_target = next(iter(diff)) for k, v in old_branch_value_table.items(): if v == target: - new_branch_value_table[k] = new_target + self.branch_value_table[k] = new_target else: # copy all old values for k, v in old_branch_value_table.items(): if v == target: - new_branch_value_table[k] = v + self.branch_value_table[k] = v - return replace( - self, - _jump_targets=jump_targets, - branch_value_table=new_branch_value_table, - ) + self._jump_targets.clear() + self._jump_targets.extend(jump_targets) @dataclass(frozen=True) diff --git a/numba_rvsdg/core/datastructures/byte_flow.py b/numba_rvsdg/core/datastructures/byte_flow.py index fb08a79..04e2381 100644 --- a/numba_rvsdg/core/datastructures/byte_flow.py +++ b/numba_rvsdg/core/datastructures/byte_flow.py @@ -1,5 +1,4 @@ import dis -from copy import deepcopy from dataclasses import dataclass from typing import Generator, Callable @@ -61,90 +60,51 @@ def from_bytecode(code: Callable) -> "ByteFlow": # type: ignore scfg = flowinfo.build_basicblocks() return ByteFlow(bc=bc, scfg=scfg) - def _join_returns(self) -> "ByteFlow": + def _join_returns(self) -> None: """Joins the return blocks within the corresponding SCFG. 
- This method creates a deep copy of the SCFG and performs - operation to join return blocks within the control flow. - It returns a new ByteFlow object with the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + This method performs operation to join return blocks within + the control flow. """ - scfg = deepcopy(self.scfg) - scfg.join_returns() - return ByteFlow(bc=self.bc, scfg=scfg) + self.scfg.join_returns() - def _restructure_loop(self) -> "ByteFlow": + def _restructure_loop(self) -> None: """Restructures the loops within the corresponding SCFG. - Creates a deep copy of the SCFG and performs the operation to - restructure loop constructs within the control flow using - the algorithm LOOP RESTRUCTURING from section 4.1 of Bahmann2015. + Performs the operation to restructure loop constructs within + the control flow using the algorithm LOOP RESTRUCTURING from + section 4.1 of Bahmann2015. It applies the restructuring operation to both the main SCFG - and any subregions within it. It returns a new ByteFlow object - with the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + and any subregions within it. """ - scfg = deepcopy(self.scfg) - restructure_loop(scfg.region) - for region in _iter_subregions(scfg): + restructure_loop(self.scfg.region) + for region in _iter_subregions(self.scfg): restructure_loop(region) - return ByteFlow(bc=self.bc, scfg=scfg) - def _restructure_branch(self) -> "ByteFlow": + def _restructure_branch(self) -> None: """Restructures the branches within the corresponding SCFG. - Creates a deep copy of the SCFG and performs the operation to - restructure branch constructs within the control flow. It applies - the restructuring operation to both the main SCFG and any - subregions within it. It returns a new ByteFlow object with - the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. 
+ This method applies restructuring branch operation to both + the main SCFG and any subregions within it. """ - scfg = deepcopy(self.scfg) - restructure_branch(scfg.region) - for region in _iter_subregions(scfg): + restructure_branch(self.scfg.region) + for region in _iter_subregions(self.scfg): restructure_branch(region) - return ByteFlow(bc=self.bc, scfg=scfg) - def restructure(self) -> "ByteFlow": + def restructure(self) -> None: """Applies join_returns, restructure_loop and restructure_branch in the respective order on the SCFG. - Creates a deep copy of the SCFG and applies a series of - restructuring operations to it. The operations include - joining return blocks, restructuring loop constructs, and - restructuring branch constructs. It returns a new ByteFlow - object with the updated SCFG. - - Returns - ------- - byteflow: ByteFlow - The new ByteFlow object with updated SCFG. + Applies a series of restructuring operations to given SCFG. + The operations include joining return blocks, restructuring + loop constructs, and restructuring branch constructs. """ - scfg = deepcopy(self.scfg) # close - scfg.join_returns() + self._join_returns() # handle loop - restructure_loop(scfg.region) - for region in _iter_subregions(scfg): - restructure_loop(region) + self._restructure_loop() # handle branch - restructure_branch(scfg.region) - for region in _iter_subregions(scfg): - restructure_branch(region) - return ByteFlow(bc=self.bc, scfg=scfg) + self._restructure_branch() def _iter_subregions(scfg: SCFG) -> Generator[RegionBlock, SCFG, None]: diff --git a/numba_rvsdg/core/datastructures/flow_info.py b/numba_rvsdg/core/datastructures/flow_info.py index a6af42d..4824761 100644 --- a/numba_rvsdg/core/datastructures/flow_info.py +++ b/numba_rvsdg/core/datastructures/flow_info.py @@ -126,19 +126,19 @@ def build_basicblocks( for begin, end in zip(offsets, [*offsets[1:], end_offset]): name = names[begin] - targets: Tuple[str, ...] 
+ targets: list[str] term_offset = _prev_inst_offset(end) if term_offset not in self.jump_insts: # implicit jump - targets = (names[end],) + targets = [names[end]] else: - targets = tuple(names[o] for o in self.jump_insts[term_offset]) + targets = [names[o] for o in self.jump_insts[term_offset]] block = PythonBytecodeBlock( name=name, begin=begin, end=end, _jump_targets=targets, - backedges=(), + backedges=[], ) scfg.add_block(block) return scfg diff --git a/numba_rvsdg/core/datastructures/scfg.py b/numba_rvsdg/core/datastructures/scfg.py index 5b4d15e..5018266 100644 --- a/numba_rvsdg/core/datastructures/scfg.py +++ b/numba_rvsdg/core/datastructures/scfg.py @@ -524,17 +524,22 @@ def insert_block( block_type: SyntheticBlock The type/class of the newly created block. """ + # To prevent spurious mutations in successors and predecessors + # since they are a part of the graph. + successors = successors.copy() + predecessors = predecessors.copy() + # TODO: needs a diagram and documentation # initialize new block new_block = block_type( - name=new_name, _jump_targets=tuple(successors), backedges=tuple() + name=new_name, _jump_targets=successors, backedges=[] ) # add block to self self.add_block(new_block) # Replace any arcs from any of predecessors to any of successors with # an arc through the inserted block instead.
for name in predecessors: - block = self.graph.pop(name) + block = self.graph[name] jt = list(block.jump_targets) if successors: for s in successors: @@ -545,7 +550,7 @@ def insert_block( jt.pop(jt.index(s)) else: jt.append(new_name) - self.add_block(block.replace_jump_targets(jump_targets=tuple(jt))) + block.replace_jump_targets(jump_targets=jt) def insert_SyntheticExit( self, @@ -639,8 +644,8 @@ def insert_block_and_control_blocks( variable_assignment[branch_variable] = branch_variable_value synth_assign_block = SyntheticAssignment( name=synth_assign, - _jump_targets=(new_name,), - backedges=(), + _jump_targets=[new_name], + backedges=[], variable_assignment=variable_assignment, ) # add block @@ -652,16 +657,12 @@ def insert_block_and_control_blocks( # replace previous successor with synth_assign jt[jt.index(s)] = synth_assign # finally, replace the jump_targets - self.add_block( - self.graph.pop(name).replace_jump_targets( - jump_targets=tuple(jt) - ) - ) + self.graph[name].replace_jump_targets(jump_targets=jt) # initialize new block, which will hold the branching table new_block = SyntheticHead( name=new_name, - _jump_targets=tuple(successors), - backedges=tuple(), + _jump_targets=successors, + backedges=[], variable=branch_variable, branch_value_table=branch_value_table, ) @@ -1109,30 +1110,35 @@ def reverse_lookup(value: type) -> str: raise TypeError("Block type not found.") seen = set() - q: Set[Tuple[str, BasicBlock]] = set() + q: Set[str] = set() # Order of elements doesn't matter since they're going to # be sorted at the end. 
- q.update(scfg.graph.items()) + graph_dict = {} + all_keys: set[str] = set(scfg.graph.keys()) + q.update(all_keys) + graph_dict.update(scfg.graph) while q: - key, value = q.pop() + key = q.pop() + value = scfg.graph[key] if key in seen: continue seen.add(key) - block_type = reverse_lookup(type(value)) + block_type: str = reverse_lookup(type(value)) blocks[key] = {"type": block_type} if isinstance(value, RegionBlock): - assert value.subregion is not None - assert value.parent_region is not None - q.update(value.subregion.graph.items()) + inner_graph = value.subregion.graph # type: ignore + q.update(inner_graph.keys()) + graph_dict.update(inner_graph) blocks[key]["kind"] = value.kind blocks[key]["contains"] = sorted( - [idx.name for idx in value.subregion.graph.values()] + [idx.name for idx in inner_graph.values()] ) blocks[key]["header"] = value.header blocks[key]["exiting"] = value.exiting - blocks[key]["parent_region"] = value.parent_region.name + parent_name = value.parent_region.name # type: ignore + blocks[key]["parent_region"] = parent_name elif isinstance(value, SyntheticBranch): blocks[key]["branch_value_table"] = value.branch_value_table blocks[key]["variable"] = value.variable @@ -1144,9 +1150,13 @@ def reverse_lookup(value: type) -> str: edges[key] = sorted([i for i in value._jump_targets]) backedges[key] = sorted([i for i in value.backedges]) - graph_dict = {"blocks": blocks, "edges": edges, "backedges": backedges} + graph_dict = { + "blocks": blocks, # type: ignore + "edges": edges, # type: ignore + "backedges": backedges, # type: ignore + } - return graph_dict + return graph_dict # type: ignore @staticmethod def find_outer_graph(graph_dict: Dict[str, Dict[str, Any]]) -> Set[str]: @@ -1181,7 +1191,7 @@ def extract_block_info( block_ref_dict: Dict[str, str], edges: Dict[str, List[str]], backedges: Dict[str, List[str]], - ) -> Tuple[Dict[str, Any], str, Tuple[str, ...], Tuple[str, ...]]: + ) -> Tuple[Dict[str, Any], str, list[str], list[str]]: """Helper 
method to extract information from various components of an `SCFG` graph. @@ -1211,14 +1221,14 @@ def extract_block_info( List of backedges of the requested block. """ block_info = blocks[current_name].copy() - block_edges = tuple(block_ref_dict[idx] for idx in edges[current_name]) + block_edges = [block_ref_dict[idx] for idx in edges[current_name]] if backedges.get(current_name): - block_backedges = tuple( + block_backedges = [ block_ref_dict[idx] for idx in backedges[current_name] - ) + ] else: - block_backedges = () + block_backedges = [] block_type = block_info.pop("type") diff --git a/numba_rvsdg/core/transformations.py b/numba_rvsdg/core/transformations.py index ab1cd13..6b48383 100644 --- a/numba_rvsdg/core/transformations.py +++ b/numba_rvsdg/core/transformations.py @@ -60,9 +60,7 @@ def loop_restructure_helper(scfg: SCFG, loop: Set[str]) -> None: and len(exiting_blocks) == 1 and backedge_blocks[0] == next(iter(exiting_blocks)) ): - scfg.add_block( - scfg.graph.pop(backedge_blocks[0]).declare_backedge(loop_head) - ) + scfg.graph[backedge_blocks[0]].declare_backedge(loop_head) return # The synthetic exiting latch and synthetic exit need to be created @@ -152,8 +150,8 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: # Create the actual control variable block synth_assign_block = SyntheticAssignment( name=synth_assign, - _jump_targets=(synth_exiting_latch,), - backedges=(), + _jump_targets=[synth_exiting_latch], + backedges=[], variable_assignment=variable_assignment, ) # Insert the assignment to the scfg @@ -183,20 +181,18 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: # that point to the headers, no need to add a backedge, # since it will be contained in the SyntheticExitingLatch # later on. 
- block = scfg.graph.pop(name) + block = scfg.graph[name] jts = list(block.jump_targets) for h in headers: if h in jts: jts.remove(h) - scfg.add_block( - block.replace_jump_targets(jump_targets=tuple(jts)) - ) + block.replace_jump_targets(jump_targets=jts) # Setup the assignment block and initialize it with the # correct jump_targets and variable assignment. synth_assign_block = SyntheticAssignment( name=synth_assign, - _jump_targets=(synth_exiting_latch,), - backedges=(), + _jump_targets=[synth_exiting_latch], + backedges=[], variable_assignment=variable_assignment, ) # Add the new block to the SCFG @@ -205,22 +201,19 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: new_jt[new_jt.index(jt)] = synth_assign # finally, replace the jump_targets for this block with the new # ones - scfg.add_block( - scfg.graph.pop(name).replace_jump_targets( - jump_targets=tuple(new_jt) - ) - ) + scfg.graph[name].replace_jump_targets(jump_targets=new_jt) + # Add any new blocks to the loop. loop.update(new_blocks) # Insert the exiting latch, add it to the loop and to the graph. 
synth_exiting_latch_block = SyntheticExitingLatch( name=synth_exiting_latch, - _jump_targets=( + _jump_targets=[ synth_exit if needs_synth_exit else next(iter(exit_blocks)), loop_head, - ), - backedges=(loop_head,), + ], + backedges=[loop_head], variable=backedge_variable, branch_value_table=backedge_value_table, ) @@ -231,8 +224,8 @@ def reverse_lookup(d: Mapping[int, str], value: str) -> int: if needs_synth_exit: synth_exit_block = SyntheticExitBranch( name=synth_exit, - _jump_targets=tuple(exit_blocks), - backedges=(), + _jump_targets=list(exit_blocks), + backedges=[], variable=exit_variable, branch_value_table=exit_value_table, ) @@ -331,33 +324,27 @@ def find_tail_blocks( def update_exiting( region_block: RegionBlock, new_region_header: str, new_region_name: str -) -> RegionBlock: +) -> None: # Recursively updates the exiting blocks of a regionblock region_exiting = region_block.exiting - assert region_block.subregion is not None - region_exiting_block: BasicBlock = region_block.subregion.graph.pop( + region_exiting_block: BasicBlock + region_exiting_block = region_block.subregion.graph[ # type: ignore region_exiting - ) + ] jt = list(region_exiting_block._jump_targets) for idx, s in enumerate(jt): if s is new_region_header: jt[idx] = new_region_name - region_exiting_block = region_exiting_block.replace_jump_targets( - jump_targets=tuple(jt) - ) + region_exiting_block.replace_jump_targets(jump_targets=jt) be = list(region_exiting_block.backedges) for idx, s in enumerate(be): if s is new_region_header: be[idx] = new_region_name - region_exiting_block = region_exiting_block.replace_backedges( - backedges=tuple(be) - ) + region_exiting_block.replace_backedges(backedges=be) if isinstance(region_exiting_block, RegionBlock): - region_exiting_block = update_exiting( + update_exiting( region_exiting_block, new_region_header, new_region_name ) - region_block.subregion.add_block(region_exiting_block) - return region_block def extract_region( @@ -390,27 +377,26 @@ def 
extract_region( # the SCFG represents should not be the meta region. assert scfg.region.kind != "meta" continue - entry = scfg.graph.pop(name) + entry = scfg.graph[name] jt = list(entry._jump_targets) for idx, s in enumerate(jt): if s is region_header: jt[idx] = region_name - entry = entry.replace_jump_targets(jump_targets=tuple(jt)) + entry.replace_jump_targets(jump_targets=jt) be = list(entry.backedges) for idx, s in enumerate(be): if s is region_header: be[idx] = region_name - entry = entry.replace_backedges(backedges=tuple(be)) + entry.replace_backedges(backedges=be) # If the entry itself is a region, update it's # exiting blocks too, recursively if isinstance(entry, RegionBlock): - entry = update_exiting(entry, region_header, region_name) - scfg.add_block(entry) + update_exiting(entry, region_header, region_name) region = RegionBlock( name=region_name, _jump_targets=scfg[region_exiting].jump_targets, - backedges=(), + backedges=[], kind=region_kind, header=region_header, subregion=head_subgraph, diff --git a/numba_rvsdg/rendering/rendering.py b/numba_rvsdg/rendering/rendering.py index a8c1ba0..2852e00 100644 --- a/numba_rvsdg/rendering/rendering.py +++ b/numba_rvsdg/rendering/rendering.py @@ -365,14 +365,14 @@ def render_flow(flow: ByteFlow) -> None: """ ByteFlowRenderer().render_byteflow(flow).view("before") - cflow = flow._join_returns() - ByteFlowRenderer().render_byteflow(cflow).view("closed") + flow._join_returns() + ByteFlowRenderer().render_byteflow(flow).view("closed") - lflow = cflow._restructure_loop() - ByteFlowRenderer().render_byteflow(lflow).view("loop restructured") + flow._restructure_loop() + ByteFlowRenderer().render_byteflow(flow).view("loop restructured") - bflow = lflow._restructure_branch() - ByteFlowRenderer().render_byteflow(bflow).view("branch restructured") + flow._restructure_branch() + ByteFlowRenderer().render_byteflow(flow).view("branch restructured") def render_scfg(scfg: SCFG) -> None: diff --git 
a/numba_rvsdg/tests/test_byteflow.py b/numba_rvsdg/tests/test_byteflow.py index 46aea65..1ed065e 100644 --- a/numba_rvsdg/tests/test_byteflow.py +++ b/numba_rvsdg/tests/test_byteflow.py @@ -114,16 +114,16 @@ def test_constructor(self): name=name_gen.new_block_name(block_names.PYTHON_BYTECODE), begin=0, end=8, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) self.assertEqual(block.name, "python_bytecode_block_0") self.assertEqual(block.begin, 0) self.assertEqual(block.end, 8) self.assertFalse(block.fallthrough) self.assertTrue(block.is_exiting) - self.assertEqual(block.jump_targets, ()) - self.assertEqual(block.backedges, ()) + self.assertEqual(block.jump_targets, []) + self.assertEqual(block.backedges, []) def test_is_jump_target(self): name_gen = NameGenerator() @@ -131,12 +131,12 @@ def test_is_jump_target(self): name=name_gen.new_block_name(block_names.PYTHON_BYTECODE), begin=0, end=8, - _jump_targets=( + _jump_targets=[ name_gen.new_block_name(block_names.PYTHON_BYTECODE), - ), - backedges=(), + ], + backedges=[], ) - self.assertEqual(block.jump_targets, ("python_bytecode_block_1",)) + self.assertEqual(block.jump_targets, ["python_bytecode_block_1"]) self.assertFalse(block.is_exiting) def test_get_instructions(self): @@ -145,8 +145,8 @@ def test_get_instructions(self): name=name_gen.new_block_name(block_names.PYTHON_BYTECODE), begin=0, end=8, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) expected = [ Instruction( @@ -242,8 +242,8 @@ def test_build_basic_blocks(self): name=new_name, begin=0, end=10, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) } ) @@ -266,8 +266,8 @@ def test_from_bytecode(self): name=new_name, begin=0, end=10, - _jump_targets=(), - backedges=(), + _jump_targets=[], + backedges=[], ) } ) diff --git a/numba_rvsdg/tests/test_figures.py b/numba_rvsdg/tests/test_figures.py index 3309c04..eb6f279 100644 --- a/numba_rvsdg/tests/test_figures.py +++ 
b/numba_rvsdg/tests/test_figures.py @@ -442,7 +442,7 @@ def test_figure_3(self): flow = FlowInfo.from_bytecode(bc) scfg = flow.build_basicblocks() byteflow = ByteFlow(bc=bc, scfg=scfg) - byteflow = byteflow.restructure() + byteflow.restructure() x, _ = SCFG.from_yaml(fig_3_yaml) self.assertSCFGEqual(x, byteflow.scfg) @@ -475,7 +475,7 @@ def test_figure_4(self): flow = FlowInfo.from_bytecode(bc) scfg = flow.build_basicblocks() byteflow = ByteFlow(bc=bc, scfg=scfg) - byteflow = byteflow.restructure() + byteflow.restructure() x, _ = SCFG.from_yaml(fig_4_yaml) self.assertSCFGEqual(x, byteflow.scfg) diff --git a/numba_rvsdg/tests/test_scfg.py b/numba_rvsdg/tests/test_scfg.py index 71e167e..f236c03 100644 --- a/numba_rvsdg/tests/test_scfg.py +++ b/numba_rvsdg/tests/test_scfg.py @@ -162,7 +162,7 @@ def test_scfg_iter(self): block_0 = name_gen.new_block_name(block_names.BASIC) block_1 = name_gen.new_block_name(block_names.BASIC) expected = [ - (block_0, BasicBlock(name=block_0, _jump_targets=(block_1,))), + (block_0, BasicBlock(name=block_0, _jump_targets=[block_1])), (block_1, BasicBlock(name=block_1)), ] scfg, _ = SCFG.from_yaml( @@ -194,17 +194,14 @@ def foo(n): def test_concealed_region_view_iter(self): flow = ByteFlow.from_bytecode(self.foo) - restructured = flow._restructure_loop() + flow._restructure_loop() expected = [ ("python_bytecode_block_0", PythonBytecodeBlock), ("loop_region_0", RegionBlock), ("python_bytecode_block_3", PythonBytecodeBlock), ] received = list( - ( - (k, type(v)) - for k, v in restructured.scfg.concealed_region_view.items() - ) + ((k, type(v)) for k, v in flow.scfg.concealed_region_view.items()) ) self.assertEqual(expected, received) diff --git a/numba_rvsdg/tests/test_simulate.py b/numba_rvsdg/tests/test_simulate.py index 9986e71..566442b 100644 --- a/numba_rvsdg/tests/test_simulate.py +++ b/numba_rvsdg/tests/test_simulate.py @@ -4,29 +4,6 @@ from numba_rvsdg.tests.simulator import Simulator import unittest -# flow = 
ByteFlow.from_bytecode(foo) -# #pprint(flow.scfg) -# flow = flow.restructure() -# #pprint(flow.scfg) -# # pprint(rtsflow.scfg) -# ByteFlowRenderer().render_byteflow(flow).view() -# print(dis(foo)) -# -# sim = Simulator(flow, foo.__globals__) -# ret = sim.run(dict(x=1)) -# assert ret == foo(x=1) -# -# #sim = Simulator(flow, foo.__globals__) -# #ret = sim.run(dict(x=100)) -# #assert ret == foo(x=100) - -# You can use the following snipppet to visually debug the restructured -# byteflow: -# -# ByteFlowRenderer().render_byteflow(flow).view() -# -# - class SimulatorTest(unittest.TestCase): def _run(self, func, flow, kwargs): @@ -44,7 +21,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # if case self._run(foo, flow, {"x": 1}) @@ -59,7 +36,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass case self._run(foo, flow, {"x": 0}) @@ -78,7 +55,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass case self._run(foo, flow, {"x": 0}) @@ -97,7 +74,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass case self._run(foo, flow, {"x": 0}) @@ -121,7 +98,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # no loop self._run(foo, flow, {"x": 0}) @@ -145,7 +122,7 @@ def foo(x): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # loop bypass self._run(foo, flow, {"x": 0}) @@ -161,7 +138,7 @@ def foo(x, y): return (x > 0 and x < 10) or (y > 0 and y < 10) flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() self._run(foo, flow, {"x": 5, "y": 5}) @@ -175,7 +152,7 @@ def foo(s, e): return c flow = ByteFlow.from_bytecode(foo) - flow = flow.restructure() + flow.restructure() # no looping 
self._run(foo, flow, {"s": 0, "e": 0}) diff --git a/numba_rvsdg/tests/test_transforms.py b/numba_rvsdg/tests/test_transforms.py index 746c7ac..c2e0055 100644 --- a/numba_rvsdg/tests/test_transforms.py +++ b/numba_rvsdg/tests/test_transforms.py @@ -40,7 +40,7 @@ def test_linear(self): expected_scfg, _ = SCFG.from_yaml(expected) new_name = original_scfg.name_gen.new_block_name(block_names.BASIC) original_scfg.insert_block( - new_name, (block_dict["0"],), (block_dict["1"],), BasicBlock + new_name, [block_dict["0"]], [block_dict["1"]], BasicBlock ) self.assertSCFGEqual(expected_scfg, original_scfg) @@ -81,8 +81,8 @@ def test_dual_predecessor(self): new_name = original_scfg.name_gen.new_block_name(block_names.BASIC) original_scfg.insert_block( new_name, - (block_dict["0"], block_dict["1"]), - (block_dict["2"],), + [block_dict["0"], block_dict["1"]], + [block_dict["2"]], BasicBlock, ) self.assertSCFGEqual( @@ -130,8 +130,8 @@ def test_dual_successor(self): expected_scfg, _ = SCFG.from_yaml(expected) original_scfg.insert_block( original_scfg.name_gen.new_block_name(block_names.BASIC), - (block_dict["0"],), - (block_dict["1"], block_dict["2"]), + [block_dict["0"]], + [block_dict["1"], block_dict["2"]], BasicBlock, ) self.assertSCFGEqual(expected_scfg, original_scfg) @@ -184,8 +184,8 @@ def test_dual_predecessor_and_dual_successor(self): expected_scfg, _ = SCFG.from_yaml(expected) original_scfg.insert_block( original_scfg.name_gen.new_block_name(block_names.BASIC), - (block_dict["1"], block_dict["2"]), - (block_dict["3"], block_dict["4"]), + [block_dict["1"], block_dict["2"]], + [block_dict["3"], block_dict["4"]], BasicBlock, ) self.assertSCFGEqual(expected_scfg, original_scfg) @@ -238,8 +238,8 @@ def test_dual_predecessor_and_dual_successor_with_additional_arcs(self): expected_scfg, expected_block_dict = SCFG.from_yaml(expected) original_scfg.insert_block( original_scfg.name_gen.new_block_name(block_names.BASIC), - (block_dict["1"], block_dict["2"]), - (block_dict["3"], 
block_dict["4"]), + [block_dict["1"], block_dict["2"]], + [block_dict["3"], block_dict["4"]], BasicBlock, ) self.assertSCFGEqual( @@ -318,8 +318,8 @@ def test_join_tails_and_exits_case_00(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["0"],) - exits = (block_dict["1"],) + tails = [block_dict["0"]] + exits = [block_dict["1"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -369,8 +369,8 @@ def test_join_tails_and_exits_case_01(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["0"],) - exits = (block_dict["1"], block_dict["2"]) + tails = [block_dict["0"]] + exits = [block_dict["1"], block_dict["2"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -423,8 +423,8 @@ def test_join_tails_and_exits_case_02_01(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"],) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -477,8 +477,8 @@ def test_join_tails_and_exits_case_02_02(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"],) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits @@ -546,8 +546,8 @@ def test_join_tails_and_exits_case_03_01(self): """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = (block_dict["1"], block_dict["2"]) - exits = (block_dict["3"], block_dict["4"]) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"], block_dict["4"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits ) @@ -616,8 +616,8 @@ def test_join_tails_and_exits_case_03_02(self): backedges: """ expected_scfg, _ = SCFG.from_yaml(expected) - tails = 
(block_dict["1"], block_dict["2"]) - exits = (block_dict["3"], block_dict["4"]) + tails = [block_dict["1"], block_dict["2"]] + exits = [block_dict["3"], block_dict["4"]] solo_tail_name, solo_exit_name = original_scfg.join_tails_and_exits( tails, exits )