From 0270359962d51c28ab5c412180f819e43d1e8ce3 Mon Sep 17 00:00:00 2001 From: ythoma Date: Fri, 5 Feb 2021 09:46:04 +0100 Subject: [PATCH 001/220] Update install.rst On Ubuntu 20.04, I had to install curl as well: sudo apt install curl I guess that would be the same on other setups. --- docs/source/install.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index db8ba14d..758efdaa 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -16,7 +16,7 @@ Installing prerequisites (this command is for Ubuntu 16.04): gawk tcl-dev libffi-dev git mercurial graphviz \ xdot pkg-config python python3 libftdi-dev gperf \ libboost-program-options-dev autoconf libgmp-dev \ - cmake + cmake curl Yosys, Yosys-SMTBMC and ABC --------------------------- From 5a04ac3fccc91b86e8b66e7fab31ff5c261e1c51 Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Wed, 12 Jan 2022 10:48:32 +0100 Subject: [PATCH 002/220] use --witness option when calling pono --- sbysrc/sby_engine_btor.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index 15344d8f..bf9216b1 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -46,7 +46,7 @@ def run(mode, task, engine_idx, engine): elif solver_args[0] == "pono": if random_seed: task.error("Setting the random seed is not available for the pono solver.") - solver_cmd = task.exe_paths["pono"] + f" -v 1 -e bmc -k {task.opt_depth - 1}" + solver_cmd = task.exe_paths["pono"] + f" --witness -v 1 -e bmc -k {task.opt_depth - 1}" else: task.error(f"Invalid solver command {solver_args[0]}.") From ad07ea0e8590fb8fba523701c9e72b521fe0cf0c Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Wed, 12 Jan 2022 11:06:05 +0100 Subject: [PATCH 003/220] add testcase exposing #137 --- tests/multi_assert.sby | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 tests/multi_assert.sby diff --git a/tests/multi_assert.sby b/tests/multi_assert.sby new file mode 100644 index 00000000..818195f8 --- /dev/null +++ b/tests/multi_assert.sby @@ -0,0 +1,24 @@ +[tasks] +btormc +pono + +[options] +mode bmc +depth 5 +expect fail + +[engines] +btormc: btor btormc +pono: btor pono + +[script] +read_verilog -sv multi_assert.v +prep -top test + +[file multi_assert.v] +module test(); +always @* begin +assert (1); +assert (0); +end +endmodule From 257a57d8ed173538ea75e417b4474f69e93c3454 Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Wed, 12 Jan 2022 13:18:54 +0100 Subject: [PATCH 004/220] create only a single bad when using pono solver; workaround for #137 --- sbysrc/sby_core.py | 1 + sbysrc/sby_engine_btor.py | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 372cc9b5..f05b31d6 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -449,6 +449,7 @@ def make_model(self, model_name): print("dffunmap", file=f) print("stat", file=f) print("write_btor {}-i design_{m}.info design_{m}.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f) + print("write_btor -s {}-i design_{m}_single.info design_{m}_single.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f) proc = SbyProc( self, diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index bf9216b1..7985b324 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -153,7 +153,7 @@ def output_callback(line): task, f"engine_{engine_idx}_{common_state.produced_cex}", task.model("btor"), - "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor.info model/design_btor.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix), + "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor{s}.info model/design_btor{s}.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix, s='_single' if solver_args[0] == 'pono' else ''), logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) proc2.output_callback = output_callback2 @@ -216,7 +216,7 @@ def exit_callback(retcode): proc = SbyProc( task, f"engine_{engine_idx}", task.model("btor"), - f"cd {task.workdir}; {solver_cmd} model/design_btor.btor", + f"cd {task.workdir}; {solver_cmd} model/design_btor{'_single' if solver_args[0]=='pono' else ''}.btor", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w") ) From ab5ff32b31bf349e58204d97e89914b4677fc444 Mon Sep 17 00:00:00 2001 From: Miodrag Milanovic Date: Wed, 12 Jan 2022 14:18:17 +0100 Subject: [PATCH 005/220] Added CI --- .github/workflows/ci.yml | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 .github/workflows/ci.yml diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 00000000..9e055251 --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,12 @@ +name: ci +on: [push, pull_request] + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v2 + - uses: YosysHQ/setup-oss-cad-suite@v1 + - name: Run checks + run: make ci From cdf5650c12dc4bfc4036b5fdb7456610959358ce Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Tue, 7 Dec 2021 20:14:06 +0100 Subject: [PATCH 006/220] add JUnit schema and validator Signed-off-by: N. Engelhardt --- tests/JUnit.xsd | 212 ++++++++++++++++++++++++++++++++++++++++ tests/validate_junit.py | 19 ++++ 2 files changed, 231 insertions(+) create mode 100644 tests/JUnit.xsd create mode 100644 tests/validate_junit.py diff --git a/tests/JUnit.xsd b/tests/JUnit.xsd new file mode 100644 index 00000000..84b0f157 --- /dev/null +++ b/tests/JUnit.xsd @@ -0,0 +1,212 @@ + + + + JUnit test result schema for the Apache Ant JUnit and JUnitReport tasks +Copyright © 2011, Windy Road Technology Pty. 
Limited +The Apache Ant JUnit XML Schema is distributed under the terms of the Apache License Version 2.0 http://www.apache.org/licenses/ +Permission to waive conditions of this license may be requested from Windy Road Support (http://windyroad.org/support). + + + + + + + + + + Contains an aggregation of testsuite results + + + + + + + + + + Derived from testsuite/@name in the non-aggregated documents + + + + + Starts at '0' for the first testsuite and is incremented by 1 for each following testsuite + + + + + + + + + + + + Contains the results of exexuting a testsuite + + + + + Properties (e.g., environment settings) set during test execution + + + + + + + + + + + + + + + + + + + + + + + + + Indicates that the test errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. Contains as a text node relevant data for the error, e.g., a stack trace + + + + + + + The error message. e.g., if a java exception is thrown, the return value of getMessage() + + + + + The type of error that occured. e.g., if a java execption is thrown the full class name of the exception. + + + + + + + + + Indicates that the test failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals. Contains as a text node relevant data for the failure, e.g., a stack trace + + + + + + + The message specified in the assert + + + + + The type of the assert. + + + + + + + + + + Name of the test method + + + + + Full class name for the class the test method is in. + + + + + Time taken (in seconds) to execute the test + + + + + + + Data that was written to standard out while the test was executed + + + + + + + + + + Data that was written to standard error while the test was executed + + + + + + + + + + + Full class name of the test for non-aggregated testsuite documents. Class name without the package for aggregated testsuites documents + + + + + + + + + + when the test was executed. Timezone may not be specified. + + + + + Host on which the tests were executed. 'localhost' should be used if the hostname cannot be determined. + + + + + + + + + + The total number of tests in the suite + + + + + The total number of tests in the suite that failed. A failure is a test which the code has explicitly failed by using the mechanisms for that purpose. e.g., via an assertEquals + + + + + The total number of tests in the suite that errored. An errored test is one that had an unanticipated problem. e.g., an unchecked throwable; or a problem with the implementation of the test. + + + + + The total number of ignored or skipped tests in the suite. + + + + + Time taken (in seconds) to execute the tests in the suite + + + + + + + + + diff --git a/tests/validate_junit.py b/tests/validate_junit.py new file mode 100644 index 00000000..c1c0573f --- /dev/null +++ b/tests/validate_junit.py @@ -0,0 +1,19 @@ +from xmlschema import XMLSchema, XMLSchemaValidationError +import argparse + +def main(): + parser = argparse.ArgumentParser(description="Validate JUnit output") + parser.add_argument('xml') + parser.add_argument('--xsd', default="JUnit.xsd") + + args = parser.parse_args() + + schema = XMLSchema(args.xsd) + try: + schema.validate(args.xml) + except XMLSchemaValidationError as e: + print(e) + exit(1) + +if __name__ == '__main__': + main() From 6ec2df34e36c69916a5da2f67c464eff85624590 Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Tue, 7 Dec 2021 20:16:15 +0100 Subject: [PATCH 007/220] WIP change junit print to conform to schema; needs additional data, currently printing dummy info Signed-off-by: N. Engelhardt --- sbysrc/sby.py | 51 +++++++++++++++++++++++++++++++++++++-------------- 1 file changed, 37 insertions(+), 14 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 58f02d80..c720b223 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -20,7 +20,7 @@ import argparse, os, sys, shutil, tempfile, re ##yosys-sys-path## from sby_core import SbyTask, SbyAbort, process_filename -from time import localtime +import time class DictAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): @@ -156,7 +156,7 @@ def __call__(self, parser, namespace, values, option_string=None): early_logmsgs = list() def early_log(workdir, msg): - tm = localtime() + tm = time.localtime() early_logmsgs.append("SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, workdir, msg)) print(early_logmsgs[-1]) @@ -455,24 +455,47 @@ def run_task(taskname): if not my_opt_tmpdir and not setupmode: with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: + # TODO: create necessary data + # checks: collection of assert/cover statements active in task + # elements: dicts with entries 'type', 'hierarchy', 'location', 'status', 'tracefile' + checks = [ #for testing purposes + {'type':'assert', 'hierarchy':'top.dut.submod1', 'location':'test.v:404', 'status':'unknown', 'tracefile':'/home/user/path/task/engine_0/trace0.vcd'}, + {'type':'assert', 'hierarchy':'top.dut.submod1', 'location':'test.v:412', 'status':'fail', 'tracefile':'/home/user/path/task/engine_0/trace1.vcd'}, + {'type':'cover', 'hierarchy':'top.dut.submod2', 'location':'test3.v:42', 'status':'pass', 'tracefile':'/home/user/path/task/engine_1/trace0.vcd'}, + {'type':'cover', 'hierarchy':'top.dut.submod2', 'location':'test3.v:666', 'status':'error', 'tracefile':'/home/user/path/task/engine_1/trace1.vcd'} + ] + junit_tests = len(checks) junit_errors = 1 if task.retcode == 16 else 0 junit_failures = 1 if task.retcode != 0 and junit_errors == 0 else 0 - print('', file=f) - print(f'', file=f) - print(f'', file=f) - print('', file=f) + junit_type = "cover" if task.opt_mode == "cover" else "assert" #should this be here or individual for each check? + junit_time = time.strftime('%Y-%m-%dT%H:%M:%S') + print(f'', file=f) + print(f'', file=f) + #TODO: check with Micko if os.uname().nodename is sane enough in most places + print(f'', file=f) + print(f'', file=f) print(f'', file=f) - print('', file=f) - print(f'', file=f) - if junit_errors: - print(f'', file=f) - if junit_failures: - print(f'', file=f) + print(f'', file=f) + for check in checks: + print(f'', file=f) # name required + if check["status"] == "unknown": + print(f'', file=f) + elif check["status"] == "fail": + print(f'', file=f) + elif check["status"] == "error": + print(f'', file=f) # type mandatory, message optional + print(f'', file=f) print('', end="", file=f) with open(f"{task.workdir}/logfile.txt", "r") as logf: for line in logf: print(line.replace("&", "&").replace("<", "<").replace(">", ">").replace("\"", """), end="", file=f) - print('', file=f) + print('', file=f) + print('', file=f) + #TODO: can we handle errors and still output this file? 
+ print('', file=f) + print(f'', file=f) + print(f'', file=f) + with open(f"{task.workdir}/status", "w") as f: print(f"{task.status} {task.retcode} {task.total_time}", file=f) @@ -488,7 +511,7 @@ def run_task(taskname): failed.append(task) if failed and (len(tasknames) > 1 or tasknames[0] is not None): - tm = localtime() + tm = time.localtime() print("SBY {:2d}:{:02d}:{:02d} The following tasks failed: {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, failed)) sys.exit(retcode) From 7f3c4137c10c79c671a8f20b8d6b20a16487beee Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Wed, 19 Jan 2022 19:34:11 +0100 Subject: [PATCH 008/220] create json export and read in properties --- sbysrc/sby.py | 26 +++++------ sbysrc/sby_core.py | 12 +++++ sbysrc/sby_design.py | 102 +++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 125 insertions(+), 15 deletions(-) create mode 100644 sbysrc/sby_design.py diff --git a/sbysrc/sby.py b/sbysrc/sby.py index c720b223..3d2f583e 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -455,18 +455,14 @@ def run_task(taskname): if not my_opt_tmpdir and not setupmode: with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: - # TODO: create necessary data - # checks: collection of assert/cover statements active in task - # elements: dicts with entries 'type', 'hierarchy', 'location', 'status', 'tracefile' - checks = [ #for testing purposes - {'type':'assert', 'hierarchy':'top.dut.submod1', 'location':'test.v:404', 'status':'unknown', 'tracefile':'/home/user/path/task/engine_0/trace0.vcd'}, - {'type':'assert', 'hierarchy':'top.dut.submod1', 'location':'test.v:412', 'status':'fail', 'tracefile':'/home/user/path/task/engine_0/trace1.vcd'}, - {'type':'cover', 'hierarchy':'top.dut.submod2', 'location':'test3.v:42', 'status':'pass', 'tracefile':'/home/user/path/task/engine_1/trace0.vcd'}, - {'type':'cover', 'hierarchy':'top.dut.submod2', 'location':'test3.v:666', 'status':'error', 'tracefile':'/home/user/path/task/engine_1/trace1.vcd'} - ] + checks = task.design_hierarchy.get_property_list() junit_tests = len(checks) junit_errors = 1 if task.retcode == 16 else 0 - junit_failures = 1 if task.retcode != 0 and junit_errors == 0 else 0 + junit_failures = 0 + if task.retcode != 0 and junit_errors == 0: + for check in checks: + if check.status == "FAIL": + junit_failures += 1 junit_type = "cover" if task.opt_mode == "cover" else "assert" #should this be here or individual for each check? 
junit_time = time.strftime('%Y-%m-%dT%H:%M:%S') print(f'', file=f) @@ -478,12 +474,12 @@ def run_task(taskname): print(f'', file=f) for check in checks: print(f'', file=f) # name required - if check["status"] == "unknown": + if check.status == "UNKNOWN": print(f'', file=f) - elif check["status"] == "fail": - print(f'', file=f) - elif check["status"] == "error": - print(f'', file=f) # type mandatory, message optional + elif check.status == "FAIL": + print(f'', file=f) + elif check.status == "ERROR": + print(f'', file=f) # type mandatory, message optional print(f'', file=f) print('', end="", file=f) with open(f"{task.workdir}/logfile.txt", "r") as logf: diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index f05b31d6..840f2152 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -23,6 +23,7 @@ from shutil import copyfile, copytree, rmtree from select import select from time import time, localtime, sleep +from sby_design import SbyProperty, SbyModule, design_hierarchy all_procs_running = [] @@ -222,6 +223,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir): self.status = "UNKNOWN" self.total_time = 0 self.expect = [] + self.design_hierarchy = None yosys_program_prefix = "" ##yosys-program-prefix## self.exe_paths = { @@ -390,6 +392,8 @@ def make_model(self, model_name): print("opt -keepdc -fast", file=f) print("check", file=f) print("hierarchy -simcheck", file=f) + # FIXME: can using design and design_nomem in the same task happen? + print(f"""write_json ../model/design{"" if model_name == "base" else "_nomem"}.json""", file=f) print(f"""write_ilang ../model/design{"" if model_name == "base" else "_nomem"}.il""", file=f) proc = SbyProc( @@ -401,6 +405,14 @@ def make_model(self, model_name): ) proc.checkretcode = True + def instance_hierarchy_callback(retcode): + assert retcode == 0 + assert self.design_hierarchy == None # verify this assumption + with open(f"""{self.workdir}/model/design{"" if model_name == "base" else "_nomem"}.json""") as f: + self.design_hierarchy = design_hierarchy(f) + + proc.exit_callback = instance_hierarchy_callback + return [proc] if re.match(r"^smt2(_syn)?(_nomem)?(_stbv|_stdt)?$", model_name): diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py new file mode 100644 index 00000000..79c4dddc --- /dev/null +++ b/sbysrc/sby_design.py @@ -0,0 +1,102 @@ +# +# SymbiYosys (sby) -- Front-end for Yosys-based formal verification flows +# +# Copyright (C) 2022 N. Engelhardt +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+# + +import json +from enum import Enum, auto +from dataclasses import dataclass, field + +@dataclass +class SbyProperty: + class Type(Enum): + ASSUME = auto() + ASSERT = auto() + COVER = auto() + LIVE = auto() + + def __repr__(self): + return self.name + + @classmethod + def from_cell(c, name): + if name == "$assume": + return c.ASSUME + if name == "$assert": + return c.ASSERT + if name == "$cover": + return c.COVER + if name == "$live": + return c.LIVE + raise ValueError("Unknown property type: " + name) + + name: str + type: Type + location: str + hierarchy: str + status: str = field(default="UNKNOWN") + tracefile: str = field(default="") + +@dataclass +class SbyModule: + name: str + type: str + submodules: dict = field(default_factory=dict) + properties: list = field(default_factory=list) + + def get_property_list(self): + l = list() + l.extend(self.properties) + for submod in self.submodules: + l.extend(submod.get_property_list()) + return l + +def design_hierarchy(filename): + design_json = json.load(filename) + def make_mod_hier(instance_name, module_name, hierarchy=""): + # print(instance_name,":", module_name) + mod = SbyModule(name=instance_name, type=module_name) + + cells = design_json["modules"][module_name]["cells"] + for cell_name, cell in cells.items(): + if cell["type"][0] != '$': + mod.submodules[cell_name] = make_mod_hier(cell_name, cell["type"], hierarchy=f"{hierarchy}/{instance_name}") + if cell["type"] in ["$assume", "$assert", "$cover", "$live"]: + try: + location = cell["attributes"]["src"] + except KeyError: + location = "" + p = SbyProperty(name=cell_name, type=SbyProperty.Type.from_cell(cell["type"]), location=location, hierarchy=f"{hierarchy}/{instance_name}") + mod.properties.append(p) + return mod + + for module_name in design_json["modules"]: + attrs = design_json["modules"][module_name]["attributes"] + if "top" in attrs and int(attrs["top"]) == 1: + hierarchy = make_mod_hier(module_name, module_name) + return hierarchy + else: + raise ValueError("Cannot find top module") + +def main(): + import sys + if len(sys.argv) != 2: + print(f"""Usage: {sys.argv[0]} design.json""") + with open(sys.argv[1]) as f: + print(design_hierarchy(f)) + +if __name__ == '__main__': + main() From a9d1972c47a158fc139847b7661592e1a25300cb Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Fri, 21 Jan 2022 15:18:53 +0100 Subject: [PATCH 009/220] add fallback if solver can't tell which property fails --- sbysrc/sby.py | 32 +++++++++++++++++++++++--------- 1 file changed, 23 insertions(+), 9 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 3d2f583e..f3b78eaa 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -458,11 +458,15 @@ def run_task(taskname): checks = task.design_hierarchy.get_property_list() junit_tests = len(checks) junit_errors = 1 if task.retcode == 16 else 0 + solver_gives_line = task.status == "FAIL" and any(check.status != "UNKNOWN" for check in checks) junit_failures = 0 - if task.retcode != 0 and junit_errors == 0: - for check in checks: - if check.status == "FAIL": - junit_failures += 1 + if junit_errors == 0 and task.retcode != 0: + if solver_gives_line: + for check in checks: + if check.status == "FAIL": + junit_failures += 1 + else: + junit_failures = 1 junit_type = "cover" if task.opt_mode == "cover" else "assert" #should this be here or individual for each check? 
junit_time = time.strftime('%Y-%m-%dT%H:%M:%S') print(f'', file=f) @@ -472,13 +476,23 @@ def run_task(taskname): print(f'', file=f) print(f'', file=f) print(f'', file=f) - for check in checks: + if solver_gives_line: + for check in checks: + print(f'', file=f) # name required + if check.status == "UNKNOWN": + print(f'', file=f) + elif check.status == "FAIL": + print(f'', file=f) + elif check.status == "ERROR": + print(f'', file=f) # type mandatory, message optional + print(f'', file=f) + else: print(f'', file=f) # name required - if check.status == "UNKNOWN": + if task.status == "UNKNOWN": print(f'', file=f) - elif check.status == "FAIL": - print(f'', file=f) - elif check.status == "ERROR": + elif task.status == "FAIL": + print(f'', file=f) + elif task.status == "ERROR": print(f'', file=f) # type mandatory, message optional print(f'', file=f) print('', end="", file=f) From 1cf27e7c315daa6019a9a6398a9827cfddf9c84b Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Thu, 27 Jan 2022 13:41:07 +0100 Subject: [PATCH 010/220] parse solver location output for assert failures (cover not functional yet) --- sbysrc/sby.py | 2 +- sbysrc/sby_core.py | 14 ++++++++------ sbysrc/sby_design.py | 29 +++++++++++++++++++++++++++-- sbysrc/sby_engine_smtbmc.py | 25 +++++++++++++++++++++++++ tests/submod_props.sby | 30 ++++++++++++++++++++++++++++++ 5 files changed, 91 insertions(+), 9 deletions(-) create mode 100644 tests/submod_props.sby diff --git a/sbysrc/sby.py b/sbysrc/sby.py index f3b78eaa..8d6cfc9e 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -478,7 +478,7 @@ def run_task(taskname): print(f'', file=f) if solver_gives_line: for check in checks: - print(f'', file=f) # name required + print(f'', file=f) # name required if check.status == "UNKNOWN": print(f'', file=f) elif check.status == "FAIL": diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 840f2152..ec4e0a10 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -222,8 +222,9 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir): self.reusedir = reusedir self.status = "UNKNOWN" self.total_time = 0 - self.expect = [] + self.expect = list() self.design_hierarchy = None + self.precise_prop_status = False yosys_program_prefix = "" ##yosys-program-prefix## self.exe_paths = { @@ -371,6 +372,9 @@ def make_model(self, model_name): print(f"# running in {self.workdir}/src/", file=f) for cmd in self.script: print(cmd, file=f) + # assumptions at this point: hierarchy has been run and a top module has been designated + print("hierarchy -simcheck", file=f) + print(f"""write_json ../model/design.json""", file=f) if model_name == "base": print("memory_nordff", file=f) else: @@ -392,8 +396,6 @@ def make_model(self, model_name): print("opt -keepdc -fast", file=f) print("check", file=f) print("hierarchy -simcheck", file=f) - # FIXME: can using design and design_nomem in the same task happen? 
- print(f"""write_json ../model/design{"" if model_name == "base" else "_nomem"}.json""", file=f) print(f"""write_ilang ../model/design{"" if model_name == "base" else "_nomem"}.il""", file=f) proc = SbyProc( @@ -407,9 +409,9 @@ def make_model(self, model_name): def instance_hierarchy_callback(retcode): assert retcode == 0 - assert self.design_hierarchy == None # verify this assumption - with open(f"""{self.workdir}/model/design{"" if model_name == "base" else "_nomem"}.json""") as f: - self.design_hierarchy = design_hierarchy(f) + if self.design_hierarchy == None: + with open(f"""{self.workdir}/model/design{"" if model_name == "base" else "_nomem"}.json""") as f: + self.design_hierarchy = design_hierarchy(f) proc.exit_callback = instance_hierarchy_callback diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 79c4dddc..7551ac71 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -28,7 +28,7 @@ class Type(Enum): COVER = auto() LIVE = auto() - def __repr__(self): + def __str__(self): return self.name @classmethod @@ -50,6 +50,9 @@ def from_cell(c, name): status: str = field(default="UNKNOWN") tracefile: str = field(default="") + def __repr__(self): + return f"SbyProperty<{self.type} {self.name} at {self.location}: status={self.status}, tracefile=\"{self.tracefile}\"" + @dataclass class SbyModule: name: str @@ -57,13 +60,35 @@ class SbyModule: submodules: dict = field(default_factory=dict) properties: list = field(default_factory=list) + def __repr__(self): + return f"SbyModule<{self.name} : {self.type}, submodules={self.submodules}, properties={self.properties}>" + def get_property_list(self): l = list() l.extend(self.properties) - for submod in self.submodules: + for submod in self.submodules.values(): l.extend(submod.get_property_list()) return l + def find_property(self, hierarchy, location): + # FIXME: use that RE that works with escaped paths from https://stackoverflow.com/questions/46207665/regex-pattern-to-split-verilog-path-in-different-instances-using-python + path = hierarchy.split('.') + mod = path.pop(0) + if self.name != mod: + raise ValueError(f"{self.name} is not the first module in hierarchical path {hierarchy}.") + try: + mod_hier = self + while path: + mod = path.pop(0) + mod_hier = mod_hier.submodules[mod] + except KeyError: + raise KeyError(f"Could not find {hierarchy} in design hierarchy!") + try: + prop = next(p for p in mod_hier.properties if location in p.location) + except StopIteration: + raise KeyError(f"Could not find assert at {location} in properties list!") + return prop + def design_hierarchy(filename): design_json = json.load(filename) def make_mod_hier(instance_name, module_name, hierarchy=""): diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index da2e31c1..b736e312 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -32,6 +32,7 @@ def run(mode, task, engine_idx, engine): basecase_only = False induction_only = False random_seed = None + task.precise_prop_status = True opts, args = getopt.getopt(engine[1:], "", ["nomem", "syn", "stbv", "stdt", "presat", "nopresat", "unroll", "nounroll", "dumpsmt2", "progress", "basecase", "induction", "seed="]) @@ -154,9 +155,11 @@ def run(mode, task, engine_idx, engine): task.induction_procs.append(proc) proc_status = None + last_prop = None def output_callback(line): nonlocal proc_status + nonlocal last_prop match = re.match(r"^## [0-9: ]+ Status: FAILED", line) if match: @@ -178,6 +181,28 @@ def output_callback(line): proc_status = "ERROR" return line + 
match = re.match(r"^## [0-9: ]+ Assert failed in (([a-z_][a-z0-9$_]*|\\\S*|\.)+): (\S*).*", line) + if match: + module_path = match[1] + location = match[3] + try: + prop = task.design_hierarchy.find_property(module_path, location) + except KeyError as e: + task.precise_prop_status = False + return line + f" (Warning: {str(e)})" + prop.status = "FAIL" + last_prop = prop + return line + + match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) in step \d+.", line) + if match: + location = match[1] + + match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) + if match and last_prop: + last_prop.tracefile = match[1] + last_prop = None + return line def exit_callback(retcode): diff --git a/tests/submod_props.sby b/tests/submod_props.sby new file mode 100644 index 00000000..33a3a006 --- /dev/null +++ b/tests/submod_props.sby @@ -0,0 +1,30 @@ +[tasks] +bmc +cover + +[options] +bmc: mode bmc +cover: mode cover + +expect fail + +[engines] +smtbmc boolector + +[script] +read_verilog -sv test.sv +prep -top top + +[file test.sv] +module test(input foo); +always @* assert(foo); +always @* assert(!foo); +always @* cover(foo); +always @* cover(!foo); +endmodule + +module top(); +test test_i ( +.foo(1'b1) +); +endmodule From d7e7f2c530cd88b05793af8a15d615a8b61a860b Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Mon, 31 Jan 2022 12:35:56 +0100 Subject: [PATCH 011/220] refactor model to have single base --- sbysrc/sby_core.py | 77 ++++++++++++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 34 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ec4e0a10..dde634bb 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -367,50 +367,47 @@ def make_model(self, model_name): if not os.path.isdir(f"{self.workdir}/model"): os.makedirs(f"{self.workdir}/model") - if model_name in ["base", "nomem"]: - with open(f"""{self.workdir}/model/design{"" if model_name == "base" else "_nomem"}.ys""", "w") as f: + def print_common_prep(): + if self.opt_multiclock: + print("clk2fflogic", file=f) + else: + print("async2sync", file=f) + print("chformal -assume -early", file=f) + if self.opt_mode in ["bmc", "prove"]: + print("chformal -live -fair -cover -remove", file=f) + if self.opt_mode == "cover": + print("chformal -live -fair -remove", file=f) + if self.opt_mode == "live": + print("chformal -assert2assume", file=f) + print("chformal -cover -remove", file=f) + print("opt_clean", file=f) + print("setundef -anyseq", file=f) + print("opt -keepdc -fast", file=f) + print("check", file=f) + print("hierarchy -simcheck", file=f) + + if model_name == "base": + with open(f"""{self.workdir}/model/design.ys""", "w") as f: print(f"# running in {self.workdir}/src/", file=f) for cmd in self.script: print(cmd, file=f) - # assumptions at this point: hierarchy has been run and a top module has been designated + # the user must designate a top module in [script] print("hierarchy -simcheck", file=f) print(f"""write_json ../model/design.json""", file=f) - if model_name == "base": - print("memory_nordff", file=f) - else: - print("memory_map", file=f) - if self.opt_multiclock: - print("clk2fflogic", file=f) - else: - print("async2sync", file=f) - print("chformal -assume -early", file=f) - if self.opt_mode in ["bmc", "prove"]: - print("chformal -live -fair -cover -remove", file=f) - if self.opt_mode == "cover": - print("chformal -live -fair -remove", file=f) - if self.opt_mode == "live": - print("chformal -assert2assume", file=f) - print("chformal -cover -remove", file=f) - 
print("opt_clean", file=f) - print("setundef -anyseq", file=f) - print("opt -keepdc -fast", file=f) - print("check", file=f) - print("hierarchy -simcheck", file=f) - print(f"""write_ilang ../model/design{"" if model_name == "base" else "_nomem"}.il""", file=f) + print(f"""write_rtlil ../model/design.il""", file=f) proc = SbyProc( self, model_name, [], - "cd {}/src; {} -ql ../model/design{s}.log ../model/design{s}.ys".format(self.workdir, self.exe_paths["yosys"], - s="" if model_name == "base" else "_nomem") + "cd {}/src; {} -ql ../model/design.log ../model/design.ys".format(self.workdir, self.exe_paths["yosys"]) ) proc.checkretcode = True def instance_hierarchy_callback(retcode): assert retcode == 0 if self.design_hierarchy == None: - with open(f"""{self.workdir}/model/design{"" if model_name == "base" else "_nomem"}.json""") as f: + with open(f"{self.workdir}/model/design.json") as f: self.design_hierarchy = design_hierarchy(f) proc.exit_callback = instance_hierarchy_callback @@ -420,7 +417,12 @@ def instance_hierarchy_callback(retcode): if re.match(r"^smt2(_syn)?(_nomem)?(_stbv|_stdt)?$", model_name): with open(f"{self.workdir}/model/design_{model_name}.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) - print(f"""read_ilang design{"_nomem" if "_nomem" in model_name else ""}.il""", file=f) + print(f"""read_ilang design.il""", file=f) + if "_nomem" in model_name: + print("memory_map", file=f) + else: + print("memory_nordff", file=f) + print_common_prep() if "_syn" in model_name: print("techmap", file=f) print("opt -fast", file=f) @@ -438,7 +440,7 @@ def instance_hierarchy_callback(retcode): proc = SbyProc( self, model_name, - self.model("nomem" if "_nomem" in model_name else "base"), + self.model("base"), "cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name) ) proc.checkretcode = True @@ -448,7 +450,12 @@ def instance_hierarchy_callback(retcode): if re.match(r"^btor(_syn)?(_nomem)?$", model_name): with open(f"{self.workdir}/model/design_{model_name}.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) - print(f"""read_ilang design{"_nomem" if "_nomem" in model_name else ""}.il""", file=f) + print(f"""read_ilang design.il""", file=f) + if "_nomem" in model_name: + print("memory_map", file=f) + else: + print("memory_nordff", file=f) + print_common_prep() print("flatten", file=f) print("setundef -undriven -anyseq", file=f) if "_syn" in model_name: @@ -468,7 +475,7 @@ def instance_hierarchy_callback(retcode): proc = SbyProc( self, model_name, - self.model("nomem" if "_nomem" in model_name else "base"), + self.model("base"), "cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name) ) proc.checkretcode = True @@ -478,7 +485,9 @@ def instance_hierarchy_callback(retcode): if model_name == "aig": with open(f"{self.workdir}/model/design_aiger.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) - print("read_ilang design_nomem.il", file=f) + print("read_ilang design.il", file=f) + print("memory_map", file=f) + print_common_prep() print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) @@ -495,7 +504,7 @@ def instance_hierarchy_callback(retcode): proc = SbyProc( self, "aig", - self.model("nomem"), + self.model("base"), f"""cd {self.workdir}/model; {self.exe_paths["yosys"]} -ql design_aiger.log design_aiger.ys""" ) proc.checkretcode = True From 9168b0163bfe3e9bffca915bdae05d27ffc5f992 Mon Sep 
17 00:00:00 2001 From: "N. Engelhardt" Date: Sun, 6 Feb 2022 09:15:44 +0100 Subject: [PATCH 012/220] handle status of cover properties --- sbysrc/sby.py | 17 +++++++++-------- sbysrc/sby_design.py | 23 +++++++++++++++++------ sbysrc/sby_engine_smtbmc.py | 24 +++++++++++++----------- 3 files changed, 39 insertions(+), 25 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 8d6cfc9e..ee8f1a34 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -20,7 +20,7 @@ import argparse, os, sys, shutil, tempfile, re ##yosys-sys-path## from sby_core import SbyTask, SbyAbort, process_filename -import time +import time, platform class DictAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): @@ -458,7 +458,6 @@ def run_task(taskname): checks = task.design_hierarchy.get_property_list() junit_tests = len(checks) junit_errors = 1 if task.retcode == 16 else 0 - solver_gives_line = task.status == "FAIL" and any(check.status != "UNKNOWN" for check in checks) junit_failures = 0 if junit_errors == 0 and task.retcode != 0: if solver_gives_line: @@ -471,15 +470,17 @@ def run_task(taskname): junit_time = time.strftime('%Y-%m-%dT%H:%M:%S') print(f'', file=f) print(f'', file=f) - #TODO: check with Micko if os.uname().nodename is sane enough in most places - print(f'', file=f) + print(f'', file=f) print(f'', file=f) - print(f'', file=f) + print(f'', file=f) print(f'', file=f) - if solver_gives_line: + if task.precise_prop_status: for check in checks: - print(f'', file=f) # name required - if check.status == "UNKNOWN": + detail_attrs = f' type="{check.type}" location="{check.location}" id="{check.name}"' + print(f'', file=f) # name required + if check.status == "PASS": + pass + elif check.status == "UNKNOWN": print(f'', file=f) elif check.status == "FAIL": print(f'', file=f) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 7551ac71..c0fdb8b6 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -63,12 +63,14 @@ class SbyModule: def __repr__(self): return f"SbyModule<{self.name} : {self.type}, submodules={self.submodules}, properties={self.properties}>" - def get_property_list(self): - l = list() - l.extend(self.properties) + def __iter__(self): + for prop in self.properties: + yield prop for submod in self.submodules.values(): - l.extend(submod.get_property_list()) - return l + yield from submod.__iter__() + + def get_property_list(self): + return [p for p in self if p.type != p.Type.ASSUME] def find_property(self, hierarchy, location): # FIXME: use that RE that works with escaped paths from https://stackoverflow.com/questions/46207665/regex-pattern-to-split-verilog-path-in-different-instances-using-python @@ -89,6 +91,12 @@ def find_property(self, hierarchy, location): raise KeyError(f"Could not find assert at {location} in properties list!") return prop + def find_property_by_cellname(self, cell_name): + for prop in self: + if prop.name == cell_name: + return prop + raise KeyError(f"No such property: {cell_name}") + def design_hierarchy(filename): design_json = json.load(filename) def make_mod_hier(instance_name, module_name, hierarchy=""): @@ -121,7 +129,10 @@ def main(): if len(sys.argv) != 2: print(f"""Usage: {sys.argv[0]} design.json""") with open(sys.argv[1]) as f: - print(design_hierarchy(f)) + d = design_hierarchy(f) + print("Design Hierarchy:", d) + for p in d.get_property_list(): + print("Property:", p) if __name__ == '__main__': main() diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index b736e312..e179dd54 100644 --- 
a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -143,7 +143,7 @@ def run(mode, task, engine_idx, engine): task, procname, task.model(model_name), - f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""", + f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --cellinfo --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""", logfile=open(logfile_prefix + ".txt", "w"), logstderr=(not progress) ) @@ -181,22 +181,20 @@ def output_callback(line): proc_status = "ERROR" return line - match = re.match(r"^## [0-9: ]+ Assert failed in (([a-z_][a-z0-9$_]*|\\\S*|\.)+): (\S*).*", line) + match = re.match(r"^## [0-9: ]+ Assert failed in (\S+): (\S+) \((\S+)\)", line) if match: - module_path = match[1] - location = match[3] - try: - prop = task.design_hierarchy.find_property(module_path, location) - except KeyError as e: - task.precise_prop_status = False - return line + f" (Warning: {str(e)})" + cell_name = match[3] + prop = task.design_hierarchy.find_property_by_cellname(cell_name) prop.status = "FAIL" last_prop = prop return line - match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) in step \d+.", line) + match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) \((\S+)\) in step \d+.", line) if match: - location = match[1] + cell_name = match[2] + prop = task.design_hierarchy.find_property_by_cellname(cell_name) + prop.status = "PASS" + last_prop = prop match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) if match and last_prop: @@ -231,6 +229,10 @@ def exit_callback(retcode): excess_traces += 1 if excess_traces > 0: task.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""") + elif proc_status == "PASS" and mode == "bmc": + for prop in task.design_hierarchy: + if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": + prop.status = "PASS" task.terminate() From 5abaccab695063a65fe54748b8e7dcc02428838d Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Mon, 7 Feb 2022 12:29:27 +0100 Subject: [PATCH 013/220] refactor junit print into own function --- sbysrc/sby.py | 52 +-------------------------------------- sbysrc/sby_core.py | 61 ++++++++++++++++++++++++++++++++++++++++++++-- 2 files changed, 60 insertions(+), 53 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index ee8f1a34..788c68bf 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -455,57 +455,7 @@ def run_task(taskname): if not my_opt_tmpdir and not setupmode: with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: - checks = task.design_hierarchy.get_property_list() - junit_tests = len(checks) - junit_errors = 1 if task.retcode == 16 else 0 - junit_failures = 0 - if junit_errors == 0 and task.retcode != 0: - if solver_gives_line: - for check in checks: - if check.status == "FAIL": - junit_failures += 1 - else: - junit_failures = 1 - junit_type = "cover" if task.opt_mode == "cover" else "assert" #should this be here or individual for each check? 
- junit_time = time.strftime('%Y-%m-%dT%H:%M:%S') - print(f'', file=f) - print(f'', file=f) - print(f'', file=f) - print(f'', file=f) - print(f'', file=f) - print(f'', file=f) - if task.precise_prop_status: - for check in checks: - detail_attrs = f' type="{check.type}" location="{check.location}" id="{check.name}"' - print(f'', file=f) # name required - if check.status == "PASS": - pass - elif check.status == "UNKNOWN": - print(f'', file=f) - elif check.status == "FAIL": - print(f'', file=f) - elif check.status == "ERROR": - print(f'', file=f) # type mandatory, message optional - print(f'', file=f) - else: - print(f'', file=f) # name required - if task.status == "UNKNOWN": - print(f'', file=f) - elif task.status == "FAIL": - print(f'', file=f) - elif task.status == "ERROR": - print(f'', file=f) # type mandatory, message optional - print(f'', file=f) - print('', end="", file=f) - with open(f"{task.workdir}/logfile.txt", "r") as logf: - for line in logf: - print(line.replace("&", "&").replace("<", "<").replace(">", ">").replace("\"", """), end="", file=f) - print('', file=f) - print('', file=f) - #TODO: can we handle errors and still output this file? - print('', file=f) - print(f'', file=f) - print(f'', file=f) + task.print_junit_result(f, junit_ts_name, junit_tc_name) with open(f"{task.workdir}/status", "w") as f: print(f"{task.status} {task.retcode} {task.total_time}", file=f) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index dde634bb..9c7e3fd0 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -16,13 +16,13 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import os, re, sys, signal +import os, re, sys, signal, platform if os.name == "posix": import resource, fcntl import subprocess from shutil import copyfile, copytree, rmtree from select import select -from time import time, localtime, sleep +from time import time, localtime, sleep, strftime from sby_design import SbyProperty, SbyModule, design_hierarchy all_procs_running = [] @@ -744,3 +744,60 @@ def run(self, setupmode): with open(f"{self.workdir}/{self.status}", "w") as f: for line in self.summary: print(line, file=f) + + def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): + junit_errors = 1 if self.retcode == 16 else 0 + if self.precise_prop_status: + checks = self.design_hierarchy.get_property_list() + junit_tests = len(checks) + else: + junit_tests = 1 + if self.retcode in [0, 16]: + junit_failures = 0 + else: + if self.precise_prop_status: + for check in checks: + if check.status not in self.expect: + junit_failures += 1 + else: + junit_failures = 1 + junit_time = strftime('%Y-%m-%dT%H:%M:%S') + print(f'', file=f) + print(f'', file=f) + print(f'', file=f) + print(f'', file=f) + print(f'', file=f) + print(f'', file=f) + if self.precise_prop_status: + for check in checks: + detail_attrs = '' if junit_format_strict else f' type="{check.type}" location="{check.location}" id="{check.name}"' + print(f'', file=f) # name required + if check.status == "PASS": + pass + elif check.status == "UNKNOWN": + print(f'', file=f) + elif check.status == "FAIL": + print(f'', file=f) + elif check.status == "ERROR": + print(f'', file=f) # type mandatory, message optional + print(f'', file=f) + else: + junit_type = "assert" if self.opt_mode in ["bmc", "prove"] else self.opt_mode + print(f'', file=f) # name required + if self.status == "UNKNOWN": + print(f'', file=f) + elif self.status == "FAIL": + print(f'', file=f) + elif self.status == "ERROR": + print(f'', file=f) # type 
mandatory, message optional + print(f'', file=f) + print('', end="", file=f) + with open(f"{self.workdir}/logfile.txt", "r") as logf: + for line in logf: + print(line.replace("&", "&").replace("<", "<").replace(">", ">").replace("\"", """), end="", file=f) + print('', file=f) + print('', file=f) + #TODO: can we handle errors and still output this file? + print('', file=f) + print(f'', file=f) + print(f'', file=f) From 53eb25fcaeddfdb7011ddba98c67f85b903fe8f0 Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Mon, 7 Feb 2022 15:29:36 +0100 Subject: [PATCH 014/220] handle unreached cover properties --- sbysrc/sby.py | 2 +- sbysrc/sby_core.py | 4 +++- sbysrc/sby_engine_smtbmc.py | 6 ++++++ tests/cover_fail.sby | 31 +++++++++++++++++++++++++++++++ 4 files changed, 41 insertions(+), 2 deletions(-) create mode 100644 tests/cover_fail.sby diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 788c68bf..5616bc0f 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -455,7 +455,7 @@ def run_task(taskname): if not my_opt_tmpdir and not setupmode: with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: - task.print_junit_result(f, junit_ts_name, junit_tc_name) + task.print_junit_result(f, junit_ts_name, junit_tc_name, junit_format_strict=False) with open(f"{task.workdir}/status", "w") as f: print(f"{task.status} {task.retcode} {task.total_time}", file=f) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 9c7e3fd0..8668b9d0 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -756,6 +756,7 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric junit_failures = 0 else: if self.precise_prop_status: + junit_failures = 0 for check in checks: if check.status not in self.expect: junit_failures += 1 @@ -777,7 +778,8 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric elif check.status == "UNKNOWN": print(f'', file=f) elif check.status == "FAIL": - print(f'', file=f) + traceinfo = f' Trace file: {check.tracefile}' if check.type == check.Type.ASSERT else '' + print(f'', file=f) elif check.status == "ERROR": print(f'', file=f) # type mandatory, message optional print(f'', file=f) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index e179dd54..a2553f22 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -201,6 +201,12 @@ def output_callback(line): last_prop.tracefile = match[1] last_prop = None + match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\).", line) + if match: + cell_name = match[2] + prop = task.design_hierarchy.find_property_by_cellname(cell_name) + prop.status = "FAIL" + return line def exit_callback(retcode): diff --git a/tests/cover_fail.sby b/tests/cover_fail.sby new file mode 100644 index 00000000..f169a5f6 --- /dev/null +++ b/tests/cover_fail.sby @@ -0,0 +1,31 @@ +[options] +mode cover +depth 5 +expect fail + +[engines] +smtbmc boolector + +[script] +read_verilog -sv test.v +prep -top test + +[file test.v] +module test( +input clk, +input rst, +output reg [3:0] count +); + +initial assume (rst == 1'b1); + +always @(posedge clk) begin +if (rst) + count <= 4'b0; +else + count <= count + 1'b1; + +cover (count == 0); +cover (count == 4'd11); +end +endmodule From 7d3545dc86c9c6f33f87d9fad18d388e76ef526e Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Mon, 7 Feb 2022 19:20:29 +0100 Subject: [PATCH 015/220] fix junit error/failure/skipped count --- sbysrc/sby_core.py | 53 ++++++++++++++++++++++--------------- sbysrc/sby_engine_smtbmc.py | 4 ++- tests/cover_fail.sby | 4 +-- 3 files changed, 37 insertions(+), 24 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 8668b9d0..7535d715 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -302,6 +302,8 @@ def error(self, logmessage): self.status = "ERROR" if "ERROR" not in self.expect: self.retcode = 16 + else: + self.retcode = 0 self.terminate() with open(f"{self.workdir}/{self.status}", "w") as f: print(f"ERROR: {logmessage}", file=f) @@ -746,33 +748,44 @@ def run(self, setupmode): print(line, file=f) def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): - junit_errors = 1 if self.retcode == 16 else 0 + junit_time = strftime('%Y-%m-%dT%H:%M:%S') if self.precise_prop_status: checks = self.design_hierarchy.get_property_list() junit_tests = len(checks) - else: - junit_tests = 1 - if self.retcode in [0, 16]: junit_failures = 0 + junit_errors = 0 + junit_skipped = 0 + for check in checks: + if check.status == "PASS": + pass + elif check.status == "FAIL": + junit_failures += 1 + elif check.status == "UNKNOWN": + junit_skipped += 1 + else: + junit_errors += 1 + if junit_errors == 0 and self.status == "ERROR": + junit_errors = 1 else: - if self.precise_prop_status: - junit_failures = 0 - for check in checks: - if check.status not in self.expect: - junit_failures += 1 - else: - junit_failures = 1 - junit_time = strftime('%Y-%m-%dT%H:%M:%S') + junit_tests = 1 + junit_errors = 1 if self.retcode == 16 else 0 + junit_failures = 1 if self.retcode != 0 and junit_errors == 0 else 0 + junit_skipped = 0 print(f'', file=f) print(f'', file=f) - print(f'', file=f) + print(f'', file=f) print(f'', file=f) print(f'', file=f) print(f'', file=f) if self.precise_prop_status: for check in checks: - detail_attrs = '' if junit_format_strict else f' type="{check.type}" location="{check.location}" id="{check.name}"' - print(f'', file=f) # name required + if junit_format_strict: + detail_attrs = '' + else: + detail_attrs = f' type="{check.type}" location="{check.location}" id="{check.name}"' + if check.tracefile: + detail_attrs += f' tracefile="{check.tracefile}"' + print(f'', file=f) if check.status == "PASS": pass elif check.status == "UNKNOWN": @@ -785,13 +798,11 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric print(f'', file=f) else: junit_type = "assert" if self.opt_mode in ["bmc", "prove"] else self.opt_mode - print(f'', file=f) # name required - if self.status == "UNKNOWN": - print(f'', file=f) - elif self.status == "FAIL": - print(f'', file=f) - elif self.status == "ERROR": + print(f'', file=f) + if junit_errors: print(f'', file=f) # type mandatory, message optional + elif junit_failures: + print(f'', file=f) print(f'', file=f) print('', end="", file=f) with open(f"{self.workdir}/logfile.txt", "r") as logf: diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index a2553f22..0bf887d4 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -143,7 +143,7 @@ def run(mode, task, engine_idx, engine): task, procname, task.model(model_name), - f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --cellinfo --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc 
{trace_prefix}.smtc model/design_{model_name}.smt2""", + f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""", logfile=open(logfile_prefix + ".txt", "w"), logstderr=(not progress) ) @@ -195,11 +195,13 @@ def output_callback(line): prop = task.design_hierarchy.find_property_by_cellname(cell_name) prop.status = "PASS" last_prop = prop + return line match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) if match and last_prop: last_prop.tracefile = match[1] last_prop = None + return line match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\).", line) if match: diff --git a/tests/cover_fail.sby b/tests/cover_fail.sby index f169a5f6..f75a9111 100644 --- a/tests/cover_fail.sby +++ b/tests/cover_fail.sby @@ -25,7 +25,7 @@ if (rst) else count <= count + 1'b1; -cover (count == 0); -cover (count == 4'd11); +cover (count == 0 && !rst); +cover (count == 4'd11 && !rst); end endmodule From 7ee357fcc8aa13b7015049f1c89380c5cc074107 Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Mon, 7 Feb 2022 22:01:52 +0100 Subject: [PATCH 016/220] fix induction --- sbysrc/sby_core.py | 2 +- sbysrc/sby_design.py | 7 ++++--- sbysrc/sby_engine_smtbmc.py | 3 +++ tests/both_ex.sby | 2 +- tests/cover_fail.sby | 2 +- tests/multi_assert.sby | 2 +- tests/preunsat.sby | 2 +- tests/redxor.sby | 2 +- tests/stopfirst.sby | 2 +- tests/submod_props.sby | 2 +- 10 files changed, 15 insertions(+), 11 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 7535d715..ede1a8d9 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -773,7 +773,7 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric junit_skipped = 0 print(f'', file=f) print(f'', file=f) - print(f'', file=f) + print(f'', file=f) print(f'', file=f) print(f'', file=f) print(f'', file=f) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index c0fdb8b6..98a57f13 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -51,7 +51,7 @@ def from_cell(c, name): tracefile: str = field(default="") def __repr__(self): - return f"SbyProperty<{self.type} {self.name} at {self.location}: status={self.status}, tracefile=\"{self.tracefile}\"" + return f"SbyProperty<{self.type} {self.name} at {self.location}: status={self.status}, tracefile=\"{self.tracefile}\">" @dataclass class SbyModule: @@ -105,14 +105,15 @@ def make_mod_hier(instance_name, module_name, hierarchy=""): cells = design_json["modules"][module_name]["cells"] for cell_name, cell in cells.items(): + sub_hierarchy=f"{hierarchy}/{instance_name}" if hierarchy else instance_name if cell["type"][0] != '$': - mod.submodules[cell_name] = make_mod_hier(cell_name, cell["type"], hierarchy=f"{hierarchy}/{instance_name}") + mod.submodules[cell_name] = make_mod_hier(cell_name, cell["type"], hierarchy=sub_hierarchy) if cell["type"] in ["$assume", "$assert", "$cover", "$live"]: try: location = cell["attributes"]["src"] except KeyError: location = "" - p = SbyProperty(name=cell_name, type=SbyProperty.Type.from_cell(cell["type"]), location=location, hierarchy=f"{hierarchy}/{instance_name}") + p = SbyProperty(name=cell_name, type=SbyProperty.Type.from_cell(cell["type"]), location=location, hierarchy=sub_hierarchy) mod.properties.append(p) return mod diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 
0bf887d4..492e2a57 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -273,6 +273,9 @@ def exit_callback(retcode): assert False if task.basecase_pass and task.induction_pass: + for prop in task.design_hierarchy: + if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": + prop.status = "PASS" task.update_status("PASS") task.summary.append("successful proof by k-induction.") task.terminate() diff --git a/tests/both_ex.sby b/tests/both_ex.sby index f83f2b17..81773747 100644 --- a/tests/both_ex.sby +++ b/tests/both_ex.sby @@ -15,7 +15,7 @@ pono: btor pono cover: btor btormc [script] -read_verilog -sv both_ex.v +read -sv both_ex.v prep -top test [files] diff --git a/tests/cover_fail.sby b/tests/cover_fail.sby index f75a9111..8031e207 100644 --- a/tests/cover_fail.sby +++ b/tests/cover_fail.sby @@ -7,7 +7,7 @@ expect fail smtbmc boolector [script] -read_verilog -sv test.v +read -sv test.v prep -top test [file test.v] diff --git a/tests/multi_assert.sby b/tests/multi_assert.sby index 818195f8..883181a8 100644 --- a/tests/multi_assert.sby +++ b/tests/multi_assert.sby @@ -12,7 +12,7 @@ btormc: btor btormc pono: btor pono [script] -read_verilog -sv multi_assert.v +read -sv multi_assert.v prep -top test [file multi_assert.v] diff --git a/tests/preunsat.sby b/tests/preunsat.sby index 6694a6c3..98255c61 100644 --- a/tests/preunsat.sby +++ b/tests/preunsat.sby @@ -12,7 +12,7 @@ btormc: btor btormc yices: smtbmc yices [script] -read_verilog -sv test.sv +read -sv test.sv prep -top test [file test.sv] diff --git a/tests/redxor.sby b/tests/redxor.sby index 6e6e9f8b..0746861e 100644 --- a/tests/redxor.sby +++ b/tests/redxor.sby @@ -6,7 +6,7 @@ expect pass btor btormc [script] -read_verilog -formal redxor.v +read -formal redxor.v prep -top test [files] diff --git a/tests/stopfirst.sby b/tests/stopfirst.sby index 35ed539c..782f7919 100644 --- a/tests/stopfirst.sby +++ b/tests/stopfirst.sby @@ -6,7 +6,7 @@ expect fail btor btormc [script] -read_verilog -sv test.sv +read -sv test.sv prep -top test [file test.sv] diff --git a/tests/submod_props.sby b/tests/submod_props.sby index 33a3a006..93abc9c1 100644 --- a/tests/submod_props.sby +++ b/tests/submod_props.sby @@ -12,7 +12,7 @@ expect fail smtbmc boolector [script] -read_verilog -sv test.sv +read -sv test.sv prep -top top [file test.sv] From 89ed843ff1862b740edc812b6b1d245dbccf0721 Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Tue, 22 Feb 2022 16:16:37 +0100 Subject: [PATCH 017/220] validate junit files (with extra attributes added to schema) --- sbysrc/sby_core.py | 6 ++++- tests/JUnit.xsd | 20 ++++++++++++++++ tests/Makefile | 10 ++++++-- tests/cover_fail.sby | 2 +- tests/junit_assert.sby | 38 +++++++++++++++++++++++++++++++ tests/junit_cover.sby | 43 +++++++++++++++++++++++++++++++++++ tests/junit_timeout_error.sby | 42 ++++++++++++++++++++++++++++++++++ 7 files changed, 157 insertions(+), 4 deletions(-) create mode 100644 tests/junit_assert.sby create mode 100644 tests/junit_cover.sby create mode 100644 tests/junit_timeout_error.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ede1a8d9..b50ca3e8 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -407,7 +407,9 @@ def print_common_prep(): proc.checkretcode = True def instance_hierarchy_callback(retcode): - assert retcode == 0 + if retcode != 0: + self.precise_prop_status = False + return if self.design_hierarchy == None: with open(f"{self.workdir}/model/design.json") as f: self.design_hierarchy = design_hierarchy(f) @@ -776,6 +778,8 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric print(f'', file=f) print(f'', file=f) print(f'', file=f) + print(f'', file=f) + print(f'', file=f) print(f'', file=f) if self.precise_prop_status: for check in checks: diff --git a/tests/JUnit.xsd b/tests/JUnit.xsd index 84b0f157..7a5f1846 100644 --- a/tests/JUnit.xsd +++ b/tests/JUnit.xsd @@ -130,6 +130,26 @@ Permission to waive conditions of this license may be requested from Windy Road Time taken (in seconds) to execute the test + + + Cell ID of the property + + + + + Kind of property (assert, cover, live) + + + + + Source location of the property + + + + + Tracefile for the property + + diff --git a/tests/Makefile b/tests/Makefile index 58971e69..4b8ac358 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,11 +1,17 @@ SBY_FILES=$(wildcard *.sby) SBY_TESTS=$(addprefix test_,$(SBY_FILES:.sby=)) +JUNIT_TESTS=junit_assert_pass junit_assert_fail junit_assert_preunsat \ +junit_cover_pass junit_cover_uncovered junit_cover_assert junit_cover_preunsat \ +junit_timeout_error_timeout junit_timeout_error_syntax junit_timeout_error_solver -.PHONY: test +.PHONY: test validate_junit FORCE: -test: $(SBY_TESTS) +test: $(JUNIT_TESTS) test_%: %.sby FORCE python3 ../sbysrc/sby.py -f $< + +$(JUNIT_TESTS): $(SBY_TESTS) + python validate_junit.py $@/$@.xml diff --git a/tests/cover_fail.sby b/tests/cover_fail.sby index 8031e207..391e0a83 100644 --- a/tests/cover_fail.sby +++ b/tests/cover_fail.sby @@ -1,7 +1,7 @@ [options] mode cover depth 5 -expect fail +expect pass,fail [engines] smtbmc boolector diff --git a/tests/junit_assert.sby b/tests/junit_assert.sby new file mode 100644 index 00000000..e13f3750 --- /dev/null +++ b/tests/junit_assert.sby @@ -0,0 +1,38 @@ +[tasks] +pass +fail +preunsat + +[options] +mode bmc +depth 1 + +pass: expect pass +fail: expect fail +preunsat: expect error + +[engines] +smtbmc boolector + +[script] +fail: read -define FAIL +preunsat: read -define PREUNSAT +read -sv test.sv +prep -top top + +[file test.sv] +module test(input foo); +always @* assert(foo); +`ifdef FAIL +always @* assert(!foo); +`endif +`ifdef PREUNSAT +always @* assume(!foo); +`endif +endmodule + +module top(); +test test_i ( +.foo(1'b1) +); +endmodule diff --git a/tests/junit_cover.sby b/tests/junit_cover.sby new file mode 100644 index 00000000..53747ba8 --- /dev/null +++ b/tests/junit_cover.sby @@ -0,0 +1,43 @@ +[tasks] +pass 
+uncovered fail +assert fail +preunsat + +[options] +mode cover +depth 1 + +pass: expect pass +fail: expect fail +preunsat: expect fail + +[engines] +smtbmc boolector + +[script] +uncovered: read -define FAIL +assert: read -define FAIL_ASSERT +preunsat: read -define PREUNSAT +read -sv test.sv +prep -top top + +[file test.sv] +module test(input foo); +`ifdef PREUNSAT +always @* assume(!foo); +`endif +always @* cover(foo); +`ifdef FAIL +always @* cover(!foo); +`endif +`ifdef FAIL_ASSERT +always @* assert(!foo); +`endif +endmodule + +module top(); +test test_i ( +.foo(1'b1) +); +endmodule diff --git a/tests/junit_timeout_error.sby b/tests/junit_timeout_error.sby new file mode 100644 index 00000000..551de49e --- /dev/null +++ b/tests/junit_timeout_error.sby @@ -0,0 +1,42 @@ +[tasks] +syntax error +solver error +timeout + +[options] +mode cover +depth 1 +timeout: timeout 1 +error: expect error +timeout: expect timeout + +[engines] +~solver: smtbmc --dumpsmt2 --progress --stbv z3 +solver: smtbmc foo + +[script] +read -noverific +syntax: read -define SYNTAX_ERROR +read -sv primes.sv +prep -top primes + +[file primes.sv] +module primes; + parameter [8:0] offset = 7; + (* anyconst *) reg [8:0] prime1; + wire [9:0] prime2 = prime1 + offset; + (* allconst *) reg [4:0] factor; + +`ifdef SYNTAX_ERROR + foo +`endif + + always @* begin + if (1 < factor && factor < prime1) + assume ((prime1 % factor) != 0); + if (1 < factor && factor < prime2) + assume ((prime2 % factor) != 0); + assume (1 < prime1); + cover (1); + end +endmodule From 7142f790e4ff6ca6f64e711d6722254643d32ccc Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Thu, 24 Feb 2022 22:44:11 +0100 Subject: [PATCH 018/220] add testcase for overall run result --- sbysrc/sby_core.py | 21 +++++++++++++++++---- tests/junit_nocodeloc.sby | 20 ++++++++++++++++++++ 2 files changed, 37 insertions(+), 4 deletions(-) create mode 100644 tests/junit_nocodeloc.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index b50ca3e8..e1ee51c3 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -766,8 +766,10 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric junit_skipped += 1 else: junit_errors += 1 - if junit_errors == 0 and self.status == "ERROR": - junit_errors = 1 + if self.retcode == 16: + junit_errors += 1 + elif self.retcode != 0: + junit_failures += 1 else: junit_tests = 1 junit_errors = 1 if self.retcode == 16 else 0 @@ -782,6 +784,13 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric print(f'', file=f) print(f'', file=f) if self.precise_prop_status: + print(f'', file=f) + if self.retcode == 16: + print(f'', file=f) # type mandatory, message optional + elif self.retcode != 0: + print(f'', file=f) + print(f'', file=f) + for check in checks: if junit_format_strict: detail_attrs = '' @@ -789,14 +798,18 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric detail_attrs = f' type="{check.type}" location="{check.location}" id="{check.name}"' if check.tracefile: detail_attrs += f' tracefile="{check.tracefile}"' - print(f'', file=f) + if check.location: + junit_prop_name = f"Property {check.type} in {check.hierarchy} at {check.location}" + else: + junit_prop_name = f"Property {check.type} {check.name} in {check.hierarchy}" + print(f'', file=f) if check.status == "PASS": pass elif check.status == "UNKNOWN": print(f'', file=f) elif check.status == "FAIL": traceinfo = f' Trace file: {check.tracefile}' if check.type == check.Type.ASSERT else '' - print(f'', 
file=f) + print(f'', file=f) elif check.status == "ERROR": print(f'', file=f) # type mandatory, message optional print(f'', file=f) diff --git a/tests/junit_nocodeloc.sby b/tests/junit_nocodeloc.sby new file mode 100644 index 00000000..5d2afc88 --- /dev/null +++ b/tests/junit_nocodeloc.sby @@ -0,0 +1,20 @@ +[options] +mode bmc + +expect fail + +[engines] +smtbmc boolector + +[script] +read -sv multi_assert.v +prep -top test +setattr -unset src + +[file multi_assert.v] +module test(); +always @* begin +assert (1); +assert (0); +end +endmodule From 8a81b61321413f0c47e8ab950b8d36f36976bf2e Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Mon, 7 Mar 2022 08:34:01 +0100 Subject: [PATCH 019/220] fix ci --- .github/workflows/ci.yml | 2 +- tests/Makefile | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9e055251..ea48d06b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,4 +9,4 @@ jobs: - uses: actions/checkout@v2 - uses: YosysHQ/setup-oss-cad-suite@v1 - name: Run checks - run: make ci + run: tabbypip install xmlschema && make ci diff --git a/tests/Makefile b/tests/Makefile index 4b8ac358..9370991a 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -14,4 +14,4 @@ test_%: %.sby FORCE python3 ../sbysrc/sby.py -f $< $(JUNIT_TESTS): $(SBY_TESTS) - python validate_junit.py $@/$@.xml + python3 validate_junit.py $@/$@.xml From 244194065350842a806cce8d37fb1885d7c51c1d Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Tue, 15 Mar 2022 15:12:59 +0100 Subject: [PATCH 020/220] ci housekeeping --- tests/.gitignore | 3 +++ tests/Makefile | 4 ++-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/tests/.gitignore b/tests/.gitignore index 1feaa191..212f4ddb 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -7,3 +7,6 @@ /prv32fmcmp*/ /redxor*/ /stopfirst*/ +/junit_*/ +/submod_props*/ +/multi_assert*/ diff --git a/tests/Makefile b/tests/Makefile index 9370991a..8f1d00c5 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -6,8 +6,6 @@ junit_timeout_error_timeout junit_timeout_error_syntax junit_timeout_error_solve .PHONY: test validate_junit -FORCE: - test: $(JUNIT_TESTS) test_%: %.sby FORCE @@ -15,3 +13,5 @@ test_%: %.sby FORCE $(JUNIT_TESTS): $(SBY_TESTS) python3 validate_junit.py $@/$@.xml + +FORCE: From 5dc7fc9a4d7eda59ce49d7553c48603d22a40467 Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Fri, 18 Mar 2022 16:36:41 +0100 Subject: [PATCH 021/220] translate backslashes in cell names the same way as smt2 backend does --- sbysrc/sby_design.py | 8 +++++--- sbysrc/sby_engine_smtbmc.py | 7 ++++--- tests/submod_props.sby | 3 +++ 3 files changed, 12 insertions(+), 6 deletions(-) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 98a57f13..6dfbaecb 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -91,11 +91,13 @@ def find_property(self, hierarchy, location): raise KeyError(f"Could not find assert at {location} in properties list!") return prop - def find_property_by_cellname(self, cell_name): + def find_property_by_cellname(self, cell_name, trans_dict=dict()): + # backends may need to mangle names irreversibly, so allow applying + # the same transformation here for prop in self: - if prop.name == cell_name: + if cell_name == prop.name.translate(str.maketrans(trans_dict)): return prop - raise KeyError(f"No such property: {cell_name}") + raise KeyError(f"No such property: {smt2_name}") def design_hierarchy(filename): design_json = json.load(filename) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 492e2a57..ab0c7e5f 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -160,6 +160,7 @@ def run(mode, task, engine_idx, engine): def output_callback(line): nonlocal proc_status nonlocal last_prop + smt2_trans = {'\\':'/', '|':'/'} match = re.match(r"^## [0-9: ]+ Status: FAILED", line) if match: @@ -184,7 +185,7 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Assert failed in (\S+): (\S+) \((\S+)\)", line) if match: cell_name = match[3] - prop = task.design_hierarchy.find_property_by_cellname(cell_name) + prop = task.design_hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" last_prop = prop return line @@ -192,7 +193,7 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) \((\S+)\) in step \d+.", line) if match: cell_name = match[2] - prop = task.design_hierarchy.find_property_by_cellname(cell_name) + prop = task.design_hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "PASS" last_prop = prop return line @@ -206,7 +207,7 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\).", line) if match: cell_name = match[2] - prop = task.design_hierarchy.find_property_by_cellname(cell_name) + prop = task.design_hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" return line diff --git a/tests/submod_props.sby b/tests/submod_props.sby index 93abc9c1..99336767 100644 --- a/tests/submod_props.sby +++ b/tests/submod_props.sby @@ -1,10 +1,12 @@ [tasks] bmc cover +flatten [options] bmc: mode bmc cover: mode cover +flatten: mode bmc expect fail @@ -14,6 +16,7 @@ smtbmc boolector [script] read -sv test.sv prep -top top +flatten: flatten [file test.sv] module test(input foo); From c7e4785a8a503e95065329eee3f6e20db46954a0 Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Tue, 22 Mar 2022 16:16:02 +0100 Subject: [PATCH 022/220] junit: handle multiple asserts failing with the same trace --- sbysrc/sby_engine_smtbmc.py | 11 ++++++----- tests/2props1trace.sby | 22 ++++++++++++++++++++++ 2 files changed, 28 insertions(+), 5 deletions(-) create mode 100644 tests/2props1trace.sby diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 492e2a57..46054082 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -155,7 +155,7 @@ def run(mode, task, engine_idx, engine): task.induction_procs.append(proc) proc_status = None - last_prop = None + last_prop = [] def output_callback(line): nonlocal proc_status @@ -186,7 +186,7 @@ def output_callback(line): cell_name = match[3] prop = task.design_hierarchy.find_property_by_cellname(cell_name) prop.status = "FAIL" - last_prop = prop + last_prop.append(prop) return line match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) \((\S+)\) in step \d+.", line) @@ -194,13 +194,14 @@ def output_callback(line): cell_name = match[2] prop = task.design_hierarchy.find_property_by_cellname(cell_name) prop.status = "PASS" - last_prop = prop + last_prop.append(prop) return line match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) if match and last_prop: - last_prop.tracefile = match[1] - last_prop = None + for p in last_prop: + p.tracefile = match[1] + last_prop = [] return line match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\).", line) diff --git a/tests/2props1trace.sby b/tests/2props1trace.sby new file mode 100644 index 00000000..8f51fde6 --- /dev/null +++ b/tests/2props1trace.sby @@ -0,0 +1,22 @@ +[options] +mode bmc +depth 1 +expect fail + +[engines] +smtbmc + +[script] +read -sv top.sv +prep -top top + +[file top.sv] +module top( +input foo, +input bar +); +always @(*) begin + assert (foo); + assert (bar); +end +endmodule From 7824460e27eb65cf240ab92e306b4be57eb0b161 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 21 Mar 2022 18:36:09 +0100 Subject: [PATCH 023/220] Initial support for the new smtbmc --keep-going option So far this only passes on the option and adjusts the trace_prefix to support multiple numbered traces. Further changes are needed to correctly associate individual traces with the assertions failing in that trace. 
--- sbysrc/sby_engine_smtbmc.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 492e2a57..9b7ff2b8 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -31,11 +31,12 @@ def run(mode, task, engine_idx, engine): progress = False basecase_only = False induction_only = False + keep_going = False random_seed = None task.precise_prop_status = True opts, args = getopt.getopt(engine[1:], "", ["nomem", "syn", "stbv", "stdt", "presat", - "nopresat", "unroll", "nounroll", "dumpsmt2", "progress", "basecase", "induction", "seed="]) + "nopresat", "unroll", "nounroll", "dumpsmt2", "progress", "basecase", "induction", "keep-going", "seed="]) for o, a in opts: if o == "--nomem": @@ -66,6 +67,10 @@ def run(mode, task, engine_idx, engine): if basecase_only: task.error("smtbmc options --basecase and --induction are exclusive.") induction_only = True + elif o == "--keep-going": + if mode not in ("bmc", "prove", "prove_basecase", "prove_induction"): + task.error("smtbmc option --keep-going is only supported in bmc and prove mode.") + keep_going = True elif o == "--seed": random_seed = a else: @@ -126,6 +131,10 @@ def run(mode, task, engine_idx, engine): smtbmc_opts.append("-c") trace_prefix += "%" + if keep_going: + smtbmc_opts.append("--keep-going") + trace_prefix += "%" + if dumpsmt2: smtbmc_opts += ["--dump-smt2", trace_prefix.replace("%", "") + ".smt2"] From 079df4d95f6955b9f4dc0b48b8fd282b0c0983ac Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 25 Mar 2022 11:38:22 +0100 Subject: [PATCH 024/220] Use `-no-startoffset`, avoiding index mismatch between aiger and smt2 --- sbysrc/sby_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index e1ee51c3..b78ef853 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -503,7 +503,7 @@ def instance_hierarchy_callback(retcode): print("abc -g AND -fast", file=f) print("opt_clean", file=f) print("stat", file=f) - print("write_aiger -I -B -zinit -map design_aiger.aim design_aiger.aig", file=f) + print("write_aiger -I -B -zinit -no-startoffset -map design_aiger.aim design_aiger.aig", file=f) proc = SbyProc( self, From a434252ca130dc673eb58cc698f8a9e39a273c1b Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 24 Mar 2022 13:12:25 +0100 Subject: [PATCH 025/220] Test signals with nonzero start offsets in aim files with smtbmc --- tests/.gitignore | 1 + tests/aim_vs_smt2_nonzero_start_offset.sby | 33 ++++++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 tests/aim_vs_smt2_nonzero_start_offset.sby diff --git a/tests/.gitignore b/tests/.gitignore index 212f4ddb..120675be 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -10,3 +10,4 @@ /junit_*/ /submod_props*/ /multi_assert*/ +/aim_vs_smt2_nonzero_start_offset*/ diff --git a/tests/aim_vs_smt2_nonzero_start_offset.sby b/tests/aim_vs_smt2_nonzero_start_offset.sby new file mode 100644 index 00000000..43095513 --- /dev/null +++ b/tests/aim_vs_smt2_nonzero_start_offset.sby @@ -0,0 +1,33 @@ +[tasks] +bmc +prove + +[options] +bmc: mode bmc +prove: mode prove +expect fail +wait on + +[engines] +bmc: abc bmc3 +bmc: abc sim3 +prove: aiger avy +prove: aiger suprove +prove: abc pdr + +[script] +read -sv test.sv +prep -top test + +[file test.sv] +module test ( + input clk, + input [8:1] nonzero_offset +); + reg [7:0] counter = 0; + + always @(posedge clk) begin + if (counter == 3) assert(nonzero_offset[1]); + counter 
<= counter + 1; + end +endmodule From 3834fe76226f39b87f4104d5e908d9cb82106cdf Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Fri, 25 Mar 2022 18:01:09 +0100 Subject: [PATCH 026/220] document btor engine, add overview of mode/engine/solver combinations, remove unimplemented modes --- docs/source/reference.rst | 54 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 52 insertions(+), 2 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index dc5f3363..9cbf78bc 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -120,10 +120,12 @@ Mode Description ``prove`` Unbounded model check to verify safety properties (``assert(...)`` statements) ``live`` Unbounded model check to verify liveness properties (``assert(s_eventually ...)`` statements) ``cover`` Generate set of shortest traces required to reach all cover() statements -``equiv`` Formal equivalence checking (usually to verify pre- and post-synthesis equivalence) -``synth`` Reactive Synthesis (synthesis of circuit from safety properties) ========= =========== +.. + ``equiv`` Formal equivalence checking (usually to verify pre- and post-synthesis equivalence) + ``synth`` Reactive Synthesis (synthesis of circuit from safety properties) + All other options have default values and thus are optional. The available options are: @@ -197,6 +199,38 @@ solver options. In the 2nd line ``abc`` is the engine, there are no engine options, ``sim3`` is the solver, and ``-W 15`` are solver options. +The following mode/engine/solver combinations are currently supported: + ++-----------+--------------------------+ +| Mode | Engine | ++===========+==========================+ +| ``bmc`` | ``smtbmc [all solvers]`` | +| | | +| | ``btor btormc`` | +| | | +| | ``btor pono`` | +| | | +| | ``abc bmc3`` | +| | | +| | ``abc sim3`` | ++-----------+--------------------------+ +| ``prove`` | ``smtbmc [all solvers]`` | +| | | +| | ``abc pdr`` | +| | | +| | ``aiger avy`` | +| | | +| | ``aiger suprove`` | ++-----------+--------------------------+ +| ``cover`` | ``smtbmc [all solvers]`` | +| | | +| | ``btor btormc`` | ++-----------+--------------------------+ +| ``live`` | ``aiger suprove`` | +| | | +| | ``aiger avy`` | ++-----------+--------------------------+ + ``smtbmc`` engine ~~~~~~~~~~~~~~~~~ @@ -240,12 +274,28 @@ The following solvers are currently supported by ``yosys-smtbmc``: * yices * boolector + * bitwuzla * z3 * mathsat * cvc4 Any additional options after ``--`` are passed to ``yosys-smtbmc`` as-is. +``btor`` engine +~~~~~~~~~~~~~~~ + +The ``btor`` engine supports hardware modelcheckers that accept btor2 files. +The engine supports no engine options and supports the following solvers: + ++-------------------------------+---------------------------------+ +| Solver | Modes | ++===============================+=================================+ +| ``btormc`` | ``bmc``, ``cover`` | ++-------------------------------+---------------------------------+ +| ``pono`` | ``bmc`` | ++-------------------------------+---------------------------------+ + + ``aiger`` engine ~~~~~~~~~~~~~~~~ From 008d020c4defd135b779d0d052c077260a33829f Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Tue, 29 Mar 2022 19:10:29 +0200 Subject: [PATCH 027/220] note unexpected return statuses in junit --- sbysrc/sby_core.py | 8 ++++++-- tests/.gitignore | 1 + tests/Makefile | 5 ++++- tests/scripted/.gitignore | 1 + tests/scripted/Makefile | 11 +++++++++++ tests/scripted/junit_expect.sby | 16 ++++++++++++++++ tests/scripted/junit_expect.sh | 5 +++++ 7 files changed, 44 insertions(+), 3 deletions(-) create mode 100644 tests/scripted/.gitignore create mode 100644 tests/scripted/Makefile create mode 100644 tests/scripted/junit_expect.sby create mode 100644 tests/scripted/junit_expect.sh diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index b78ef853..1e96d00e 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -788,7 +788,11 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric if self.retcode == 16: print(f'', file=f) # type mandatory, message optional elif self.retcode != 0: - print(f'', file=f) + if len(self.expect) > 1 or "PASS" not in self.expect: + expected = " ".join(self.expect) + print(f'', file=f) + else: + print(f'', file=f) print(f'', file=f) for check in checks: @@ -814,11 +818,11 @@ def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_stric print(f'', file=f) # type mandatory, message optional print(f'', file=f) else: - junit_type = "assert" if self.opt_mode in ["bmc", "prove"] else self.opt_mode print(f'', file=f) if junit_errors: print(f'', file=f) # type mandatory, message optional elif junit_failures: + junit_type = "assert" if self.opt_mode in ["bmc", "prove"] else self.opt_mode print(f'', file=f) print(f'', file=f) print('', end="", file=f) diff --git a/tests/.gitignore b/tests/.gitignore index 120675be..c6bf5b56 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -11,3 +11,4 @@ /submod_props*/ /multi_assert*/ /aim_vs_smt2_nonzero_start_offset*/ +/2props1trace*/ diff --git a/tests/Makefile b/tests/Makefile index 8f1d00c5..15e87ff3 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -4,7 +4,7 @@ JUNIT_TESTS=junit_assert_pass junit_assert_fail junit_assert_preunsat \ junit_cover_pass junit_cover_uncovered junit_cover_assert junit_cover_preunsat \ junit_timeout_error_timeout junit_timeout_error_syntax junit_timeout_error_solver -.PHONY: test validate_junit +.PHONY: test validate_junit scripted test: $(JUNIT_TESTS) @@ -14,4 +14,7 @@ test_%: %.sby FORCE $(JUNIT_TESTS): $(SBY_TESTS) python3 validate_junit.py $@/$@.xml +scripted: + make -C scripted + FORCE: diff --git a/tests/scripted/.gitignore b/tests/scripted/.gitignore new file mode 100644 index 00000000..6403b855 --- /dev/null +++ b/tests/scripted/.gitignore @@ -0,0 +1 @@ +/junit_*/ diff --git a/tests/scripted/Makefile b/tests/scripted/Makefile new file mode 100644 index 00000000..ca27199f --- /dev/null +++ b/tests/scripted/Makefile @@ -0,0 +1,11 @@ +SH_FILES=$(wildcard *.sh) +SH_TESTS=$(addprefix test_,$(SH_FILES:.sh=)) + +test: $(SH_TESTS) + +test_%: %.sh FORCE + bash $< + +FORCE: + +.PHONY: test FORCE diff --git a/tests/scripted/junit_expect.sby b/tests/scripted/junit_expect.sby new file mode 100644 index 00000000..63d65a6e --- /dev/null +++ b/tests/scripted/junit_expect.sby @@ -0,0 +1,16 @@ +[options] +mode bmc +depth 1 +expect fail,timeout + +[engines] +smtbmc + +[script] +read -formal foo.v +prep -top foo + +[file foo.v] +module foo; +always_comb assert(1); +endmodule diff --git a/tests/scripted/junit_expect.sh b/tests/scripted/junit_expect.sh new file mode 100644 index 00000000..27b972d6 --- /dev/null +++ 
b/tests/scripted/junit_expect.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +# this is expected to return 1 so don't use 'set -e' +python3 ../../sbysrc/sby.py -f junit_expect.sby +grep '' junit_expect/junit_expect.xml From 2d3d96478a001f9686d185ecf26373a29bc52b4d Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 24 Mar 2022 16:36:59 +0100 Subject: [PATCH 028/220] Tests for `--keep-going` This also changes the test Makefile to run `.check.py` files after running the corresponding `.sby` file to allow more precise testing of the keep going feature. --- tests/.gitignore | 1 + tests/Makefile | 7 ++++++- tests/check_output.py | 17 +++++++++++++++++ tests/keepgoing_multi_step.check.py | 27 +++++++++++++++++++++++++++ tests/keepgoing_multi_step.sby | 18 ++++++++++++++++++ tests/keepgoing_multi_step.sv | 22 ++++++++++++++++++++++ tests/keepgoing_same_step.check.py | 18 ++++++++++++++++++ tests/keepgoing_same_step.sby | 13 +++++++++++++ tests/keepgoing_same_step.sv | 17 +++++++++++++++++ tests/keepgoing_smtc.check.py | 24 ++++++++++++++++++++++++ tests/keepgoing_smtc.sby | 19 +++++++++++++++++++ 11 files changed, 182 insertions(+), 1 deletion(-) create mode 100644 tests/check_output.py create mode 100644 tests/keepgoing_multi_step.check.py create mode 100644 tests/keepgoing_multi_step.sby create mode 100644 tests/keepgoing_multi_step.sv create mode 100644 tests/keepgoing_same_step.check.py create mode 100644 tests/keepgoing_same_step.sby create mode 100644 tests/keepgoing_same_step.sv create mode 100644 tests/keepgoing_smtc.check.py create mode 100644 tests/keepgoing_smtc.sby diff --git a/tests/.gitignore b/tests/.gitignore index 212f4ddb..86d3851a 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -8,5 +8,6 @@ /redxor*/ /stopfirst*/ /junit_*/ +/keepgoing_*/ /submod_props*/ /multi_assert*/ diff --git a/tests/Makefile b/tests/Makefile index 8f1d00c5..46ec23b2 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,16 +1,21 @@ SBY_FILES=$(wildcard *.sby) SBY_TESTS=$(addprefix test_,$(SBY_FILES:.sby=)) +CHECK_PY_FILES=$(wildcard *.check.py) +CHECK_PY_TASKS=$(addprefix check_,$(CHECK_PY_FILES:.check.py=)) JUNIT_TESTS=junit_assert_pass junit_assert_fail junit_assert_preunsat \ junit_cover_pass junit_cover_uncovered junit_cover_assert junit_cover_preunsat \ junit_timeout_error_timeout junit_timeout_error_syntax junit_timeout_error_solver .PHONY: test validate_junit -test: $(JUNIT_TESTS) +test: $(JUNIT_TESTS) $(CHECK_PY_TASKS) test_%: %.sby FORCE python3 ../sbysrc/sby.py -f $< +$(CHECK_PY_TASKS): check_%: %.check.py test_% + python3 $< + $(JUNIT_TESTS): $(SBY_TESTS) python3 validate_junit.py $@/$@.xml diff --git a/tests/check_output.py b/tests/check_output.py new file mode 100644 index 00000000..ab531ebc --- /dev/null +++ b/tests/check_output.py @@ -0,0 +1,17 @@ +import re + + +def line_ref(dir, filename, pattern): + with open(dir + "/src/" + filename) as file: + if isinstance(pattern, str): + pattern_re = re.compile(re.escape(pattern)) + else: + pattern_re = pattern + pattern = pattern.pattern + + for number, line in enumerate(file, 1): + if pattern_re.search(line): + # Needs to match source locations for both verilog frontends + return fr"{filename}:(?:{number}|\d+.\d+-{number}.\d+)" + + raise RuntimeError("%s: pattern `%s` not found" % (filename, pattern)) diff --git a/tests/keepgoing_multi_step.check.py b/tests/keepgoing_multi_step.check.py new file mode 100644 index 00000000..0b7d49eb --- /dev/null +++ b/tests/keepgoing_multi_step.check.py @@ -0,0 +1,27 @@ +from check_output import * + +src = 
"keepgoing_multi_step.sv" + +for task in ["keepgoing_multi_step_bmc", "keepgoing_multi_step_prove"]: + assert_0 = line_ref(task, src, "assert(0)") + step_3_7 = line_ref(task, src, "step 3,7") + step_5 = line_ref(task, src, "step 5") + step_7 = line_ref(task, src, "step 7") + + log = open(task + "/logfile.txt").read() + log_per_trace = log.split("Writing trace to VCD file")[:-1] + + assert len(log_per_trace) == 4 + + + assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[0], re.M) + + for i in range(1, 4): + assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[i], re.M) + + + assert re.search(r"Assert failed in test: %s \(.*\)$" % step_3_7, log_per_trace[1], re.M) + assert re.search(r"Assert failed in test: %s \(.*\)$" % step_5, log_per_trace[2], re.M) + assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) + assert re.search(r"Assert failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) + diff --git a/tests/keepgoing_multi_step.sby b/tests/keepgoing_multi_step.sby new file mode 100644 index 00000000..b9b0ba58 --- /dev/null +++ b/tests/keepgoing_multi_step.sby @@ -0,0 +1,18 @@ +[tasks] +bmc +prove + +[options] +bmc: mode bmc +prove: mode prove +expect fail + +[engines] +smtbmc --keep-going boolector + +[script] +read -sv keepgoing_multi_step.sv +prep -top test + +[files] +keepgoing_multi_step.sv diff --git a/tests/keepgoing_multi_step.sv b/tests/keepgoing_multi_step.sv new file mode 100644 index 00000000..8d5d8e36 --- /dev/null +++ b/tests/keepgoing_multi_step.sv @@ -0,0 +1,22 @@ +module test ( + input clk, a +); + reg [7:0] counter = 0; + + always @(posedge clk) begin + counter <= counter + 1; + end + + always @(posedge clk) begin + assert(0); + if (counter == 3 || counter == 7) begin + assert(a); // step 3,7 + end + if (counter == 5) begin + assert(a); // step 5 + end + if (counter == 7) begin + assert(a); // step 7 + end + end +endmodule diff --git a/tests/keepgoing_same_step.check.py b/tests/keepgoing_same_step.check.py new file mode 100644 index 00000000..35cd3140 --- /dev/null +++ b/tests/keepgoing_same_step.check.py @@ -0,0 +1,18 @@ +from check_output import * + +task = "keepgoing_same_step" +src = "keepgoing_same_step.sv" + +assert_a = line_ref(task, src, "assert(a)") +assert_not_a = line_ref(task, src, "assert(!a)") +assert_0 = line_ref(task, src, "assert(0)") + +log = open(task + "/logfile.txt").read() +log_per_trace = log.split("Writing trace to VCD file")[:-1] + +assert len(log_per_trace) == 2 + +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_a, log, re.M) +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_not_a, log, re.M) +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[0], re.M) +assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[1], re.M) diff --git a/tests/keepgoing_same_step.sby b/tests/keepgoing_same_step.sby new file mode 100644 index 00000000..d344dcc5 --- /dev/null +++ b/tests/keepgoing_same_step.sby @@ -0,0 +1,13 @@ +[options] +mode bmc +expect fail + +[engines] +smtbmc --keep-going boolector + +[script] +read -sv keepgoing_same_step.sv +prep -top test + +[files] +keepgoing_same_step.sv diff --git a/tests/keepgoing_same_step.sv b/tests/keepgoing_same_step.sv new file mode 100644 index 00000000..98fe6d0c --- /dev/null +++ b/tests/keepgoing_same_step.sv @@ -0,0 +1,17 @@ +module test ( + input clk, a +); + reg [7:0] counter = 0; + + 
always @(posedge clk) begin + counter <= counter + 1; + end + + always @(posedge clk) begin + if (counter == 3) begin + assert(a); + assert(!a); + assert(0); + end + end +endmodule diff --git a/tests/keepgoing_smtc.check.py b/tests/keepgoing_smtc.check.py new file mode 100644 index 00000000..5749e3f9 --- /dev/null +++ b/tests/keepgoing_smtc.check.py @@ -0,0 +1,24 @@ +from check_output import * + +task = "keepgoing_smtc" +src = "keepgoing_same_step.sv" + +assert_a = line_ref(task, src, "assert(a)") +assert_not_a = line_ref(task, src, "assert(!a)") +assert_0 = line_ref(task, src, "assert(0)") + +assert_false = line_ref(task, "extra.smtc", "assert false") +assert_distinct = line_ref(task, "extra.smtc", "assert (distinct") + +log = open(task + "/logfile.txt").read() +log_per_trace = log.split("Writing trace to VCD file")[:-1] + +assert len(log_per_trace) == 4 + +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_a, log, re.M) +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_not_a, log, re.M) + +assert re.search(r"Assert src/%s failed: false" % assert_false, log_per_trace[0], re.M) +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[1], re.M) +assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[2], re.M) +assert re.search(r"Assert src/%s failed: \(distinct" % assert_distinct, log_per_trace[3], re.M) diff --git a/tests/keepgoing_smtc.sby b/tests/keepgoing_smtc.sby new file mode 100644 index 00000000..a4cc7621 --- /dev/null +++ b/tests/keepgoing_smtc.sby @@ -0,0 +1,19 @@ +[options] +mode bmc +expect fail + +[engines] +smtbmc --keep-going boolector -- --smtc src/extra.smtc + +[script] +read -sv keepgoing_same_step.sv +prep -top test + +[files] +keepgoing_same_step.sv + +[file extra.smtc] +state 2 +assert false +always +assert (distinct [counter] #b00000111) From b725bfed0c986777f239f362596b096c8cc05326 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 30 Mar 2022 13:35:57 +0200 Subject: [PATCH 029/220] Prefer the first tracefile for each failing assertion --- sbysrc/sby_engine_smtbmc.py | 3 ++- tests/keepgoing_multi_step.check.py | 2 ++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 18cfb092..4ec365de 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -210,7 +210,8 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) if match and last_prop: for p in last_prop: - p.tracefile = match[1] + if not p.tracefile: + p.tracefile = match[1] last_prop = [] return line diff --git a/tests/keepgoing_multi_step.check.py b/tests/keepgoing_multi_step.check.py index 0b7d49eb..78c713f2 100644 --- a/tests/keepgoing_multi_step.check.py +++ b/tests/keepgoing_multi_step.check.py @@ -25,3 +25,5 @@ assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) assert re.search(r"Assert failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) + pattern = f"Property ASSERT in test at {assert_0} failed. 
Trace file: engine_0/trace0.vcd" + assert re.search(pattern, open(f"{task}/{task}.xml").read()) From a78eaa57db9c8c5aae0f90eafc17c77b1a4d6029 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 31 Mar 2022 13:12:15 +0200 Subject: [PATCH 030/220] Fix variable name in find_property_by_cellname's error path --- sbysrc/sby_design.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 6dfbaecb..dc183504 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -97,7 +97,7 @@ def find_property_by_cellname(self, cell_name, trans_dict=dict()): for prop in self: if cell_name == prop.name.translate(str.maketrans(trans_dict)): return prop - raise KeyError(f"No such property: {smt2_name}") + raise KeyError(f"No such property: {cell_name}") def design_hierarchy(filename): design_json = json.load(filename) From 4b512668b21613ead9af7f2389eb134c804fc012 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 31 Mar 2022 15:51:58 +0200 Subject: [PATCH 031/220] Fix design_hierarchy handling of $paramod cells --- sbysrc/sby_design.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index dc183504..d18d6d9b 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -108,7 +108,7 @@ def make_mod_hier(instance_name, module_name, hierarchy=""): cells = design_json["modules"][module_name]["cells"] for cell_name, cell in cells.items(): sub_hierarchy=f"{hierarchy}/{instance_name}" if hierarchy else instance_name - if cell["type"][0] != '$': + if cell["type"][0] != '$' or cell["type"].startswith("$paramod"): mod.submodules[cell_name] = make_mod_hier(cell_name, cell["type"], hierarchy=sub_hierarchy) if cell["type"] in ["$assume", "$assert", "$cover", "$live"]: try: From ef236eeddc81176672ac8c3d3a71d938fe31317c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 1 Apr 2022 19:25:09 +0200 Subject: [PATCH 032/220] Regression test: do not merge FFs with unconstrained initvals Currently done by `opt -keepdc` via `opt_merge` but not valid in a formal context. --- tests/.gitignore | 1 + tests/invalid_ff_dcinit_merge.sby | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) create mode 100644 tests/invalid_ff_dcinit_merge.sby diff --git a/tests/.gitignore b/tests/.gitignore index 21f5f780..f91f05ae 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -12,4 +12,5 @@ /submod_props*/ /multi_assert*/ /aim_vs_smt2_nonzero_start_offset*/ +/invalid_ff_dcinit_merge*/ /2props1trace*/ diff --git a/tests/invalid_ff_dcinit_merge.sby b/tests/invalid_ff_dcinit_merge.sby new file mode 100644 index 00000000..a23d8f02 --- /dev/null +++ b/tests/invalid_ff_dcinit_merge.sby @@ -0,0 +1,29 @@ +[options] +mode bmc +depth 4 +expect fail + +[engines] +smtbmc + +[script] +read -formal top.sv +prep -top top + +[file top.sv] +module top( +input clk, d +); + +reg q1; +reg q2; + +always @(posedge clk) begin + q1 <= d; + q2 <= d; +end; + +// q1 and q2 are unconstrained in the initial state, so this should fail +always @(*) assert(q1 == q2); + +endmodule From 8ce526c22d872f65af8fd409f42f09be3ff3a2d9 Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Thu, 10 Mar 2022 17:46:08 +0100 Subject: [PATCH 033/220] junit: use write_jny instead of write_json --- sbysrc/sby_core.py | 2 +- sbysrc/sby_design.py | 38 +++++++++++++++++++++++--------------- 2 files changed, 24 insertions(+), 16 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 1e96d00e..3908f657 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -395,7 +395,7 @@ def print_common_prep(): print(cmd, file=f) # the user must designate a top module in [script] print("hierarchy -simcheck", file=f) - print(f"""write_json ../model/design.json""", file=f) + print(f"""write_jny -no-connections ../model/design.json""", file=f) print(f"""write_rtlil ../model/design.il""", file=f) proc = SbyProc( diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index d18d6d9b..8fc78955 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -103,26 +103,34 @@ def design_hierarchy(filename): design_json = json.load(filename) def make_mod_hier(instance_name, module_name, hierarchy=""): # print(instance_name,":", module_name) + sub_hierarchy=f"{hierarchy}/{instance_name}" if hierarchy else instance_name mod = SbyModule(name=instance_name, type=module_name) - cells = design_json["modules"][module_name]["cells"] - for cell_name, cell in cells.items(): - sub_hierarchy=f"{hierarchy}/{instance_name}" if hierarchy else instance_name - if cell["type"][0] != '$' or cell["type"].startswith("$paramod"): - mod.submodules[cell_name] = make_mod_hier(cell_name, cell["type"], hierarchy=sub_hierarchy) - if cell["type"] in ["$assume", "$assert", "$cover", "$live"]: - try: - location = cell["attributes"]["src"] - except KeyError: - location = "" - p = SbyProperty(name=cell_name, type=SbyProperty.Type.from_cell(cell["type"]), location=location, hierarchy=sub_hierarchy) - mod.properties.append(p) + for m in design_json["modules"]: + if m["name"] == module_name: + cell_sorts = m["cell_sorts"] + break + else: + raise ValueError(f"Cannot find module {module_name}") + + for sort in cell_sorts: + if sort["type"] in ["$assume", "$assert", "$cover", "$live"]: + for cell in sort["cells"]: + try: + location = cell["attributes"]["src"] + except KeyError: + location = "" + p = SbyProperty(name=cell["name"], type=SbyProperty.Type.from_cell(sort["type"]), location=location, hierarchy=sub_hierarchy) + mod.properties.append(p) + if sort["type"][0] != '$' or sort["type"].startswith("$paramod"): + for cell in sort["cells"]: + mod.submodules[cell["name"]] = make_mod_hier(cell["name"], sort["type"], hierarchy=sub_hierarchy) return mod - for module_name in design_json["modules"]: - attrs = design_json["modules"][module_name]["attributes"] + for m in design_json["modules"]: + attrs = m["attributes"] if "top" in attrs and int(attrs["top"]) == 1: - hierarchy = make_mod_hier(module_name, module_name) + hierarchy = make_mod_hier(m["name"], m["name"]) return hierarchy else: raise ValueError("Cannot find top module") From a1909940980fce4efd710a6cff9f874ae7ffa9d0 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 11 Apr 2022 17:35:11 +0200 Subject: [PATCH 034/220] Add envvar to enable automatic .gitignore creation for workdirs --- sbysrc/sby.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 5616bc0f..d28cd29b 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -415,6 +415,10 @@ def run_task(taskname): my_opt_tmpdir = True my_workdir = tempfile.mkdtemp() + if os.getenv("SBY_WORKDIR_GITIGNORE"): + with open(f"{my_workdir}/.gitignore", "w") as gitignore: + 
print("*", file=gitignore) + junit_ts_name = os.path.basename(sbyfile[:-4]) if sbyfile is not None else workdir if workdir is not None else "stdin" junit_tc_name = taskname if taskname is not None else "default" From 6daa434d85cd07d5feea5511143c9af793ac1658 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 11 Apr 2022 17:37:27 +0200 Subject: [PATCH 035/220] Add --dumptaskinfo option to output some .sby metadata as json --- sbysrc/sby.py | 22 +++++- sbysrc/sby_core.py | 190 ++++++++++++++++++++++++--------------------- 2 files changed, 120 insertions(+), 92 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index d28cd29b..d13960c7 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -17,9 +17,9 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import argparse, os, sys, shutil, tempfile, re +import argparse, json, os, sys, shutil, tempfile, re ##yosys-sys-path## -from sby_core import SbyTask, SbyAbort, process_filename +from sby_core import SbyConfig, SbyTask, SbyAbort, process_filename import time, platform class DictAction(argparse.Action): @@ -72,6 +72,8 @@ def __call__(self, parser, namespace, values, option_string=None): help="print the list of tasks") parser.add_argument("--dumpdefaults", action="store_true", dest="dump_defaults", help="print the list of default tasks") +parser.add_argument("--dumptaskinfo", action="store_true", dest="dump_taskinfo", + help="output a summary of tasks as JSON") parser.add_argument("--dumpfiles", action="store_true", dest="dump_files", help="print the list of source files") parser.add_argument("--setup", action="store_true", dest="setupmode", @@ -102,6 +104,7 @@ def __call__(self, parser, namespace, values, option_string=None): dump_tags = args.dump_tags dump_tasks = args.dump_tasks dump_defaults = args.dump_defaults +dump_taskinfo = args.dump_taskinfo dump_files = args.dump_files reusedir = False setupmode = args.setupmode @@ -367,6 +370,21 @@ def find_files(taskname): print(name) sys.exit(0) +if dump_taskinfo: + _, _, tasknames, _ = read_sbyconfig(sbydata, None) + taskinfo = {} + for taskname in tasknames or [None]: + task_sbyconfig, _, _, _ = read_sbyconfig(sbydata, taskname) + taskinfo[taskname or ""] = info = {} + cfg = SbyConfig() + cfg.parse_config(task_sbyconfig) + taskinfo[taskname or ""] = { + "mode": cfg.options.get("mode"), + "engines": cfg.engines, + } + print(json.dumps(taskinfo, indent=2)) + sys.exit(0) + if len(tasknames) == 0: _, _, tasknames, _ = read_sbyconfig(sbydata, None) if len(tasknames) == 0: diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 3908f657..ab10614f 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -209,14 +209,110 @@ class SbyAbort(BaseException): pass -class SbyTask: - def __init__(self, sbyconfig, workdir, early_logs, reusedir): +class SbyConfig: + def __init__(self): self.options = dict() - self.used_options = set() self.engines = list() self.script = list() self.files = dict() self.verbatim_files = dict() + pass + + def parse_config(self, f): + mode = None + + for line in f: + raw_line = line + if mode in ["options", "engines", "files"]: + line = re.sub(r"\s*(\s#.*)?$", "", line) + if line == "" or line[0] == "#": + continue + else: + line = line.rstrip() + # print(line) + if mode is None and (len(line) == 0 or line[0] == "#"): + continue + match = re.match(r"^\s*\[(.*)\]\s*$", line) + if match: + entries = match.group(1).split() + if len(entries) == 0: + self.error(f"sby file syntax error: {line}") + + if entries[0] == "options": + mode = "options" + if 
len(self.options) != 0 or len(entries) != 1: + self.error(f"sby file syntax error: {line}") + continue + + if entries[0] == "engines": + mode = "engines" + if len(self.engines) != 0 or len(entries) != 1: + self.error(f"sby file syntax error: {line}") + continue + + if entries[0] == "script": + mode = "script" + if len(self.script) != 0 or len(entries) != 1: + self.error(f"sby file syntax error: {line}") + continue + + if entries[0] == "file": + mode = "file" + if len(entries) != 2: + self.error(f"sby file syntax error: {line}") + current_verbatim_file = entries[1] + if current_verbatim_file in self.verbatim_files: + self.error(f"duplicate file: {entries[1]}") + self.verbatim_files[current_verbatim_file] = list() + continue + + if entries[0] == "files": + mode = "files" + if len(entries) != 1: + self.error(f"sby file syntax error: {line}") + continue + + self.error(f"sby file syntax error: {line}") + + if mode == "options": + entries = line.split() + if len(entries) != 2: + self.error(f"sby file syntax error: {line}") + self.options[entries[0]] = entries[1] + continue + + if mode == "engines": + entries = line.split() + self.engines.append(entries) + continue + + if mode == "script": + self.script.append(line) + continue + + if mode == "files": + entries = line.split() + if len(entries) == 1: + self.files[os.path.basename(entries[0])] = entries[0] + elif len(entries) == 2: + self.files[entries[0]] = entries[1] + else: + self.error(f"sby file syntax error: {line}") + continue + + if mode == "file": + self.verbatim_files[current_verbatim_file].append(raw_line) + continue + + self.error(f"sby file syntax error: {line}") + + def error(self, logmessage): + raise SbyAbort(logmessage) + +class SbyTask(SbyConfig): + def __init__(self, sbyconfig, workdir, early_logs, reusedir): + super().__init__() + self.used_options = set() self.models = dict() self.workdir = workdir self.reusedir = reusedir @@ -550,94 +646,8 @@ def update_status(self, new_status): assert 0 def run(self, setupmode): - mode = None - key = None - with open(f"{self.workdir}/config.sby", "r") as f: - for line in f: - raw_line = line - if mode in ["options", "engines", "files"]: - line = re.sub(r"\s*(\s#.*)?$", "", line) - if line == "" or line[0] == "#": - continue - else: - line = line.rstrip() - # print(line) - if mode is None and (len(line) == 0 or line[0] == "#"): - continue - match = re.match(r"^\s*\[(.*)\]\s*$", line) - if match: - entries = match.group(1).split() - if len(entries) == 0: - self.error(f"sby file syntax error: {line}") - - if entries[0] == "options": - mode = "options" - if len(self.options) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") - continue - - if entries[0] == "engines": - mode = "engines" - if len(self.engines) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") - continue - - if entries[0] == "script": - mode = "script" - if len(self.script) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") - continue - - if entries[0] == "file": - mode = "file" - if len(entries) != 2: - self.error(f"sby file syntax error: {line}") - current_verbatim_file = entries[1] - if current_verbatim_file in self.verbatim_files: - self.error(f"duplicate file: {entries[1]}") - self.verbatim_files[current_verbatim_file] = list() - continue - - if entries[0] == "files": - mode = "files" - if len(entries) != 1: - self.error(f"sby file syntax error: {line}") - continue - - self.error(f"sby file syntax error: {line}") - - if mode == "options": - entries = 
line.split() - if len(entries) != 2: - self.error(f"sby file syntax error: {line}") - self.options[entries[0]] = entries[1] - continue - - if mode == "engines": - entries = line.split() - self.engines.append(entries) - continue - - if mode == "script": - self.script.append(line) - continue - - if mode == "files": - entries = line.split() - if len(entries) == 1: - self.files[os.path.basename(entries[0])] = entries[0] - elif len(entries) == 2: - self.files[entries[0]] = entries[1] - else: - self.error(f"sby file syntax error: {line}") - continue - - if mode == "file": - self.verbatim_files[current_verbatim_file].append(raw_line) - continue - - self.error(f"sby file syntax error: {line}") + self.parse_config(f) self.handle_str_option("mode", None) From 8da6f07cb3c11375ac59b625aee4f75b40ca8464 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 11 Apr 2022 17:39:05 +0200 Subject: [PATCH 036/220] Refactor tests Organize tests into subdirectories and use a new makefile that scans .sby files and allows selecting tests by mode, engine, solver and/or subdirectory. Automatically skips tests that use engines/solvers that are not found in the PATH. See `cd tests; make help` for a description of supported make targets. --- tests/.gitignore | 18 +-- tests/Makefile | 32 ++--- tests/{ => junit}/JUnit.xsd | 0 tests/junit/Makefile | 2 + tests/{ => junit}/junit_assert.sby | 0 tests/junit/junit_assert.sh | 4 + tests/{ => junit}/junit_cover.sby | 0 tests/junit/junit_cover.sh | 4 + tests/{scripted => junit}/junit_expect.sby | 0 tests/junit/junit_expect.sh | 4 + tests/{ => junit}/junit_nocodeloc.sby | 0 tests/junit/junit_nocodeloc.sh | 4 + tests/{ => junit}/junit_timeout_error.sby | 0 tests/junit/junit_timeout_error.sh | 4 + tests/{ => junit}/validate_junit.py | 0 tests/keepgoing/Makefile | 2 + tests/{ => keepgoing}/check_output.py | 0 tests/keepgoing/keepgoing_multi_step.py | 31 ++++ .../{ => keepgoing}/keepgoing_multi_step.sby | 0 tests/keepgoing/keepgoing_multi_step.sh | 4 + tests/{ => keepgoing}/keepgoing_multi_step.sv | 0 .../keepgoing_same_step.py} | 11 +- tests/{ => keepgoing}/keepgoing_same_step.sby | 0 tests/keepgoing/keepgoing_same_step.sh | 4 + tests/{ => keepgoing}/keepgoing_same_step.sv | 0 .../keepgoing_smtc.py} | 15 +- tests/{ => keepgoing}/keepgoing_smtc.sby | 0 tests/keepgoing/keepgoing_smtc.sh | 4 + tests/keepgoing_multi_step.check.py | 29 ---- tests/make/collect_tests.py | 47 ++++++ tests/make/help.txt | 20 +++ tests/make/subdir.mk | 15 ++ tests/make/test_rules.py | 135 ++++++++++++++++++ tests/regression/Makefile | 2 + .../aim_vs_smt2_nonzero_start_offset.sby | 17 ++- .../invalid_ff_dcinit_merge.sby | 0 tests/scripted/.gitignore | 1 - tests/scripted/Makefile | 11 -- tests/scripted/junit_expect.sh | 5 - tests/{ => unsorted}/2props1trace.sby | 0 tests/unsorted/Makefile | 2 + tests/{ => unsorted}/both_ex.sby | 0 tests/{ => unsorted}/both_ex.v | 0 tests/{ => unsorted}/cover.sby | 0 tests/{ => unsorted}/cover.sv | 0 tests/{ => unsorted}/cover_fail.sby | 0 tests/{ => unsorted}/demo.sby | 0 tests/{ => unsorted}/demo.sv | 0 tests/{ => unsorted}/memory.sby | 0 tests/{ => unsorted}/memory.sv | 0 tests/{ => unsorted}/mixed.sby | 0 tests/{ => unsorted}/mixed.v | 0 tests/{ => unsorted}/multi_assert.sby | 0 tests/{ => unsorted}/preunsat.sby | 0 tests/{ => unsorted}/prv32fmcmp.sby | 2 +- tests/{ => unsorted}/prv32fmcmp.v | 0 tests/{ => unsorted}/redxor.sby | 0 tests/{ => unsorted}/redxor.v | 0 tests/{ => unsorted}/stopfirst.sby | 0 tests/{ => unsorted}/submod_props.sby | 0 60 files changed, 328 
insertions(+), 101 deletions(-) rename tests/{ => junit}/JUnit.xsd (100%) create mode 100644 tests/junit/Makefile rename tests/{ => junit}/junit_assert.sby (100%) create mode 100644 tests/junit/junit_assert.sh rename tests/{ => junit}/junit_cover.sby (100%) create mode 100644 tests/junit/junit_cover.sh rename tests/{scripted => junit}/junit_expect.sby (100%) create mode 100644 tests/junit/junit_expect.sh rename tests/{ => junit}/junit_nocodeloc.sby (100%) create mode 100644 tests/junit/junit_nocodeloc.sh rename tests/{ => junit}/junit_timeout_error.sby (100%) create mode 100644 tests/junit/junit_timeout_error.sh rename tests/{ => junit}/validate_junit.py (100%) create mode 100644 tests/keepgoing/Makefile rename tests/{ => keepgoing}/check_output.py (100%) create mode 100644 tests/keepgoing/keepgoing_multi_step.py rename tests/{ => keepgoing}/keepgoing_multi_step.sby (100%) create mode 100644 tests/keepgoing/keepgoing_multi_step.sh rename tests/{ => keepgoing}/keepgoing_multi_step.sv (100%) rename tests/{keepgoing_same_step.check.py => keepgoing/keepgoing_same_step.py} (69%) rename tests/{ => keepgoing}/keepgoing_same_step.sby (100%) create mode 100644 tests/keepgoing/keepgoing_same_step.sh rename tests/{ => keepgoing}/keepgoing_same_step.sv (100%) rename tests/{keepgoing_smtc.check.py => keepgoing/keepgoing_smtc.py} (66%) rename tests/{ => keepgoing}/keepgoing_smtc.sby (100%) create mode 100644 tests/keepgoing/keepgoing_smtc.sh delete mode 100644 tests/keepgoing_multi_step.check.py create mode 100644 tests/make/collect_tests.py create mode 100644 tests/make/help.txt create mode 100644 tests/make/subdir.mk create mode 100644 tests/make/test_rules.py create mode 100644 tests/regression/Makefile rename tests/{ => regression}/aim_vs_smt2_nonzero_start_offset.sby (66%) rename tests/{ => regression}/invalid_ff_dcinit_merge.sby (100%) delete mode 100644 tests/scripted/.gitignore delete mode 100644 tests/scripted/Makefile delete mode 100644 tests/scripted/junit_expect.sh rename tests/{ => unsorted}/2props1trace.sby (100%) create mode 100644 tests/unsorted/Makefile rename tests/{ => unsorted}/both_ex.sby (100%) rename tests/{ => unsorted}/both_ex.v (100%) rename tests/{ => unsorted}/cover.sby (100%) rename tests/{ => unsorted}/cover.sv (100%) rename tests/{ => unsorted}/cover_fail.sby (100%) rename tests/{ => unsorted}/demo.sby (100%) rename tests/{ => unsorted}/demo.sv (100%) rename tests/{ => unsorted}/memory.sby (100%) rename tests/{ => unsorted}/memory.sv (100%) rename tests/{ => unsorted}/mixed.sby (100%) rename tests/{ => unsorted}/mixed.v (100%) rename tests/{ => unsorted}/multi_assert.sby (100%) rename tests/{ => unsorted}/preunsat.sby (100%) rename tests/{ => unsorted}/prv32fmcmp.sby (89%) rename tests/{ => unsorted}/prv32fmcmp.v (100%) rename tests/{ => unsorted}/redxor.sby (100%) rename tests/{ => unsorted}/redxor.v (100%) rename tests/{ => unsorted}/stopfirst.sby (100%) rename tests/{ => unsorted}/submod_props.sby (100%) diff --git a/tests/.gitignore b/tests/.gitignore index f91f05ae..9737325a 100644 --- a/tests/.gitignore +++ b/tests/.gitignore @@ -1,16 +1,2 @@ -/both_ex*/ -/cover*/ -/demo*/ -/memory*/ -/mixed*/ -/preunsat*/ -/prv32fmcmp*/ -/redxor*/ -/stopfirst*/ -/junit_*/ -/keepgoing_*/ -/submod_props*/ -/multi_assert*/ -/aim_vs_smt2_nonzero_start_offset*/ -/invalid_ff_dcinit_merge*/ -/2props1trace*/ +/make/rules +__pycache__ diff --git a/tests/Makefile b/tests/Makefile index 177688c0..eb941e23 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -1,25 +1,19 @@ -SBY_FILES=$(wildcard 
*.sby) -SBY_TESTS=$(addprefix test_,$(SBY_FILES:.sby=)) -CHECK_PY_FILES=$(wildcard *.check.py) -CHECK_PY_TASKS=$(addprefix check_,$(CHECK_PY_FILES:.check.py=)) -JUNIT_TESTS=junit_assert_pass junit_assert_fail junit_assert_preunsat \ -junit_cover_pass junit_cover_uncovered junit_cover_assert junit_cover_preunsat \ -junit_timeout_error_timeout junit_timeout_error_syntax junit_timeout_error_solver +test: -.PHONY: test validate_junit scripted +.PHONY: test clean refresh help -test: $(JUNIT_TESTS) $(CHECK_PY_TASKS) +help: + @cat make/help.txt -test_%: %.sby FORCE - python3 ../sbysrc/sby.py -f $< +export SBY_WORKDIR_GITIGNORE=1 +export SBY_MAIN=$(realpath $(dir $(firstword $(MAKEFILE_LIST)))/../sbysrc/sby.py) -$(CHECK_PY_TASKS): check_%: %.check.py test_% - python3 $< +make/rules/collect.mk: make/collect_tests.py + python3 make/collect_tests.py -$(JUNIT_TESTS): $(SBY_TESTS) - python3 validate_junit.py $@/$@.xml +make/rules/test/%.mk: + python3 make/test_rules.py $< -scripted: - make -C scripted - -FORCE: +ifneq (help,$(MAKECMDGOALS)) +include make/rules/collect.mk +endif diff --git a/tests/JUnit.xsd b/tests/junit/JUnit.xsd similarity index 100% rename from tests/JUnit.xsd rename to tests/junit/JUnit.xsd diff --git a/tests/junit/Makefile b/tests/junit/Makefile new file mode 100644 index 00000000..dd894033 --- /dev/null +++ b/tests/junit/Makefile @@ -0,0 +1,2 @@ +SUBDIR=junit +include ../make/subdir.mk diff --git a/tests/junit_assert.sby b/tests/junit/junit_assert.sby similarity index 100% rename from tests/junit_assert.sby rename to tests/junit/junit_assert.sby diff --git a/tests/junit/junit_assert.sh b/tests/junit/junit_assert.sh new file mode 100644 index 00000000..f18d8caa --- /dev/null +++ b/tests/junit/junit_assert.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 validate_junit.py $WORKDIR/$WORKDIR.xml diff --git a/tests/junit_cover.sby b/tests/junit/junit_cover.sby similarity index 100% rename from tests/junit_cover.sby rename to tests/junit/junit_cover.sby diff --git a/tests/junit/junit_cover.sh b/tests/junit/junit_cover.sh new file mode 100644 index 00000000..f18d8caa --- /dev/null +++ b/tests/junit/junit_cover.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 validate_junit.py $WORKDIR/$WORKDIR.xml diff --git a/tests/scripted/junit_expect.sby b/tests/junit/junit_expect.sby similarity index 100% rename from tests/scripted/junit_expect.sby rename to tests/junit/junit_expect.sby diff --git a/tests/junit/junit_expect.sh b/tests/junit/junit_expect.sh new file mode 100644 index 00000000..cb66b10c --- /dev/null +++ b/tests/junit/junit_expect.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +! 
python3 $SBY_MAIN -f $SBY_FILE $TASK +grep '' $WORKDIR/$WORKDIR.xml diff --git a/tests/junit_nocodeloc.sby b/tests/junit/junit_nocodeloc.sby similarity index 100% rename from tests/junit_nocodeloc.sby rename to tests/junit/junit_nocodeloc.sby diff --git a/tests/junit/junit_nocodeloc.sh b/tests/junit/junit_nocodeloc.sh new file mode 100644 index 00000000..f18d8caa --- /dev/null +++ b/tests/junit/junit_nocodeloc.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 validate_junit.py $WORKDIR/$WORKDIR.xml diff --git a/tests/junit_timeout_error.sby b/tests/junit/junit_timeout_error.sby similarity index 100% rename from tests/junit_timeout_error.sby rename to tests/junit/junit_timeout_error.sby diff --git a/tests/junit/junit_timeout_error.sh b/tests/junit/junit_timeout_error.sh new file mode 100644 index 00000000..f18d8caa --- /dev/null +++ b/tests/junit/junit_timeout_error.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 validate_junit.py $WORKDIR/$WORKDIR.xml diff --git a/tests/validate_junit.py b/tests/junit/validate_junit.py similarity index 100% rename from tests/validate_junit.py rename to tests/junit/validate_junit.py diff --git a/tests/keepgoing/Makefile b/tests/keepgoing/Makefile new file mode 100644 index 00000000..0727e8b6 --- /dev/null +++ b/tests/keepgoing/Makefile @@ -0,0 +1,2 @@ +SUBDIR=keepgoing +include ../make/subdir.mk diff --git a/tests/check_output.py b/tests/keepgoing/check_output.py similarity index 100% rename from tests/check_output.py rename to tests/keepgoing/check_output.py diff --git a/tests/keepgoing/keepgoing_multi_step.py b/tests/keepgoing/keepgoing_multi_step.py new file mode 100644 index 00000000..c724c663 --- /dev/null +++ b/tests/keepgoing/keepgoing_multi_step.py @@ -0,0 +1,31 @@ +import sys +from check_output import * + +src = "keepgoing_multi_step.sv" + +workdir = sys.argv[1] + +assert_0 = line_ref(workdir, src, "assert(0)") +step_3_7 = line_ref(workdir, src, "step 3,7") +step_5 = line_ref(workdir, src, "step 5") +step_7 = line_ref(workdir, src, "step 7") + +log = open(workdir + "/logfile.txt").read() +log_per_trace = log.split("Writing trace to VCD file")[:-1] + +assert len(log_per_trace) == 4 + + +assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[0], re.M) + +for i in range(1, 4): + assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[i], re.M) + + +assert re.search(r"Assert failed in test: %s \(.*\)$" % step_3_7, log_per_trace[1], re.M) +assert re.search(r"Assert failed in test: %s \(.*\)$" % step_5, log_per_trace[2], re.M) +assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) +assert re.search(r"Assert failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) + +pattern = f"Property ASSERT in test at {assert_0} failed. 
Trace file: engine_0/trace0.vcd" +assert re.search(pattern, open(f"{workdir}/{workdir}.xml").read()) diff --git a/tests/keepgoing_multi_step.sby b/tests/keepgoing/keepgoing_multi_step.sby similarity index 100% rename from tests/keepgoing_multi_step.sby rename to tests/keepgoing/keepgoing_multi_step.sby diff --git a/tests/keepgoing/keepgoing_multi_step.sh b/tests/keepgoing/keepgoing_multi_step.sh new file mode 100644 index 00000000..aca8be67 --- /dev/null +++ b/tests/keepgoing/keepgoing_multi_step.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 ${SBY_FILE%.sby}.py $WORKDIR diff --git a/tests/keepgoing_multi_step.sv b/tests/keepgoing/keepgoing_multi_step.sv similarity index 100% rename from tests/keepgoing_multi_step.sv rename to tests/keepgoing/keepgoing_multi_step.sv diff --git a/tests/keepgoing_same_step.check.py b/tests/keepgoing/keepgoing_same_step.py similarity index 69% rename from tests/keepgoing_same_step.check.py rename to tests/keepgoing/keepgoing_same_step.py index 35cd3140..e2739167 100644 --- a/tests/keepgoing_same_step.check.py +++ b/tests/keepgoing/keepgoing_same_step.py @@ -1,13 +1,14 @@ +import sys from check_output import * -task = "keepgoing_same_step" +workdir = sys.argv[1] src = "keepgoing_same_step.sv" -assert_a = line_ref(task, src, "assert(a)") -assert_not_a = line_ref(task, src, "assert(!a)") -assert_0 = line_ref(task, src, "assert(0)") +assert_a = line_ref(workdir, src, "assert(a)") +assert_not_a = line_ref(workdir, src, "assert(!a)") +assert_0 = line_ref(workdir, src, "assert(0)") -log = open(task + "/logfile.txt").read() +log = open(workdir + "/logfile.txt").read() log_per_trace = log.split("Writing trace to VCD file")[:-1] assert len(log_per_trace) == 2 diff --git a/tests/keepgoing_same_step.sby b/tests/keepgoing/keepgoing_same_step.sby similarity index 100% rename from tests/keepgoing_same_step.sby rename to tests/keepgoing/keepgoing_same_step.sby diff --git a/tests/keepgoing/keepgoing_same_step.sh b/tests/keepgoing/keepgoing_same_step.sh new file mode 100644 index 00000000..aca8be67 --- /dev/null +++ b/tests/keepgoing/keepgoing_same_step.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 ${SBY_FILE%.sby}.py $WORKDIR diff --git a/tests/keepgoing_same_step.sv b/tests/keepgoing/keepgoing_same_step.sv similarity index 100% rename from tests/keepgoing_same_step.sv rename to tests/keepgoing/keepgoing_same_step.sv diff --git a/tests/keepgoing_smtc.check.py b/tests/keepgoing/keepgoing_smtc.py similarity index 66% rename from tests/keepgoing_smtc.check.py rename to tests/keepgoing/keepgoing_smtc.py index 5749e3f9..e0fd27db 100644 --- a/tests/keepgoing_smtc.check.py +++ b/tests/keepgoing/keepgoing_smtc.py @@ -1,16 +1,17 @@ +import sys from check_output import * -task = "keepgoing_smtc" +workdir = sys.argv[1] src = "keepgoing_same_step.sv" -assert_a = line_ref(task, src, "assert(a)") -assert_not_a = line_ref(task, src, "assert(!a)") -assert_0 = line_ref(task, src, "assert(0)") +assert_a = line_ref(workdir, src, "assert(a)") +assert_not_a = line_ref(workdir, src, "assert(!a)") +assert_0 = line_ref(workdir, src, "assert(0)") -assert_false = line_ref(task, "extra.smtc", "assert false") -assert_distinct = line_ref(task, "extra.smtc", "assert (distinct") +assert_false = line_ref(workdir, "extra.smtc", "assert false") +assert_distinct = line_ref(workdir, "extra.smtc", "assert (distinct") -log = open(task + "/logfile.txt").read() +log = open(workdir + "/logfile.txt").read() log_per_trace = 
log.split("Writing trace to VCD file")[:-1] assert len(log_per_trace) == 4 diff --git a/tests/keepgoing_smtc.sby b/tests/keepgoing/keepgoing_smtc.sby similarity index 100% rename from tests/keepgoing_smtc.sby rename to tests/keepgoing/keepgoing_smtc.sby diff --git a/tests/keepgoing/keepgoing_smtc.sh b/tests/keepgoing/keepgoing_smtc.sh new file mode 100644 index 00000000..aca8be67 --- /dev/null +++ b/tests/keepgoing/keepgoing_smtc.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN -f $SBY_FILE $TASK +python3 ${SBY_FILE%.sby}.py $WORKDIR diff --git a/tests/keepgoing_multi_step.check.py b/tests/keepgoing_multi_step.check.py deleted file mode 100644 index 78c713f2..00000000 --- a/tests/keepgoing_multi_step.check.py +++ /dev/null @@ -1,29 +0,0 @@ -from check_output import * - -src = "keepgoing_multi_step.sv" - -for task in ["keepgoing_multi_step_bmc", "keepgoing_multi_step_prove"]: - assert_0 = line_ref(task, src, "assert(0)") - step_3_7 = line_ref(task, src, "step 3,7") - step_5 = line_ref(task, src, "step 5") - step_7 = line_ref(task, src, "step 7") - - log = open(task + "/logfile.txt").read() - log_per_trace = log.split("Writing trace to VCD file")[:-1] - - assert len(log_per_trace) == 4 - - - assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[0], re.M) - - for i in range(1, 4): - assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[i], re.M) - - - assert re.search(r"Assert failed in test: %s \(.*\)$" % step_3_7, log_per_trace[1], re.M) - assert re.search(r"Assert failed in test: %s \(.*\)$" % step_5, log_per_trace[2], re.M) - assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) - assert re.search(r"Assert failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) - - pattern = f"Property ASSERT in test at {assert_0} failed. 
Trace file: engine_0/trace0.vcd" - assert re.search(pattern, open(f"{task}/{task}.xml").read()) diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py new file mode 100644 index 00000000..cf782b99 --- /dev/null +++ b/tests/make/collect_tests.py @@ -0,0 +1,47 @@ +from pathlib import Path +import re + +tests = [] +checked_dirs = [] + +SAFE_PATH = re.compile(r"^[a-zA-Z0-9_./]*$") + +def collect(path): + # don't pick up any paths that need escaping nor any sby workdirs + if not SAFE_PATH.match(str(path)) or (path / "config.sby").exists(): + return + + checked_dirs.append(path) + for entry in path.glob("*.sby"): + filename = str(entry) + if not SAFE_PATH.match(filename): + continue + if not re.match(r"^[a-zA-Z0-9_./]*$", filename): + print(f"skipping {filename!r}, use only [a-zA-Z0-9_./] in filenames") + continue + tests.append(entry) + for entry in path.glob("*"): + if entry.is_dir(): + collect(entry) + + +collect(Path(".")) + +out_file = Path("make/rules/collect.mk") +out_file.parent.mkdir(exist_ok=True) + +with out_file.open("w") as output: + + + for checked_dir in checked_dirs: + print(f"{out_file}: {checked_dir}", file=output) + + for test in tests: + print(f"make/rules/test/{test}.mk: {test}", file=output) + for ext in [".sh", ".py"]: + script_file = test.parent / (test.stem + ext) + if script_file.exists(): + print(f"make/rules/test/{test}.mk: {script_file}", file=output) + print(f"make/rules/test/{test}.mk: make/test_rules.py", file=output) + for test in tests: + print(f"-include make/rules/test/{test}.mk", file=output) diff --git a/tests/make/help.txt b/tests/make/help.txt new file mode 100644 index 00000000..c840c4c6 --- /dev/null +++ b/tests/make/help.txt @@ -0,0 +1,20 @@ +make test: + run all tests (default) + +make clean: + remove all sby workdirs + +make test[_m_][_e_][_s_]: + run all tests that use a specific mode, engine and solver + +make : + run the test for .sby + +make refresh: + do nothing apart from refreshing generated make rules + +make help: + show this help + +running make in a subdirectory or prefixing the target with the subdirectory +limits the test selection to that directory diff --git a/tests/make/subdir.mk b/tests/make/subdir.mk new file mode 100644 index 00000000..b1f367a1 --- /dev/null +++ b/tests/make/subdir.mk @@ -0,0 +1,15 @@ +test: + @$(MAKE) -C .. $(SUBDIR)/$@ + +.PHONY: test refresh IMPLICIT_PHONY + +IMPLICIT_PHONY: + +refresh: + @$(MAKE) -C .. refresh + +help: + @$(MAKE) -C .. help + +%: IMPLICIT_PHONY + @$(MAKE) -C .. 
$(SUBDIR)/$@ diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py new file mode 100644 index 00000000..1ad49ba9 --- /dev/null +++ b/tests/make/test_rules.py @@ -0,0 +1,135 @@ +import shutil +import sys +import os +import subprocess +import json +from pathlib import Path + + +sby_file = Path(sys.argv[1]) +sby_dir = sby_file.parent + + +taskinfo = json.loads( + subprocess.check_output( + [sys.executable, os.getenv("SBY_MAIN"), "--dumptaskinfo", sby_file] + ) +) + + +def parse_engine(engine): + engine, *args = engine + default_solvers = {"smtbmc": "yices"} + for arg in args: + if not arg.startswith("-"): + return engine, arg + return engine, default_solvers.get(engine) + + +REQUIRED_TOOLS = { + ("smtbmc", "yices"): ["yices-smt2"], + ("smtbmc", "z3"): ["z3"], + ("smtbmc", "cvc4"): ["cvc4"], + ("smtbmc", "mathsat"): ["mathsat"], + ("smtbmc", "boolector"): ["boolector"], + ("smtbmc", "bitwuzla"): ["bitwuzla"], + ("smtbmc", "abc"): ["yosys-abc"], + ("aiger", "suprove"): ["suprove"], + ("aiger", "avy"): ["avy"], + ("aiger", "aigbmc"): ["aigbmc"], + ("btor", "btormc"): ["btormc"], + ("btor", "pono"): ["pono"], +} + +rules_file = Path("make/rules/test") / sby_dir / (sby_file.name + ".mk") +rules_file.parent.mkdir(exist_ok=True, parents=True) + +with rules_file.open("w") as rules: + name = str(sby_dir / sby_file.stem) + + for task, info in taskinfo.items(): + target = name + workdirname = sby_file.stem + if task: + target += f"_{task}" + workdirname += f"_{task}" + + engines = set() + solvers = set() + engine_solvers = set() + + required_tools = set() + + for engine in info["engines"]: + engine, solver = parse_engine(engine) + engines.add(engine) + required_tools.update(REQUIRED_TOOLS.get((engine, solver), ())) + if solver: + solvers.add(solver) + engine_solvers.add((engine, solver)) + + print(f".PHONY: {target}", file=rules) + print(f"{target}:", file=rules) + + shell_script = sby_dir / f"{sby_file.stem}.sh" + + missing_tools = sorted( + f"`{tool}`" for tool in required_tools if shutil.which(tool) is None + ) + + if missing_tools: + print( + f"\t@echo; echo 'SKIPPING {target}: {', '.join(missing_tools)} not found'; echo", + file=rules, + ) + + elif shell_script.exists(): + print( + f"\tcd {sby_dir} && SBY_FILE={sby_file.name} WORKDIR={workdirname} TASK={task} bash {shell_script.name}", + file=rules, + ) + else: + print( + f"\tcd {sby_dir} && python3 $(SBY_MAIN) -f {sby_file.name} {task}", + file=rules, + ) + + print(f".PHONY: clean-{target}", file=rules) + print(f"clean-{target}:", file=rules) + print(f"\trm -rf {target}", file=rules) + + test_groups = [] + + mode = info["mode"] + + test_groups.append(f"test_m_{mode}") + + for engine in sorted(engines): + test_groups.append(f"test_e_{engine}") + test_groups.append(f"test_m_{mode}_e_{engine}") + + for solver in sorted(solvers): + test_groups.append(f"test_s_{solver}") + test_groups.append(f"test_m_{mode}_s_{solver}") + + for engine, solver in sorted(engine_solvers): + test_groups.append(f"test_e_{engine}_s_{solver}") + test_groups.append(f"test_m_{mode}_e_{engine}_s_{solver}") + + prefix = "" + + for part in [*sby_dir.parts, ""]: + print(f".PHONY: {prefix}clean {prefix}test", file=rules) + print(f"{prefix}clean: clean-{target}", file=rules) + print(f"{prefix}test: {target}", file=rules) + + for test_group in test_groups: + print(f".PHONY: {prefix}{test_group}", file=rules) + print(f"{prefix}{test_group}: {target}", file=rules) + prefix += f"{part}/" + + tasks = [task for task in taskinfo.keys() if task] + + if tasks: + print(f".PHONY: 
{name}", file=rules) + print(f"{name}:", *(f"{name}_{task}" for task in tasks), file=rules) diff --git a/tests/regression/Makefile b/tests/regression/Makefile new file mode 100644 index 00000000..0d9b6848 --- /dev/null +++ b/tests/regression/Makefile @@ -0,0 +1,2 @@ +SUBDIR=regression +include ../make/subdir.mk diff --git a/tests/aim_vs_smt2_nonzero_start_offset.sby b/tests/regression/aim_vs_smt2_nonzero_start_offset.sby similarity index 66% rename from tests/aim_vs_smt2_nonzero_start_offset.sby rename to tests/regression/aim_vs_smt2_nonzero_start_offset.sby index 43095513..94591d74 100644 --- a/tests/aim_vs_smt2_nonzero_start_offset.sby +++ b/tests/regression/aim_vs_smt2_nonzero_start_offset.sby @@ -1,6 +1,9 @@ [tasks] -bmc -prove +abc_bmc3 bmc +abc_sim3 bmc +aiger_avy prove +aiger_suprove prove +abc_pdr prove [options] bmc: mode bmc @@ -9,11 +12,11 @@ expect fail wait on [engines] -bmc: abc bmc3 -bmc: abc sim3 -prove: aiger avy -prove: aiger suprove -prove: abc pdr +abc_bmc3: abc bmc3 +abc_sim3: abc sim3 +aiger_avy: aiger avy +aiger_suprove: aiger suprove +abc_pdr: abc pdr [script] read -sv test.sv diff --git a/tests/invalid_ff_dcinit_merge.sby b/tests/regression/invalid_ff_dcinit_merge.sby similarity index 100% rename from tests/invalid_ff_dcinit_merge.sby rename to tests/regression/invalid_ff_dcinit_merge.sby diff --git a/tests/scripted/.gitignore b/tests/scripted/.gitignore deleted file mode 100644 index 6403b855..00000000 --- a/tests/scripted/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/junit_*/ diff --git a/tests/scripted/Makefile b/tests/scripted/Makefile deleted file mode 100644 index ca27199f..00000000 --- a/tests/scripted/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -SH_FILES=$(wildcard *.sh) -SH_TESTS=$(addprefix test_,$(SH_FILES:.sh=)) - -test: $(SH_TESTS) - -test_%: %.sh FORCE - bash $< - -FORCE: - -.PHONY: test FORCE diff --git a/tests/scripted/junit_expect.sh b/tests/scripted/junit_expect.sh deleted file mode 100644 index 27b972d6..00000000 --- a/tests/scripted/junit_expect.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash - -# this is expected to return 1 so don't use 'set -e' -python3 ../../sbysrc/sby.py -f junit_expect.sby -grep '' junit_expect/junit_expect.xml diff --git a/tests/2props1trace.sby b/tests/unsorted/2props1trace.sby similarity index 100% rename from tests/2props1trace.sby rename to tests/unsorted/2props1trace.sby diff --git a/tests/unsorted/Makefile b/tests/unsorted/Makefile new file mode 100644 index 00000000..61c3a6fd --- /dev/null +++ b/tests/unsorted/Makefile @@ -0,0 +1,2 @@ +SUBDIR=unsorted +include ../make/subdir.mk diff --git a/tests/both_ex.sby b/tests/unsorted/both_ex.sby similarity index 100% rename from tests/both_ex.sby rename to tests/unsorted/both_ex.sby diff --git a/tests/both_ex.v b/tests/unsorted/both_ex.v similarity index 100% rename from tests/both_ex.v rename to tests/unsorted/both_ex.v diff --git a/tests/cover.sby b/tests/unsorted/cover.sby similarity index 100% rename from tests/cover.sby rename to tests/unsorted/cover.sby diff --git a/tests/cover.sv b/tests/unsorted/cover.sv similarity index 100% rename from tests/cover.sv rename to tests/unsorted/cover.sv diff --git a/tests/cover_fail.sby b/tests/unsorted/cover_fail.sby similarity index 100% rename from tests/cover_fail.sby rename to tests/unsorted/cover_fail.sby diff --git a/tests/demo.sby b/tests/unsorted/demo.sby similarity index 100% rename from tests/demo.sby rename to tests/unsorted/demo.sby diff --git a/tests/demo.sv b/tests/unsorted/demo.sv similarity index 100% rename from tests/demo.sv 
rename to tests/unsorted/demo.sv diff --git a/tests/memory.sby b/tests/unsorted/memory.sby similarity index 100% rename from tests/memory.sby rename to tests/unsorted/memory.sby diff --git a/tests/memory.sv b/tests/unsorted/memory.sv similarity index 100% rename from tests/memory.sv rename to tests/unsorted/memory.sv diff --git a/tests/mixed.sby b/tests/unsorted/mixed.sby similarity index 100% rename from tests/mixed.sby rename to tests/unsorted/mixed.sby diff --git a/tests/mixed.v b/tests/unsorted/mixed.v similarity index 100% rename from tests/mixed.v rename to tests/unsorted/mixed.v diff --git a/tests/multi_assert.sby b/tests/unsorted/multi_assert.sby similarity index 100% rename from tests/multi_assert.sby rename to tests/unsorted/multi_assert.sby diff --git a/tests/preunsat.sby b/tests/unsorted/preunsat.sby similarity index 100% rename from tests/preunsat.sby rename to tests/unsorted/preunsat.sby diff --git a/tests/prv32fmcmp.sby b/tests/unsorted/prv32fmcmp.sby similarity index 89% rename from tests/prv32fmcmp.sby rename to tests/unsorted/prv32fmcmp.sby index 2412eb86..bd4e0964 100644 --- a/tests/prv32fmcmp.sby +++ b/tests/unsorted/prv32fmcmp.sby @@ -17,5 +17,5 @@ read -sv prv32fmcmp.v prep -top prv32fmcmp [files] -../extern/picorv32.v +../../extern/picorv32.v prv32fmcmp.v diff --git a/tests/prv32fmcmp.v b/tests/unsorted/prv32fmcmp.v similarity index 100% rename from tests/prv32fmcmp.v rename to tests/unsorted/prv32fmcmp.v diff --git a/tests/redxor.sby b/tests/unsorted/redxor.sby similarity index 100% rename from tests/redxor.sby rename to tests/unsorted/redxor.sby diff --git a/tests/redxor.v b/tests/unsorted/redxor.v similarity index 100% rename from tests/redxor.v rename to tests/unsorted/redxor.v diff --git a/tests/stopfirst.sby b/tests/unsorted/stopfirst.sby similarity index 100% rename from tests/stopfirst.sby rename to tests/unsorted/stopfirst.sby diff --git a/tests/submod_props.sby b/tests/unsorted/submod_props.sby similarity index 100% rename from tests/submod_props.sby rename to tests/unsorted/submod_props.sby From ee769996d04809b3b9e2fe8bffa061f298dda98e Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 27 Apr 2022 09:02:16 +1200 Subject: [PATCH 037/220] Initial add of fifo example Has tests which pass, committing before messing with it while tidying. 
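For reference, a minimal way to exercise the new example (assuming an
installed sby on the PATH; running it as python3 sbysrc/sby.py from a
checkout should work the same way):

    cd docs/examples/fifo
    sby -f fifo.sby cover    # run only the cover task (smtbmc engine)
    sby -f fifo.sby prove    # run only the prove task (abc pdr engine)

Each run creates a fifo_<task>/ work directory, which is what the new
.gitignore entry covers. Leaving the task name off should run both tasks.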
--- docs/examples/fifo/.gitignore | 1 + docs/examples/fifo/fifo.sby | 22 ++++ docs/examples/fifo/fifo.sv | 183 ++++++++++++++++++++++++++++++++++ 3 files changed, 206 insertions(+) create mode 100644 docs/examples/fifo/.gitignore create mode 100644 docs/examples/fifo/fifo.sby create mode 100644 docs/examples/fifo/fifo.sv diff --git a/docs/examples/fifo/.gitignore b/docs/examples/fifo/.gitignore new file mode 100644 index 00000000..22b7dbdd --- /dev/null +++ b/docs/examples/fifo/.gitignore @@ -0,0 +1 @@ +fifo_* \ No newline at end of file diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby new file mode 100644 index 00000000..586e7b3f --- /dev/null +++ b/docs/examples/fifo/fifo.sby @@ -0,0 +1,22 @@ +[tasks] +cover +prove + +[options] +cover: +mode cover +-- +prove: +mode prove +-- + +[engines] +cover: smtbmc +prove: abc pdr + +[script] +read -formal fifo.sv +prep -top fifo + +[files] +fifo.sv diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv new file mode 100644 index 00000000..5d30883f --- /dev/null +++ b/docs/examples/fifo/fifo.sv @@ -0,0 +1,183 @@ +// Define our top level fifo entity +module fifo ( + input wen, ren, clk, rst_n, + input [7:0] wdata, + output [7:0] rdata, + output [3:0] count, + output full, empty +); + parameter MAX_DATA = 16; + + // internals + reg [3:0] data_count; + initial begin + data_count <= 0; + end + + // wire up our sub modules + wire [3:0] waddr, raddr; + wire wskip, rskip; + storage #(.MAX_DATA(MAX_DATA)) fifo_storage ( + .wen (wen ), + .ren (ren ), + .clk (clk ), + .rst_n (rst_n), + .waddr (waddr), + .raddr (raddr), + .wdata (wdata), + .rdata (rdata) + ); + + addr_gen #(.MAX_DATA(MAX_DATA)) fifo_writer ( + .en (wen || wskip), + .clk (clk ), + .rst_n (rst_n), + .addr (waddr) + ); + + addr_gen #(.MAX_DATA(MAX_DATA)) fifo_reader ( + .en (ren || rskip), + .clk (clk ), + .rst_n (rst_n), + .addr (raddr) + ); + + always @(posedge clk or negedge rst_n) begin + if (~rst_n) + data_count <= 0; + else if (wen && !ren && data_count < MAX_DATA-1) + data_count <= data_count + 1; + else if (ren && !wen && data_count > 0) + data_count <= data_count - 1; + else + data_count <= data_count; + end + + assign full = data_count == MAX_DATA-1; + assign empty = data_count == 0; + assign count = data_count; + + // write while full => overwrite oldest data, move read pointer + assign rskip = wen && !ren && data_count >= MAX_DATA-1; + // read while empty => read invalid data, keep write pointer in sync + assign wskip = ren && !wen && data_count == 0; + +`ifdef FORMAL + // observers + wire [4:0] addr_diff; + assign addr_diff = waddr >= raddr ? 
waddr - raddr : waddr + MAX_DATA - raddr; + + // tests should not run through a reset + // not entirely sure what this actually does + default clocking @(posedge clk); endclocking + default disable iff (~rst_n); + + // tests + always @(posedge clk or negedge rst_n) begin + // waddr and raddr are zero while reset is low + ap_reset: assert property (~rst_n |=> !waddr && !raddr); + wp_reset: cover property (rst_n); + + // waddr and raddr can only be non zero if reset is high + ap_nreset: assert property (waddr || raddr |=> $past(rst_n)); + wp_nreset: cover property (waddr || raddr); + + // count never less than zero, or more than max + ap_uflow: assert (count >= 0); + ap_uflow2: assert (raddr >= 0); + ap_oflow: assert (count < MAX_DATA); + ap_oflow2: assert (waddr < MAX_DATA); + + // count should be equal to the difference between writer and reader address + ap_count_diff: assert (count == addr_diff); + + // count should only be able to increase or decrease by 1 + ap_counts: assert (count == 0 + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); + + // read/write addresses can only increase (or stay the same) + ap_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + ap_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); + + // read/write enables enable + ap_raddr2: assert property (ren |=> raddr != $past(raddr)); + ap_waddr2: assert property (wen |=> waddr != $past(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (!ren && !full |=> raddr == $past(raddr)); + ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); + + // full and empty work as expected + ap_full: assert property (wen && !ren && count == MAX_DATA-2 |=> full); + wp_full: cover property (wen && !ren && count == MAX_DATA-2); + ap_empty: assert property (ren && !wen && count == 1 |=> empty); + wp_empty: cover property (ren && !wen && count == 1); + + // can we corrupt our data? 
+ ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); + wp_overfill: cover property (wen && full); + ap_underfill: assert property (ren && empty |=> waddr != $past(waddr)); + wp_underfill: cover property (ren && empty); + end + + // assumptions + always @(posedge clk or negedge rst_n) begin + // when writing the write data will change (so that we can line up reads with writes) + assume property (wen |=> wdata != $past(wdata)); + assume (wdata); + end +`endif + +endmodule + +// Define the fifo storage +module storage ( + input wen, ren, clk, rst_n, + input [3:0] waddr, raddr, + input [7:0] wdata, + output [7:0] rdata +); + parameter MAX_DATA = 16; + + // 8 bit data, fifo depth 16 / 4 bit address + // reset not defined + reg [7:0] data [MAX_DATA-1:0]; + always @(posedge clk) begin + if (wen) + data[waddr] <= wdata; + end + + assign rdata = data[raddr]; +endmodule + +// address generator/counter +module addr_gen ( + input en, clk, rst_n, + output reg [3:0] addr +); + parameter MAX_DATA = 16; + + initial begin + addr <= 0; + end + + // async reset + // increment address when enabled + always @(posedge clk or negedge rst_n) begin + if (~rst_n) + addr <= 0; + else if (en) + if (addr == MAX_DATA-1) + addr <= 0; + else + addr <= addr + 1; + else + addr <= addr; + end +endmodule From 679df4d898b90525ac61b6d120d81d586b99c07b Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 27 Apr 2022 09:05:16 +1200 Subject: [PATCH 038/220] Fixing .gitignore to ignore just directories --- docs/examples/fifo/.gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/fifo/.gitignore b/docs/examples/fifo/.gitignore index 22b7dbdd..2bcf7d7b 100644 --- a/docs/examples/fifo/.gitignore +++ b/docs/examples/fifo/.gitignore @@ -1 +1 @@ -fifo_* \ No newline at end of file +fifo_*/ \ No newline at end of file From ec02e25f5c289a90a6719a77e6113b2f849fbca4 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 27 Apr 2022 09:24:14 +1200 Subject: [PATCH 039/220] Split fifo.sv into two files fifo.sv contains the components, top.sv for toplevel design. 
--- docs/examples/fifo/fifo.sby | 4 +- docs/examples/fifo/fifo.sv | 138 ------------------------------------ docs/examples/fifo/top.sv | 137 +++++++++++++++++++++++++++++++++++ 3 files changed, 140 insertions(+), 139 deletions(-) create mode 100644 docs/examples/fifo/top.sv diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby index 586e7b3f..d6f6c273 100644 --- a/docs/examples/fifo/fifo.sby +++ b/docs/examples/fifo/fifo.sby @@ -15,8 +15,10 @@ cover: smtbmc prove: abc pdr [script] -read -formal fifo.sv +read -sv fifo.sv +read -formal top.sv prep -top fifo [files] fifo.sv +top.sv diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 5d30883f..5e7e6c8e 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -1,141 +1,3 @@ -// Define our top level fifo entity -module fifo ( - input wen, ren, clk, rst_n, - input [7:0] wdata, - output [7:0] rdata, - output [3:0] count, - output full, empty -); - parameter MAX_DATA = 16; - - // internals - reg [3:0] data_count; - initial begin - data_count <= 0; - end - - // wire up our sub modules - wire [3:0] waddr, raddr; - wire wskip, rskip; - storage #(.MAX_DATA(MAX_DATA)) fifo_storage ( - .wen (wen ), - .ren (ren ), - .clk (clk ), - .rst_n (rst_n), - .waddr (waddr), - .raddr (raddr), - .wdata (wdata), - .rdata (rdata) - ); - - addr_gen #(.MAX_DATA(MAX_DATA)) fifo_writer ( - .en (wen || wskip), - .clk (clk ), - .rst_n (rst_n), - .addr (waddr) - ); - - addr_gen #(.MAX_DATA(MAX_DATA)) fifo_reader ( - .en (ren || rskip), - .clk (clk ), - .rst_n (rst_n), - .addr (raddr) - ); - - always @(posedge clk or negedge rst_n) begin - if (~rst_n) - data_count <= 0; - else if (wen && !ren && data_count < MAX_DATA-1) - data_count <= data_count + 1; - else if (ren && !wen && data_count > 0) - data_count <= data_count - 1; - else - data_count <= data_count; - end - - assign full = data_count == MAX_DATA-1; - assign empty = data_count == 0; - assign count = data_count; - - // write while full => overwrite oldest data, move read pointer - assign rskip = wen && !ren && data_count >= MAX_DATA-1; - // read while empty => read invalid data, keep write pointer in sync - assign wskip = ren && !wen && data_count == 0; - -`ifdef FORMAL - // observers - wire [4:0] addr_diff; - assign addr_diff = waddr >= raddr ? 
waddr - raddr : waddr + MAX_DATA - raddr; - - // tests should not run through a reset - // not entirely sure what this actually does - default clocking @(posedge clk); endclocking - default disable iff (~rst_n); - - // tests - always @(posedge clk or negedge rst_n) begin - // waddr and raddr are zero while reset is low - ap_reset: assert property (~rst_n |=> !waddr && !raddr); - wp_reset: cover property (rst_n); - - // waddr and raddr can only be non zero if reset is high - ap_nreset: assert property (waddr || raddr |=> $past(rst_n)); - wp_nreset: cover property (waddr || raddr); - - // count never less than zero, or more than max - ap_uflow: assert (count >= 0); - ap_uflow2: assert (raddr >= 0); - ap_oflow: assert (count < MAX_DATA); - ap_oflow2: assert (waddr < MAX_DATA); - - // count should be equal to the difference between writer and reader address - ap_count_diff: assert (count == addr_diff); - - // count should only be able to increase or decrease by 1 - ap_counts: assert (count == 0 - || count == $past(count) - || count == $past(count) + 1 - || count == $past(count) - 1); - - // read/write addresses can only increase (or stay the same) - ap_raddr: assert (raddr == 0 - || raddr == $past(raddr) - || raddr == $past(raddr + 1)); - ap_waddr: assert (waddr == 0 - || waddr == $past(waddr) - || waddr == $past(waddr + 1)); - - // read/write enables enable - ap_raddr2: assert property (ren |=> raddr != $past(raddr)); - ap_waddr2: assert property (wen |=> waddr != $past(waddr)); - - // read/write needs enable UNLESS full/empty - ap_raddr3: assert property (!ren && !full |=> raddr == $past(raddr)); - ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); - - // full and empty work as expected - ap_full: assert property (wen && !ren && count == MAX_DATA-2 |=> full); - wp_full: cover property (wen && !ren && count == MAX_DATA-2); - ap_empty: assert property (ren && !wen && count == 1 |=> empty); - wp_empty: cover property (ren && !wen && count == 1); - - // can we corrupt our data? 
- ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); - wp_overfill: cover property (wen && full); - ap_underfill: assert property (ren && empty |=> waddr != $past(waddr)); - wp_underfill: cover property (ren && empty); - end - - // assumptions - always @(posedge clk or negedge rst_n) begin - // when writing the write data will change (so that we can line up reads with writes) - assume property (wen |=> wdata != $past(wdata)); - assume (wdata); - end -`endif - -endmodule - // Define the fifo storage module storage ( input wen, ren, clk, rst_n, diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv new file mode 100644 index 00000000..07f40040 --- /dev/null +++ b/docs/examples/fifo/top.sv @@ -0,0 +1,137 @@ +// Define our top level fifo entity +module fifo ( + input wen, ren, clk, rst_n, + input [7:0] wdata, + output [7:0] rdata, + output [3:0] count, + output full, empty +); + parameter MAX_DATA = 16; + + // internals + reg [3:0] data_count; + initial begin + data_count <= 0; + end + + // wire up our sub modules + wire [3:0] waddr, raddr; + wire wskip, rskip; + storage #(.MAX_DATA(MAX_DATA)) fifo_storage ( + .wen (wen ), + .ren (ren ), + .clk (clk ), + .rst_n (rst_n), + .waddr (waddr), + .raddr (raddr), + .wdata (wdata), + .rdata (rdata) + ); + + addr_gen #(.MAX_DATA(MAX_DATA)) fifo_writer ( + .en (wen || wskip), + .clk (clk ), + .rst_n (rst_n), + .addr (waddr) + ); + + addr_gen #(.MAX_DATA(MAX_DATA)) fifo_reader ( + .en (ren || rskip), + .clk (clk ), + .rst_n (rst_n), + .addr (raddr) + ); + + always @(posedge clk or negedge rst_n) begin + if (~rst_n) + data_count <= 0; + else if (wen && !ren && data_count < MAX_DATA-1) + data_count <= data_count + 1; + else if (ren && !wen && data_count > 0) + data_count <= data_count - 1; + else + data_count <= data_count; + end + + assign full = data_count == MAX_DATA-1; + assign empty = data_count == 0; + assign count = data_count; + + // write while full => overwrite oldest data, move read pointer + assign rskip = wen && !ren && data_count >= MAX_DATA-1; + // read while empty => read invalid data, keep write pointer in sync + assign wskip = ren && !wen && data_count == 0; + +`ifdef FORMAL + // observers + wire [4:0] addr_diff; + assign addr_diff = waddr >= raddr ? 
waddr - raddr : waddr + MAX_DATA - raddr; + + // tests should not run through a reset + // not entirely sure what this actually does + default clocking @(posedge clk); endclocking + default disable iff (~rst_n); + + // tests + always @(posedge clk or negedge rst_n) begin + // waddr and raddr are zero while reset is low + ap_reset: assert property (~rst_n |=> !waddr && !raddr); + wp_reset: cover property (rst_n); + + // waddr and raddr can only be non zero if reset is high + ap_nreset: assert property (waddr || raddr |=> $past(rst_n)); + wp_nreset: cover property (waddr || raddr); + + // count never less than zero, or more than max + ap_uflow: assert (count >= 0); + ap_uflow2: assert (raddr >= 0); + ap_oflow: assert (count < MAX_DATA); + ap_oflow2: assert (waddr < MAX_DATA); + + // count should be equal to the difference between writer and reader address + ap_count_diff: assert (count == addr_diff); + + // count should only be able to increase or decrease by 1 + ap_counts: assert (count == 0 + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); + + // read/write addresses can only increase (or stay the same) + ap_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + ap_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); + + // read/write enables enable + ap_raddr2: assert property (ren |=> raddr != $past(raddr)); + ap_waddr2: assert property (wen |=> waddr != $past(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (!ren && !full |=> raddr == $past(raddr)); + ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); + + // full and empty work as expected + ap_full: assert property (wen && !ren && count == MAX_DATA-2 |=> full); + wp_full: cover property (wen && !ren && count == MAX_DATA-2); + ap_empty: assert property (ren && !wen && count == 1 |=> empty); + wp_empty: cover property (ren && !wen && count == 1); + + // can we corrupt our data? 
+ ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); + wp_overfill: cover property (wen && full); + ap_underfill: assert property (ren && empty |=> waddr != $past(waddr)); + wp_underfill: cover property (ren && empty); + end + + // assumptions + always @(posedge clk or negedge rst_n) begin + // when writing the write data will change (so that we can line up reads with writes) + assume property (wen |=> wdata != $past(wdata)); + assume (wdata); + end +`endif + +endmodule From e106d5c16186ec46b5121a592b721fcd8da45aab Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 27 Apr 2022 09:36:44 +1200 Subject: [PATCH 040/220] Adjusting assumptions --- docs/examples/fifo/top.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index 07f40040..5e066925 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -128,9 +128,9 @@ module fifo ( // assumptions always @(posedge clk or negedge rst_n) begin - // when writing the write data will change (so that we can line up reads with writes) + // data will change when writing (and only when writing) so we can line up reads with writes assume property (wen |=> wdata != $past(wdata)); - assume (wdata); + assume property (!wen |=> wdata == $past(wdata)); end `endif From e8c5ae678d6cff315b057ad59ea3705f16eb343d Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 2 May 2022 10:31:51 +1200 Subject: [PATCH 041/220] Adding instructions for CAD Currently taken verbatim from this repo's README.md --- docs/source/install.rst | 28 ++++++++++++++++++++++++++-- 1 file changed, 26 insertions(+), 2 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 293ee719..7f198b2f 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -1,5 +1,29 @@ -Installing -========== +CAD Suite(s) +============ + +SymbiYosys (sby) is part of the `Tabby CAD Suite +`_ and the `OSS CAD Suite +`_! The easiest way to use sby +is to install the binary software suite, which contains all required +dependencies, including all supported solvers. + +* `Contact YosysHQ `_ for a `Tabby CAD Suite + `_ Evaluation License and + download link +* OR go to https://github.com/YosysHQ/oss-cad-suite-build/releases to download + the free OSS CAD Suite +* Follow the `Install Instructions on GitHub + `_ + +Make sure to get a Tabby CAD Suite Evaluation License for extensive +SystemVerilog Assertion (SVA) support, as well as industry-grade SystemVerilog +and VHDL parsers! + +For more information about the difference between Tabby CAD Suite and the OSS +CAD Suite, please visit https://www.yosyshq.com/tabby-cad-datasheet. + +Installing from source +====================== Follow the instructions below to install SymbiYosys and its dependencies. Yosys, SymbiYosys, and Z3 are non-optional. 
The other packages are only From f33c2eda522214fe4a9371b368cbcbdc179c5aa2 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 2 May 2022 10:43:10 +1200 Subject: [PATCH 042/220] Updating/rearranging links --- docs/source/install.rst | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 7f198b2f..98dcc11b 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -45,7 +45,7 @@ Installing prerequisites (this command is for Ubuntu 16.04): Yosys, Yosys-SMTBMC and ABC --------------------------- -https://yosyshq.net/yosys/ +https://www.yosyshq.com/open-source https://people.eecs.berkeley.edu/~alanmi/abc/ @@ -53,7 +53,7 @@ Next install Yosys, Yosys-SMTBMC and ABC (``yosys-abc``): .. code-block:: text - git clone https://github.com/YosysHQ/yosys.git yosys + git clone https://github.com/YosysHQ/yosys cd yosys make -j$(nproc) sudo make install @@ -61,39 +61,39 @@ Next install Yosys, Yosys-SMTBMC and ABC (``yosys-abc``): SymbiYosys ---------- -https://github.com/YosysHQ/SymbiYosys +https://github.com/YosysHQ/sby .. code-block:: text - git clone https://github.com/YosysHQ/SymbiYosys.git SymbiYosys - cd SymbiYosys + git clone https://github.com/YosysHQ/sby + cd sby sudo make install -Yices 2 -------- +Z3 +-- -http://yices.csl.sri.com/ +https://github.com/Z3Prover/z3/wiki .. code-block:: text - git clone https://github.com/SRI-CSL/yices2.git yices2 - cd yices2 - autoconf - ./configure + git clone https://github.com/Z3Prover/z3 + cd z3 + python scripts/mk_make.py + cd build make -j$(nproc) sudo make install -Z3 --- +Yices 2 +------- -https://github.com/Z3Prover/z3/wiki +http://yices.csl.sri.com/ .. code-block:: text - git clone https://github.com/Z3Prover/z3.git z3 - cd z3 - python scripts/mk_make.py - cd build + git clone https://github.com/SRI-CSL/yices2 + cd yices2 + autoconf + ./configure make -j$(nproc) sudo make install From 48d846d529485b0b5a02774609bc1530e482c236 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 2 May 2022 12:20:27 +1200 Subject: [PATCH 043/220] Adjusting for use with OSS i.e. doesn't use concurrent assertions --- docs/examples/fifo/top.sv | 124 +++++++++++++++++++------------------- 1 file changed, 63 insertions(+), 61 deletions(-) diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index 5e066925..e153a57c 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -54,7 +54,7 @@ module fifo ( end assign full = data_count == MAX_DATA-1; - assign empty = data_count == 0; + assign empty = (data_count == 0) && rst_n; assign count = data_count; // write while full => overwrite oldest data, move read pointer @@ -65,72 +65,74 @@ module fifo ( `ifdef FORMAL // observers wire [4:0] addr_diff; - assign addr_diff = waddr >= raddr ? waddr - raddr : waddr + MAX_DATA - raddr; - - // tests should not run through a reset - // not entirely sure what this actually does - default clocking @(posedge clk); endclocking - default disable iff (~rst_n); + assign addr_diff = waddr >= raddr + ? 
waddr - raddr + : waddr + MAX_DATA - raddr; // tests - always @(posedge clk or negedge rst_n) begin - // waddr and raddr are zero while reset is low - ap_reset: assert property (~rst_n |=> !waddr && !raddr); - wp_reset: cover property (rst_n); - - // waddr and raddr can only be non zero if reset is high - ap_nreset: assert property (waddr || raddr |=> $past(rst_n)); - wp_nreset: cover property (waddr || raddr); - - // count never less than zero, or more than max - ap_uflow: assert (count >= 0); - ap_uflow2: assert (raddr >= 0); - ap_oflow: assert (count < MAX_DATA); - ap_oflow2: assert (waddr < MAX_DATA); - - // count should be equal to the difference between writer and reader address - ap_count_diff: assert (count == addr_diff); - - // count should only be able to increase or decrease by 1 - ap_counts: assert (count == 0 - || count == $past(count) - || count == $past(count) + 1 - || count == $past(count) - 1); - - // read/write addresses can only increase (or stay the same) - ap_raddr: assert (raddr == 0 - || raddr == $past(raddr) - || raddr == $past(raddr + 1)); - ap_waddr: assert (waddr == 0 - || waddr == $past(waddr) - || waddr == $past(waddr + 1)); - - // read/write enables enable - ap_raddr2: assert property (ren |=> raddr != $past(raddr)); - ap_waddr2: assert property (wen |=> waddr != $past(waddr)); - - // read/write needs enable UNLESS full/empty - ap_raddr3: assert property (!ren && !full |=> raddr == $past(raddr)); - ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); - - // full and empty work as expected - ap_full: assert property (wen && !ren && count == MAX_DATA-2 |=> full); - wp_full: cover property (wen && !ren && count == MAX_DATA-2); - ap_empty: assert property (ren && !wen && count == 1 |=> empty); - wp_empty: cover property (ren && !wen && count == 1); - - // can we corrupt our data? 
- ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); - wp_overfill: cover property (wen && full); - ap_underfill: assert property (ren && empty |=> waddr != $past(waddr)); - wp_underfill: cover property (ren && empty); + always @(posedge clk) begin + if (rst_n) begin + // waddr and raddr can only be non zero if reset is high + w_nreset: cover (waddr || raddr); + + // count never less than zero, or more than max + a_uflow: assert (count >= 0); + a_uflow2: assert (raddr >= 0); + a_oflow: assert (count < MAX_DATA); + a_oflow2: assert (waddr < MAX_DATA); + + // count should be equal to the difference between writer and reader address + a_count_diff: assert (count == addr_diff); + + // count should only be able to increase or decrease by 1 + a_counts: assert (count == 0 + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); + + // read/write addresses can only increase (or stay the same) + a_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + a_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); + + // read/write enables enable + // ap_raddr2: assert property (ren |=> raddr != $past(raddr)); + // ap_waddr2: assert property (wen |=> waddr != $past(waddr)); + + // read/write needs enable UNLESS full/empty + // ap_raddr3: assert property (!ren && !full |=> raddr == $past(raddr)); + // ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); + + // full and empty work as expected + a_full: assert (!full || full && count == MAX_DATA-1); + w_full: cover (wen && !ren && count == MAX_DATA-2); + a_empty: assert (!empty || empty && count == 0); + w_empty: cover property (ren && !wen && count == 1); + + // can we corrupt our data? 
+ // ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); + w_overfill: cover ($past(rskip) && raddr); + // ap_underfill: assert property (ren && empty |=> waddr != $past(waddr)); + w_underfill: cover ($past(wskip) && waddr); + end else begin + // waddr and raddr are zero while reset is low + a_reset: assert (!waddr && !raddr); + w_reset: cover (~rst_n); + + // outputs are zero while reset is low + a_zero_out: assert (!empty && !full && !count); + end end // assumptions - always @(posedge clk or negedge rst_n) begin + always @(posedge clk) begin // data will change when writing (and only when writing) so we can line up reads with writes - assume property (wen |=> wdata != $past(wdata)); - assume property (!wen |=> wdata == $past(wdata)); + assume ((wen && wdata != $past(wdata)) || (!wen && wdata == $past(wdata))); + // assume property (wen |=> wdata != $past(wdata)); + // assume property (!wen |=> wdata == $past(wdata)); end `endif From 60de15293dce8816a15c731d8c9fd943b230b881 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 2 May 2022 12:34:57 +1200 Subject: [PATCH 044/220] Now actually fills up properly As opposed to only storing MAX-1 --- docs/examples/fifo/top.sv | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index e153a57c..0db2ece9 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -3,13 +3,13 @@ module fifo ( input wen, ren, clk, rst_n, input [7:0] wdata, output [7:0] rdata, - output [3:0] count, + output [4:0] count, output full, empty ); parameter MAX_DATA = 16; // internals - reg [3:0] data_count; + reg [4:0] data_count; initial begin data_count <= 0; end @@ -45,7 +45,7 @@ module fifo ( always @(posedge clk or negedge rst_n) begin if (~rst_n) data_count <= 0; - else if (wen && !ren && data_count < MAX_DATA-1) + else if (wen && !ren && data_count < MAX_DATA) data_count <= data_count + 1; else if (ren && !wen && data_count > 0) data_count <= data_count - 1; @@ -53,12 +53,12 @@ module fifo ( data_count <= data_count; end - assign full = data_count == MAX_DATA-1; + assign full = data_count == MAX_DATA; assign empty = (data_count == 0) && rst_n; assign count = data_count; // write while full => overwrite oldest data, move read pointer - assign rskip = wen && !ren && data_count >= MAX_DATA-1; + assign rskip = wen && !ren && data_count >= MAX_DATA; // read while empty => read invalid data, keep write pointer in sync assign wskip = ren && !wen && data_count == 0; @@ -78,11 +78,12 @@ module fifo ( // count never less than zero, or more than max a_uflow: assert (count >= 0); a_uflow2: assert (raddr >= 0); - a_oflow: assert (count < MAX_DATA); + a_oflow: assert (count <= MAX_DATA); a_oflow2: assert (waddr < MAX_DATA); // count should be equal to the difference between writer and reader address - a_count_diff: assert (count == addr_diff); + a_count_diff: assert (count == addr_diff + || count == MAX_DATA && addr_diff == 0); // count should only be able to increase or decrease by 1 a_counts: assert (count == 0 @@ -107,8 +108,8 @@ module fifo ( // ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); // full and empty work as expected - a_full: assert (!full || full && count == MAX_DATA-1); - w_full: cover (wen && !ren && count == MAX_DATA-2); + a_full: assert (!full || full && count == MAX_DATA); + w_full: cover (wen && !ren && count == MAX_DATA-1); a_empty: assert (!empty || empty && count == 0); w_empty: cover property (ren && !wen && 
count == 1); From 8f227336980e14597c794626d84a3ccad9d1d8c9 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 4 May 2022 10:07:31 +1200 Subject: [PATCH 045/220] Revert change from yosyshq.net to yosyshq.com --- docs/source/install.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 98dcc11b..068736b3 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -45,7 +45,7 @@ Installing prerequisites (this command is for Ubuntu 16.04): Yosys, Yosys-SMTBMC and ABC --------------------------- -https://www.yosyshq.com/open-source +https://yosyshq.net/yosys/ https://people.eecs.berkeley.edu/~alanmi/abc/ From 12b854b55457636daee615eb796d9d591b18bd6a Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 4 May 2022 10:50:38 +1200 Subject: [PATCH 046/220] Headings for optional/required installs --- docs/source/install.rst | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 068736b3..686adfb8 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -42,8 +42,11 @@ Installing prerequisites (this command is for Ubuntu 16.04): libboost-program-options-dev autoconf libgmp-dev \ cmake curl +Required components +------------------- + Yosys, Yosys-SMTBMC and ABC ---------------------------- +^^^^^^^^^^^^^^^^^^^^^^^^^^^ https://yosyshq.net/yosys/ @@ -59,7 +62,7 @@ Next install Yosys, Yosys-SMTBMC and ABC (``yosys-abc``): sudo make install SymbiYosys ----------- +^^^^^^^^^^ https://github.com/YosysHQ/sby @@ -70,7 +73,7 @@ https://github.com/YosysHQ/sby sudo make install Z3 --- +^^ https://github.com/Z3Prover/z3/wiki @@ -83,8 +86,11 @@ https://github.com/Z3Prover/z3/wiki make -j$(nproc) sudo make install +Optional components +------------------- + Yices 2 -------- +^^^^^^^ http://yices.csl.sri.com/ @@ -98,7 +104,7 @@ http://yices.csl.sri.com/ sudo make install super_prove ------------ +^^^^^^^^^^^ https://github.com/sterin/super-prove-build @@ -136,7 +142,7 @@ And make this wrapper script executable: sudo chmod +x /usr/local/bin/suprove Avy ---- +^^^ https://arieg.bitbucket.io/avy/ @@ -151,7 +157,7 @@ https://arieg.bitbucket.io/avy/ sudo cp avy/src/{avy,avybmc} /usr/local/bin/ Boolector ---------- +^^^^^^^^^ http://fmv.jku.at/boolector/ From 7468e7655d25f98d6ccc43b41e4d94157e85b512 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 10 May 2022 11:03:40 +1200 Subject: [PATCH 047/220] Alignment fixing --- docs/examples/fifo/top.sv | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index 0db2ece9..428bf1fc 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -108,10 +108,10 @@ module fifo ( // ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); // full and empty work as expected - a_full: assert (!full || full && count == MAX_DATA); - w_full: cover (wen && !ren && count == MAX_DATA-1); + a_full: assert (!full || full && count == MAX_DATA); + w_full: cover (wen && !ren && count == MAX_DATA-1); a_empty: assert (!empty || empty && count == 0); - w_empty: cover property (ren && !wen && count == 1); + w_empty: cover (ren && !wen && count == 1); // can we corrupt our data? 
// ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); From ee15ebd0f12bd727b4ce941868f9b49c851c35a1 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 10 May 2022 11:40:17 +1200 Subject: [PATCH 048/220] Title case for license.rst --- docs/source/license.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/license.rst b/docs/source/license.rst index e102ae18..786dc596 100644 --- a/docs/source/license.rst +++ b/docs/source/license.rst @@ -1,5 +1,5 @@ -SymbiYosys License +SymbiYosys license ================== SymbiYosys (sby) itself is licensed under the ISC license: From 7ec35dc4255bec380ecc619ff99cb0d8d6bae6f0 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 10 May 2022 11:41:01 +1200 Subject: [PATCH 049/220] Adding (small) intro to installation guide Also a cross reference link. --- docs/source/install.rst | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 686adfb8..db555078 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -1,5 +1,12 @@ -CAD Suite(s) -============ +.. _install-doc: + +Installation guide +================== + +This document will guide you through the process of installing SymbiYosys. + +CAD suite(s) +************ SymbiYosys (sby) is part of the `Tabby CAD Suite `_ and the `OSS CAD Suite @@ -23,7 +30,7 @@ For more information about the difference between Tabby CAD Suite and the OSS CAD Suite, please visit https://www.yosyshq.com/tabby-cad-datasheet. Installing from source -====================== +********************** Follow the instructions below to install SymbiYosys and its dependencies. Yosys, SymbiYosys, and Z3 are non-optional. The other packages are only From 21dfd355169ea25158089a42823a9c6102d0cef6 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 10 May 2022 11:41:15 +1200 Subject: [PATCH 050/220] Adding new Getting started guide --- docs/source/index.rst | 2 +- docs/source/newstart.rst | 9 +++++++++ 2 files changed, 10 insertions(+), 1 deletion(-) create mode 100644 docs/source/newstart.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index 0527fb4e..4ee57a4b 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -20,7 +20,7 @@ at the moment.) :maxdepth: 3 install.rst - quickstart.rst + newstart.rst reference.rst verilog.rst verific.rst diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst new file mode 100644 index 00000000..95921e0f --- /dev/null +++ b/docs/source/newstart.rst @@ -0,0 +1,9 @@ + +Getting started +=============== + +.. note:: This tutorial assumes sby installation as per the :ref:`install-doc`. + It is also recommended to install + `GTKWave `_, an open source VCD viewer. + + From fc9ff3d73323d900e1f5d44b7af121647818d372 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 10 May 2022 12:08:49 +1200 Subject: [PATCH 051/220] Initial FIFO description --- docs/source/newstart.rst | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 95921e0f..0f13f2b3 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -6,4 +6,31 @@ Getting started It is also recommended to install `GTKWave `_, an open source VCD viewer. 
+First In, First Out (FIFO) buffer +******************************** +From `Wikipedia `_, +a FIFO is + + a method for organizing the manipulation of a data structure (often, + specifically a data buffer) where the oldest (first) entry, or "head" of the + queue, is processed first. + + Such processing is analogous to servicing people in a queue area on a + first-come, first-served (FCFS) basis, i.e. in the same sequence in which + they arrive at the queue's tail. + +In hardware we can create such a construct by providing two addresses into a +register file. See the Verilog code below for the two main modules of an +example implementation. + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + +Notice that this register design includes a synchronous write and asynchronous +read. Each word is 8 bits, and up to 16 words can be stored in the buffer. The +address generator module will be instantiated twice; once for the write address +and once for the read address. In both cases, the address will start at and +reset to 0, and will increment by 1 when an enable signal is received. When the +address pointers increment from the maximum storage value they reset back to 0, +providing a circular queue. From fedfae0e9c1b0e5fdd81b268f7b94e40f08e9349 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 11 May 2022 10:38:54 +0200 Subject: [PATCH 052/220] examples: Fix use of SVA value change expressions The $stable value change expression cannot be true for a non-x signal in the initial state. This is now correctly handled by the verific import, so the dpmem example needs to start assuming `$stable` only after leaving the initial state. --- docs/examples/multiclk/dpmem.sv | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/examples/multiclk/dpmem.sv b/docs/examples/multiclk/dpmem.sv index 87e4f61f..4a920e4e 100644 --- a/docs/examples/multiclk/dpmem.sv +++ b/docs/examples/multiclk/dpmem.sv @@ -47,9 +47,9 @@ module top ( (* gclk *) reg gclk; always @(posedge gclk) begin - assume ($stable(rc) || $stable(wc)); - if (!init) begin + assume ($stable(rc) || $stable(wc)); + if ($rose(rc) && shadow_valid && shadow_addr == $past(ra)) begin assert (shadow_data == rd); end From ad2c33dd373b8764eb8dff7ad8107632a0bacb88 Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Tue, 24 May 2022 11:39:10 +0200 Subject: [PATCH 053/220] docs: add instructions for newer btorsim version required --- docs/source/install.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/docs/source/install.rst b/docs/source/install.rst index 293ee719..50fc45b5 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -142,3 +142,11 @@ http://fmv.jku.at/boolector/ sudo cp build/bin/{boolector,btor*} /usr/local/bin/ sudo cp deps/btor2tools/bin/btorsim /usr/local/bin/ +To use the ``btor`` engine you additionally need a newer version of btorsim than the boolector setup script builds: + +.. code-block:: text + + git clone https://github.com/boolector/btor2tools + ./configure.sh + cmake . 
+ make install From 3f32deb8c9a8deffeff339c5447464b4f80dea22 Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Tue, 24 May 2022 17:51:48 -0700 Subject: [PATCH 054/220] add test for yosys's $divfloor and $modfloor cells Depends on: https://github.com/YosysHQ/yosys/pull/3335 --- tests/unsorted/floor_divmod.sby | 44 +++++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 tests/unsorted/floor_divmod.sby diff --git a/tests/unsorted/floor_divmod.sby b/tests/unsorted/floor_divmod.sby new file mode 100644 index 00000000..53218cc7 --- /dev/null +++ b/tests/unsorted/floor_divmod.sby @@ -0,0 +1,44 @@ +[options] +mode bmc + +[engines] +smtbmc + +[script] +read_verilog -icells -formal test.v +prep -top top + +[file test.v] +module top; + wire [7:0] a = $anyconst, b = $anyconst, fdiv, fmod, a2; + assign a2 = b * fdiv + fmod; + + \$divfloor #( + .A_WIDTH(8), + .B_WIDTH(8), + .A_SIGNED(1), + .B_SIGNED(1), + .Y_WIDTH(8), + ) fdiv_m ( + .A(a), + .B(b), + .Y(fdiv) + ); + + \$modfloor #( + .A_WIDTH(8), + .B_WIDTH(8), + .A_SIGNED(1), + .B_SIGNED(1), + .Y_WIDTH(8), + ) fmod_m ( + .A(a), + .B(b), + .Y(fmod) + ); + + always @* begin + assume(b != 0); + assert(a == a2); + end +endmodule From a87d21a8020e3f48d0ecdebfa08fe43f0ce7d5fc Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Wed, 25 May 2022 03:35:21 -0700 Subject: [PATCH 055/220] add depth 1 --- tests/unsorted/floor_divmod.sby | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/unsorted/floor_divmod.sby b/tests/unsorted/floor_divmod.sby index 53218cc7..df35f8a2 100644 --- a/tests/unsorted/floor_divmod.sby +++ b/tests/unsorted/floor_divmod.sby @@ -1,5 +1,6 @@ [options] mode bmc +depth 1 [engines] smtbmc From 939e000036e8293a67baf6a359554c8189244fe5 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 30 May 2022 13:35:57 +0200 Subject: [PATCH 056/220] Makefile: Rename run_tests to test, update help, use .PHONY --- Makefile | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/Makefile b/Makefile index 3b58d87f..56824a83 100644 --- a/Makefile +++ b/Makefile @@ -10,6 +10,8 @@ ifeq ($(OS), Windows_NT) PYTHON = $(shell cygpath -w -m $(PREFIX)/bin/python3) endif +.PHONY: help install ci test html clean + help: @echo "" @echo "sudo make install" @@ -19,7 +21,11 @@ help: @echo " build documentation in docs/build/html/" @echo "" @echo "make test" - @echo " run examples" + @echo " run tests" + @echo "" + @echo "make ci" + @echo " run tests and check examples" + @echo " note: this requires a full Tabby CAD Suite or OSS CAD Suite install" @echo "" @echo "make clean" @echo " cleanup" @@ -47,7 +53,7 @@ ci: \ test_puzzles_djb2hash test_puzzles_pour853to4 test_puzzles_wolfgoatcabbage \ test_puzzles_primegen_primegen test_puzzles_primegen_primes_pass test_puzzles_primegen_primes_fail \ test_quickstart_demo test_quickstart_cover test_quickstart_prove test_quickstart_memory \ - run_tests + test if yosys -qp 'read -verific' 2> /dev/null; then set -x; \ YOSYS_NOVERIFIC=1 $(MAKE) ci; \ fi @@ -113,7 +119,7 @@ test_quickstart_prove: test_quickstart_memory: cd docs/examples/quickstart && python3 ../../../sbysrc/sby.py -f memory.sby -run_tests: +test: $(MAKE) -C tests test html: From dc22d97362162c39bd826df2600f34c6b9ba7fbe Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 30 May 2022 14:37:20 +0200 Subject: [PATCH 057/220] Better checking of available solvers Check for required auxiliary tools and always regenerate the make rules when the set of available tools changes. 
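The approach can be sketched in a few lines of Python: resolve each known solver binary with ``shutil.which`` and rewrite the cached tool list only when the result differs from what is already on disk, so the make rules that depend on it are regenerated exactly when the set of available tools changes. The snippet below is only a simplified illustration of that idea, not the code added by this patch; the tool list is abbreviated and the cache path is just an example.

.. code-block:: python

    import shutil
    from pathlib import Path

    # Abbreviated list of solver/auxiliary tools the test rules may need.
    CHECK_TOOLS = ["btormc", "pono", "btorsim", "z3", "yices-smt2", "yices"]

    def refresh_found_tools(cache=Path("make/rules/found_tools")):
        # Record only the tools that actually resolve on the PATH.
        found = "\n".join(t for t in CHECK_TOOLS if shutil.which(t)) + "\n"

        # Leave the cache untouched when nothing changed, so make does not
        # consider rules that depend on it out of date.
        if cache.exists() and cache.read_text() == found:
            return
        cache.parent.mkdir(parents=True, exist_ok=True)
        cache.write_text(found)

    if __name__ == "__main__":
        refresh_found_tools()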
--- tests/Makefile | 24 ++++++++++++++++++++++++ tests/make/collect_tests.py | 2 +- tests/make/test_rules.py | 17 +++++++++++------ 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/tests/Makefile b/tests/Makefile index eb941e23..6992f921 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -2,6 +2,23 @@ test: .PHONY: test clean refresh help +TOOL_LIST := \ + btorsim \ + yices \ + aigbmc \ + avy \ + bitwuzla \ + boolector \ + btormc \ + cvc4 \ + mathsat \ + pono \ + suprove \ + yices-smt2 \ + yices \ + yosys-abc \ + z3 + help: @cat make/help.txt @@ -15,5 +32,12 @@ make/rules/test/%.mk: python3 make/test_rules.py $< ifneq (help,$(MAKECMDGOALS)) + +FIND_TOOLS := $(shell \ + TOOLS=$$(which $(TOOL_LIST) 2>/dev/null || true); \ + echo $$TOOLS | cmp -s make/rules/found_tools || echo $$TOOLS > make/rules/found_tools \ +) + include make/rules/collect.mk + endif diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py index cf782b99..7b0cda37 100644 --- a/tests/make/collect_tests.py +++ b/tests/make/collect_tests.py @@ -37,7 +37,7 @@ def collect(path): print(f"{out_file}: {checked_dir}", file=output) for test in tests: - print(f"make/rules/test/{test}.mk: {test}", file=output) + print(f"make/rules/test/{test}.mk: {test} make/rules/found_tools", file=output) for ext in [".sh", ".py"]: script_file = test.parent / (test.stem + ext) if script_file.exists(): diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 1ad49ba9..149e524f 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -26,6 +26,8 @@ def parse_engine(engine): return engine, default_solvers.get(engine) +# When adding new tools, also update TOOL_LIST in Makefile to make sure we regenerate +# the rules when the user installs or removes any of the tools REQUIRED_TOOLS = { ("smtbmc", "yices"): ["yices-smt2"], ("smtbmc", "z3"): ["z3"], @@ -34,11 +36,12 @@ def parse_engine(engine): ("smtbmc", "boolector"): ["boolector"], ("smtbmc", "bitwuzla"): ["bitwuzla"], ("smtbmc", "abc"): ["yosys-abc"], - ("aiger", "suprove"): ["suprove"], - ("aiger", "avy"): ["avy"], - ("aiger", "aigbmc"): ["aigbmc"], - ("btor", "btormc"): ["btormc"], - ("btor", "pono"): ["pono"], + ("aiger", "suprove"): ["suprove", "yices"], + ("aiger", "avy"): ["avy", "yices"], + ("aiger", "aigbmc"): ["aigbmc", "yices"], + ("btor", "btormc"): ["btormc", "btorsim"], + ("btor", "pono"): ["pono", "btorsim"], + ("abc"): ["yices"], } rules_file = Path("make/rules/test") / sby_dir / (sby_file.name + ".mk") @@ -63,7 +66,9 @@ def parse_engine(engine): for engine in info["engines"]: engine, solver = parse_engine(engine) engines.add(engine) - required_tools.update(REQUIRED_TOOLS.get((engine, solver), ())) + required_tools.update( + REQUIRED_TOOLS.get((engine, solver), REQUIRED_TOOLS.get(engine, ())) + ) if solver: solvers.add(solver) engine_solvers.add((engine, solver)) From 206562e5de2d9f6bd976d239f1f7e4a25abee033 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 30 May 2022 15:27:14 +0200 Subject: [PATCH 058/220] Check for the tabby/oss cad suite before running make ci checks --- Makefile | 16 ++++++++++++++-- 1 file changed, 14 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 56824a83..07f85a41 100644 --- a/Makefile +++ b/Makefile @@ -45,7 +45,12 @@ else chmod +x $(DESTDIR)$(PREFIX)/bin/sby endif -ci: \ +.PHONY: check_cad_suite run_ci + +ci: check_cad_suite + @$(MAKE) run_ci + +run_ci: \ test_demo1 test_demo2 test_demo3 \ test_abstract_abstr test_abstract_props \ test_demos_fib_cover test_demos_fib_prove 
test_demos_fib_live \ @@ -55,7 +60,14 @@ ci: \ test_quickstart_demo test_quickstart_cover test_quickstart_prove test_quickstart_memory \ test if yosys -qp 'read -verific' 2> /dev/null; then set -x; \ - YOSYS_NOVERIFIC=1 $(MAKE) ci; \ + YOSYS_NOVERIFIC=1 $(MAKE) run_ci; \ + fi + +check_cad_suite: + @if ! which tabbypip >/dev/null 2>&1; then \ + echo "'make ci' requries the Tabby CAD Suite or the OSS CAD Suite"; \ + echo "try 'make test' instead or run 'make run_ci' to proceed anyway."; \ + exit 1; \ fi test_demo1: From 8e87b0f7f44a025274e3aa8c0f139f3945b007d5 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 30 May 2022 16:18:37 +0200 Subject: [PATCH 059/220] Suggest -f when the workdir already exists --- sbysrc/sby.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index d13960c7..19a962b4 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -424,7 +424,7 @@ def run_task(taskname): if reusedir: pass elif os.path.isdir(my_workdir): - print(f"ERROR: Directory '{my_workdir}' already exists.") + print(f"ERROR: Directory '{my_workdir}' already exists, use -f to overwrite the existing directory.") sys.exit(1) else: os.makedirs(my_workdir) From b18f22cf433dfd0f33d490611121856d84ac303c Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 31 May 2022 11:18:05 +1200 Subject: [PATCH 060/220] Removing install details for optional engines --- docs/source/install.rst | 78 +++++------------------------------------ 1 file changed, 8 insertions(+), 70 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index db555078..418a0c53 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -95,87 +95,25 @@ https://github.com/Z3Prover/z3/wiki Optional components ------------------- +Additional solver engines can be installed as per their instructions, links are +provided below. Yices 2 ^^^^^^^ + http://yices.csl.sri.com/ -http://yices.csl.sri.com/ - -.. code-block:: text - - git clone https://github.com/SRI-CSL/yices2 - cd yices2 - autoconf - ./configure - make -j$(nproc) - sudo make install + https://github.com/SRI-CSL/yices2 super_prove ^^^^^^^^^^^ - -https://github.com/sterin/super-prove-build - -.. code-block:: text - - sudo apt-get install cmake ninja-build g++ python-dev python-setuptools \ - python-pip git - git clone --recursive https://github.com/sterin/super-prove-build - cd super-prove-build - mkdir build - cd build - cmake -DCMAKE_BUILD_TYPE=Release -G Ninja .. - ninja - ninja package - -This creates a .tar.gz archive for the target system. Extract it to -``/usr/local/super_prove`` - -.. code-block:: text - - sudo tar -C /usr/local -x super_prove-X-Y-Release.tar.gz - -Then create a wrapper script ``/usr/local/bin/suprove`` with the following contents: - -.. code-block:: text - - #!/bin/bash - tool=super_prove; if [ "$1" != "${1#+}" ]; then tool="${1#+}"; shift; fi - exec /usr/local/super_prove/bin/${tool}.sh "$@" - -And make this wrapper script executable: - -.. code-block:: text - - sudo chmod +x /usr/local/bin/suprove + https://github.com/sterin/super-prove-build Avy ^^^ - -https://arieg.bitbucket.io/avy/ - -.. code-block:: text - - git clone https://bitbucket.org/arieg/extavy.git - cd extavy - git submodule update --init - mkdir build; cd build - cmake -DCMAKE_BUILD_TYPE=Release .. - make -j$(nproc) - sudo cp avy/src/{avy,avybmc} /usr/local/bin/ + https://arieg.bitbucket.io/avy/ Boolector ^^^^^^^^^ + http://fmv.jku.at/boolector/ -http://fmv.jku.at/boolector/ - -.. 
code-block:: text - - git clone https://github.com/boolector/boolector - cd boolector - ./contrib/setup-btor2tools.sh - ./contrib/setup-lingeling.sh - ./configure.sh - make -C build -j$(nproc) - sudo cp build/bin/{boolector,btor*} /usr/local/bin/ - sudo cp deps/btor2tools/bin/btorsim /usr/local/bin/ - + https://github.com/boolector/boolector From f5257011f6ec9a4ad82925d1f73b81205670b160 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 31 May 2022 11:31:20 +1200 Subject: [PATCH 061/220] Specifying z3 to support minimum required install --- docs/examples/fifo/fifo.sby | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby index d6f6c273..91cb3c55 100644 --- a/docs/examples/fifo/fifo.sby +++ b/docs/examples/fifo/fifo.sby @@ -11,7 +11,7 @@ mode prove -- [engines] -cover: smtbmc +cover: smtbmc z3 prove: abc pdr [script] From 41cd8e5b5e6d61b2fb28e00b7aa0616a00348b5d Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Wed, 1 Jun 2022 16:51:28 +0200 Subject: [PATCH 062/220] update install instructions for btorsim --- docs/source/install.rst | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 50fc45b5..7fed53cc 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -147,6 +147,8 @@ To use the ``btor`` engine you additionally need a newer version of btorsim than .. code-block:: text git clone https://github.com/boolector/btor2tools + cd btor2tools ./configure.sh - cmake . - make install + cmake . -DBUILD_SHARED_LIBS=OFF + make -j$(nproc) + sudo make install From 00efdecb4bdb06adecf07ad09c096c5883171697 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 2 Jun 2022 16:24:30 +0200 Subject: [PATCH 063/220] tests: Check for btorsim --vcd --- tests/Makefile | 17 +++++++--- tests/make/required_tools.py | 64 ++++++++++++++++++++++++++++++++++++ tests/make/test_rules.py | 22 ++----------- 3 files changed, 79 insertions(+), 24 deletions(-) create mode 100644 tests/make/required_tools.py diff --git a/tests/Makefile b/tests/Makefile index 6992f921..ccb983c2 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -33,10 +33,19 @@ make/rules/test/%.mk: ifneq (help,$(MAKECMDGOALS)) -FIND_TOOLS := $(shell \ - TOOLS=$$(which $(TOOL_LIST) 2>/dev/null || true); \ - echo $$TOOLS | cmp -s make/rules/found_tools || echo $$TOOLS > make/rules/found_tools \ -) +# This should run every time but only trigger anything depending on it whenever +# the script overwrites make/rules/found_tools. This doesn't really match how +# make targets usually work, so we manually shell out here. 
+ +FIND_TOOLS := $(shell python3 make/required_tools.py || echo error) + +ifneq (,$(findstring error,$(FIND_TOOLS))) +$(error could not run 'python3 make/required_tools.py') +endif + +ifneq (,$(FIND_TOOLS)) +$(warning $(FIND_TOOLS)) +endif include make/rules/collect.mk diff --git a/tests/make/required_tools.py b/tests/make/required_tools.py new file mode 100644 index 00000000..4d06e100 --- /dev/null +++ b/tests/make/required_tools.py @@ -0,0 +1,64 @@ +import shutil + +REQUIRED_TOOLS = { + ("smtbmc", "yices"): ["yices-smt2"], + ("smtbmc", "z3"): ["z3"], + ("smtbmc", "cvc4"): ["cvc4"], + ("smtbmc", "mathsat"): ["mathsat"], + ("smtbmc", "boolector"): ["boolector"], + ("smtbmc", "bitwuzla"): ["bitwuzla"], + ("smtbmc", "abc"): ["yosys-abc"], + ("aiger", "suprove"): ["suprove", "yices"], + ("aiger", "avy"): ["avy", "yices"], + ("aiger", "aigbmc"): ["aigbmc", "yices"], + ("btor", "btormc"): ["btormc", "btorsim"], + ("btor", "pono"): ["pono", "btorsim"], + ("abc"): ["yices"], +} + + +if __name__ == "__main__": + import subprocess + import sys + from pathlib import Path + + found_tools = [] + check_tools = set() + for tools in REQUIRED_TOOLS.values(): + check_tools.update(tools) + + for tool in sorted(check_tools): + if not shutil.which(tool): + continue + + if tool == "btorsim": + error_msg = subprocess.run( + ["btorsim", "--vcd"], + capture_output=True, + text=True, + ).stderr + if "invalid command line option" in error_msg: + print( + "found `btorsim` binary is too old " + "to support the `--vcd` option, ignoring" + ) + continue + + found_tools.append(tool) + + found_tools = "\n".join(found_tools + [""]) + + try: + with open("make/rules/found_tools") as found_tools_file: + if found_tools_file.read() == found_tools: + exit(0) + except FileNotFoundError: + pass + + Path("make/rules").mkdir(exist_ok=True) + + with open("make/rules/found_tools", "w") as found_tools_file: + found_tools_file.write(found_tools) +else: + with open("make/rules/found_tools") as found_tools_file: + found_tools = [tool.strip() for tool in found_tools_file.readlines()] diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 149e524f..d03fc6c1 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -1,10 +1,10 @@ -import shutil import sys import os import subprocess import json from pathlib import Path +from required_tools import REQUIRED_TOOLS, found_tools sby_file = Path(sys.argv[1]) sby_dir = sby_file.parent @@ -26,24 +26,6 @@ def parse_engine(engine): return engine, default_solvers.get(engine) -# When adding new tools, also update TOOL_LIST in Makefile to make sure we regenerate -# the rules when the user installs or removes any of the tools -REQUIRED_TOOLS = { - ("smtbmc", "yices"): ["yices-smt2"], - ("smtbmc", "z3"): ["z3"], - ("smtbmc", "cvc4"): ["cvc4"], - ("smtbmc", "mathsat"): ["mathsat"], - ("smtbmc", "boolector"): ["boolector"], - ("smtbmc", "bitwuzla"): ["bitwuzla"], - ("smtbmc", "abc"): ["yosys-abc"], - ("aiger", "suprove"): ["suprove", "yices"], - ("aiger", "avy"): ["avy", "yices"], - ("aiger", "aigbmc"): ["aigbmc", "yices"], - ("btor", "btormc"): ["btormc", "btorsim"], - ("btor", "pono"): ["pono", "btorsim"], - ("abc"): ["yices"], -} - rules_file = Path("make/rules/test") / sby_dir / (sby_file.name + ".mk") rules_file.parent.mkdir(exist_ok=True, parents=True) @@ -79,7 +61,7 @@ def parse_engine(engine): shell_script = sby_dir / f"{sby_file.stem}.sh" missing_tools = sorted( - f"`{tool}`" for tool in required_tools if shutil.which(tool) is None + f"`{tool}`" for tool in 
required_tools if tool not in found_tools ) if missing_tools: From d398a3c2df7b3761cda5e768b6881ca6186f0429 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 2 Jun 2022 16:25:11 +0200 Subject: [PATCH 064/220] tests: Fail on CI when any required tool is missing --- .github/workflows/ci.yml | 2 +- tests/Makefile | 6 ++++++ tests/make/test_rules.py | 2 +- 3 files changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ea48d06b..67abe1f6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,4 +9,4 @@ jobs: - uses: actions/checkout@v2 - uses: YosysHQ/setup-oss-cad-suite@v1 - name: Run checks - run: tabbypip install xmlschema && make ci + run: tabbypip install xmlschema && make ci NOSKIP=1 diff --git a/tests/Makefile b/tests/Makefile index ccb983c2..6b02872d 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -31,6 +31,12 @@ make/rules/collect.mk: make/collect_tests.py make/rules/test/%.mk: python3 make/test_rules.py $< +ifdef NOSKIP +SKIP_COMMAND := echo "NOSKIP was set, treating this as an error"; echo; false +else +SKIP_COMMAND := echo +endif + ifneq (help,$(MAKECMDGOALS)) # This should run every time but only trigger anything depending on it whenever diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index d03fc6c1..4871b11b 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -66,7 +66,7 @@ def parse_engine(engine): if missing_tools: print( - f"\t@echo; echo 'SKIPPING {target}: {', '.join(missing_tools)} not found'; echo", + f"\t@echo; echo 'SKIPPING {target}: {', '.join(missing_tools)} not found'; $(SKIP_COMMAND)", file=rules, ) From b4c110815ce1d6017c5380a07dd7dcb23f7a304c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 3 Jun 2022 16:48:21 +0200 Subject: [PATCH 065/220] Test designs using $allconst --- tests/unsorted/allconst.sby | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 tests/unsorted/allconst.sby diff --git a/tests/unsorted/allconst.sby b/tests/unsorted/allconst.sby new file mode 100644 index 00000000..0d43f12a --- /dev/null +++ b/tests/unsorted/allconst.sby @@ -0,0 +1,30 @@ +[tasks] +yices +z3 + +[options] +mode cover +depth 1 + +[engines] +yices: smtbmc --stbv yices +z3: smtbmc --stdt z3 + +[script] +read -noverific +read -formal primegen.sv +prep -top primegen + +[file primegen.sv] + +module primegen; + (* anyconst *) reg [9:0] prime; + (* allconst *) reg [4:0] factor; + + always @* begin + if (1 < factor && factor < prime) + assume ((prime % factor) != 0); + assume (prime > 800); + cover (1); + end +endmodule From 80eacf34ca575c0c458fef845a046d76d2cd1b33 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 3 Jun 2022 17:16:01 +0200 Subject: [PATCH 066/220] Don't fail tests when xmlschema is missing --- tests/junit/validate_junit.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/tests/junit/validate_junit.py b/tests/junit/validate_junit.py index c1c0573f..1999551c 100644 --- a/tests/junit/validate_junit.py +++ b/tests/junit/validate_junit.py @@ -1,4 +1,13 @@ -from xmlschema import XMLSchema, XMLSchemaValidationError +try: + from xmlschema import XMLSchema, XMLSchemaValidationError +except ImportError: + import os + if "NOSKIP" not in os.environ.get("MAKEFLAGS", ""): + print() + print("SKIPPING python library xmlschema not found, skipping JUnit output validation") + print() + exit(0) + import argparse def main(): From aed5a33bef3a2252a1c03359541f727559a2b620 Mon Sep 17 00:00:00 2001 
From: KrystalDelusion Date: Tue, 7 Jun 2022 10:22:04 +1200 Subject: [PATCH 067/220] Add init check Prevent rst_n from going low once it has gone high. --- docs/examples/fifo/top.sv | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index 428bf1fc..3e126011 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -69,6 +69,15 @@ module fifo ( ? waddr - raddr : waddr + MAX_DATA - raddr; + reg init = 0; + always @(posedge clk) begin + if (rst_n) + init <= 1; + // if init is low we don't care about the value of rst_n + // if init is high (rst_n has ben high), then rst_n must remain high + assume (!init || init && rst_n); + end + // tests always @(posedge clk) begin if (rst_n) begin From fef6d3a8a68aa66a090fe11357417c71280bab1f Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 7 Jun 2022 11:49:25 +1200 Subject: [PATCH 068/220] Adding USE_VERIFIC flag Adding variations in .sby file where tabby uses verific and oss doesn't. --- docs/examples/fifo/fifo.sby | 8 +++++-- docs/examples/fifo/top.sv | 45 ++++++++++++++++++++++++------------- 2 files changed, 35 insertions(+), 18 deletions(-) diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby index 91cb3c55..ab063a9c 100644 --- a/docs/examples/fifo/fifo.sby +++ b/docs/examples/fifo/fifo.sby @@ -1,6 +1,9 @@ [tasks] -cover -prove +prove_oss prove oss +prove_tabby prove tabby +cover_oss cover oss +cover_tabby cover tabby +prove_oss cover_oss : default [options] cover: @@ -16,6 +19,7 @@ prove: abc pdr [script] read -sv fifo.sv +tabby: read -define USE_VERIFIC=1 read -formal top.sv prep -top fifo diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index 3e126011..f9ba4753 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -108,14 +108,6 @@ module fifo ( || waddr == $past(waddr) || waddr == $past(waddr + 1)); - // read/write enables enable - // ap_raddr2: assert property (ren |=> raddr != $past(raddr)); - // ap_waddr2: assert property (wen |=> waddr != $past(waddr)); - - // read/write needs enable UNLESS full/empty - // ap_raddr3: assert property (!ren && !full |=> raddr == $past(raddr)); - // ap_waddr3: assert property (!wen && !empty |=> waddr == $past(waddr)); - // full and empty work as expected a_full: assert (!full || full && count == MAX_DATA); w_full: cover (wen && !ren && count == MAX_DATA-1); @@ -123,9 +115,7 @@ module fifo ( w_empty: cover (ren && !wen && count == 1); // can we corrupt our data? - // ap_overfill: assert property (wen && full |=> raddr != $past(raddr)); w_overfill: cover ($past(rskip) && raddr); - // ap_underfill: assert property (ren && empty |=> waddr != $past(waddr)); w_underfill: cover ($past(wskip) && waddr); end else begin // waddr and raddr are zero while reset is low @@ -137,13 +127,36 @@ module fifo ( end end - // assumptions +`ifdef USE_VERIFIC + // if we have verific we can also do the following additional tests + always @(posedge clk) begin + if (rst_n) begin + // read/write enables enable + ap_raddr2: assert property (ren |=> $changed(raddr)); + ap_waddr2: assert property (wen |=> $changed(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); + ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); + + // can we corrupt our data? 
+ ap_overfill: assert property (wen && full |=> $changed(raddr)); + ap_underfill: assert property (ren && empty |=> $changed(waddr)); + + // change data when writing (and only when writing) so we can line + // up reads with writes + assume property (wen |=> $changed(wdata)); + assume property (!wen |=> $stable(wdata)); + end + end +`else // !USE_VERIFIC + // without verific we are more limited in describing the above assumption always @(posedge clk) begin - // data will change when writing (and only when writing) so we can line up reads with writes - assume ((wen && wdata != $past(wdata)) || (!wen && wdata == $past(wdata))); - // assume property (wen |=> wdata != $past(wdata)); - // assume property (!wen |=> wdata == $past(wdata)); + assume ((wen && wdata != $past(wdata)) + || (!wen && wdata == $past(wdata))); end -`endif +`endif // USE_VERIFIC + +`endif // FORMAL endmodule From 66ef51d846bcbebaee0579bafe728737f2bb269f Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 7 Jun 2022 11:50:26 +1200 Subject: [PATCH 069/220] Verification properties in doc --- docs/source/newstart.rst | 79 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 77 insertions(+), 2 deletions(-) diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 0f13f2b3..bf8dd199 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -7,7 +7,7 @@ Getting started `GTKWave `_, an open source VCD viewer. First In, First Out (FIFO) buffer -******************************** +********************************* From `Wikipedia `_, a FIFO is @@ -33,4 +33,79 @@ address generator module will be instantiated twice; once for the write address and once for the read address. In both cases, the address will start at and reset to 0, and will increment by 1 when an enable signal is received. When the address pointers increment from the maximum storage value they reset back to 0, -providing a circular queue. +providing a circular queue. The top level design implemented, can be found in +``top.sv``. + +Verification properties +*********************** + +In order to verify our design we must first define properties that it must +satisfy. For example, there must never be a negative number of values in the +FIFO. Similarly, there must never be more than there is memory available. By +assigning a signal to count the number of values in the buffer, we can make the +following assertions in the code: + +.. code-block:: systemverilog + + a_uflow: assert (count >= 0); + a_oflow: assert (count <= MAX_DATA); + +It is also possible to use the prior value of a signal for comparison. This can +be used, for example, to ensure that the count is only able to increase or +decrease by 1. A case must be added to handle resetting the count directly to +0, as well as if the count does not change. This can be seen in the following +code; at least one of these conditions must be true at all times if our design +is to be correct. + +.. code-block:: systemverilog + + a_counts: assert (count == 0 + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); + +As our count signal is used independently of the read and write pointers, we +must verify that the count is always correct. While the write pointer will +always be at the same point or *after* the read pointer, the circular buffer +means that the write *address* could wrap around and appear *less than* the read +address. 
So we must first perform some simple arithmetic to find the absolute +difference in addresses, and then compare with the count signal. + +.. code-block:: systemverilog + + assign addr_diff = waddr >= raddr + ? waddr - raddr + : waddr + MAX_DATA - raddr; + + a_count_diff: assert (count == addr_diff + || count == MAX_DATA && addr_diff == 0); + +Concurrent assertions +********************* + +Until this point, all of the properties described have been *immediate* +assertions. As the name suggests, immediate assertions are evaluated +immediately whereas concurrent assertions allow for the capture of sequences of +events which occur across time. The use of concurrent assertions requires a +more advanced parser, such as Verific. Verific is included for use in the +*Tabby CAD Suite*. + +With concurrent assertions we are able to verify more fully that our enables and +status flags work as desired. For example, we can assert that if the read +enable signal is high then the address of the read pointer *must* change. +Because of our earlier *immediate* assertions that the pointer address can only +increment or remain the same we do not need to specify that here. We can also +assert that if the enable is low, and the buffer is not full and potentially +requires a skip in the read address, then the read address will *not* change. + +.. code-block:: systemverilog + + ap_raddr2: assert property (ren |=> $changed(raddr)); + ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); + + +Further information +******************* +For more information on the uses of assertions and the difference between +immediate and concurrent assertions, refer to appnote 109: Property Checking +with SystemVerilog Assertions. From 34d6adf098ff03d63818ec20d59f7c9554336133 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 7 Jun 2022 14:29:25 +0200 Subject: [PATCH 070/220] tests: Move required tool checks from rule generation to execution This avoids regenerating the test makefile rules when the set of installed tools changes and is a bit simpler overall. 
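Concretely, each generated rule now shells out to a small wrapper that decides at run time whether the test can be executed. Roughly (simplified from the ``required_tools.py`` changes below, with the handling of the found-tools cache left out), the wrapper logic looks like this:

.. code-block:: python

    import os
    import subprocess

    def run_or_skip(target, command, required_tools, found_tools):
        # Tools the test needs but which were not detected on this system.
        missing = sorted(t for t in required_tools if t not in found_tools)
        if missing:
            # Under NOSKIP (as used on CI) a skip becomes a hard failure.
            noskip = "NOSKIP" in os.environ.get("MAKEFLAGS", "")
            print(f"SKIPPING {target}: {', '.join(missing)} not found")
            return 1 if noskip else 0
        print(command, flush=True)
        return subprocess.call(command, shell=True)

Moving the check into the executed command means the rule files themselves no longer depend on which tools happen to be installed.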
--- tests/Makefile | 6 ------ tests/make/collect_tests.py | 2 +- tests/make/required_tools.py | 32 +++++++++++++++++++++++++++++--- tests/make/test_rules.py | 29 +++++++++-------------------- 4 files changed, 39 insertions(+), 30 deletions(-) diff --git a/tests/Makefile b/tests/Makefile index 6b02872d..ccb983c2 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -31,12 +31,6 @@ make/rules/collect.mk: make/collect_tests.py make/rules/test/%.mk: python3 make/test_rules.py $< -ifdef NOSKIP -SKIP_COMMAND := echo "NOSKIP was set, treating this as an error"; echo; false -else -SKIP_COMMAND := echo -endif - ifneq (help,$(MAKECMDGOALS)) # This should run every time but only trigger anything depending on it whenever diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py index 7b0cda37..cf782b99 100644 --- a/tests/make/collect_tests.py +++ b/tests/make/collect_tests.py @@ -37,7 +37,7 @@ def collect(path): print(f"{out_file}: {checked_dir}", file=output) for test in tests: - print(f"make/rules/test/{test}.mk: {test} make/rules/found_tools", file=output) + print(f"make/rules/test/{test}.mk: {test}", file=output) for ext in [".sh", ".py"]: script_file = test.parent / (test.stem + ext) if script_file.exists(): diff --git a/tests/make/required_tools.py b/tests/make/required_tools.py index 4d06e100..67b5d2b2 100644 --- a/tests/make/required_tools.py +++ b/tests/make/required_tools.py @@ -17,11 +17,40 @@ } +def found_tools(): + with open("make/rules/found_tools") as found_tools_file: + return [tool.strip() for tool in found_tools_file.readlines()] + + if __name__ == "__main__": import subprocess import sys + import os from pathlib import Path + args = sys.argv[1:] + + if args and args[0] == "run": + target, command, *required_tools = args[1:] + + with open("make/rules/found_tools") as found_tools_file: + found_tools = set(tool.strip() for tool in found_tools_file.readlines()) + + missing_tools = sorted( + f"`{tool}`" for tool in required_tools if tool not in found_tools + ) + if missing_tools: + noskip = "NOSKIP" in os.environ.get("MAKEFLAGS", "") + print() + print(f"SKIPPING {target}: {', '.join(missing_tools)} not found") + if noskip: + print("NOSKIP was set, treating this as an error") + print() + exit(noskip) + + print(command, flush=True) + exit(subprocess.call(command, shell=True)) + found_tools = [] check_tools = set() for tools in REQUIRED_TOOLS.values(): @@ -59,6 +88,3 @@ with open("make/rules/found_tools", "w") as found_tools_file: found_tools_file.write(found_tools) -else: - with open("make/rules/found_tools") as found_tools_file: - found_tools = [tool.strip() for tool in found_tools_file.readlines()] diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 4871b11b..04d2226d 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -2,9 +2,10 @@ import os import subprocess import json +import shlex from pathlib import Path -from required_tools import REQUIRED_TOOLS, found_tools +from required_tools import REQUIRED_TOOLS sby_file = Path(sys.argv[1]) sby_dir = sby_file.parent @@ -55,31 +56,19 @@ def parse_engine(engine): solvers.add(solver) engine_solvers.add((engine, solver)) + required_tools = sorted(required_tools) + print(f".PHONY: {target}", file=rules) print(f"{target}:", file=rules) shell_script = sby_dir / f"{sby_file.stem}.sh" - missing_tools = sorted( - f"`{tool}`" for tool in required_tools if tool not in found_tools - ) - - if missing_tools: - print( - f"\t@echo; echo 'SKIPPING {target}: {', '.join(missing_tools)} not found'; 
$(SKIP_COMMAND)", - file=rules, - ) - - elif shell_script.exists(): - print( - f"\tcd {sby_dir} && SBY_FILE={sby_file.name} WORKDIR={workdirname} TASK={task} bash {shell_script.name}", - file=rules, - ) + if shell_script.exists(): + command = f"cd {sby_dir} && SBY_FILE={sby_file.name} WORKDIR={workdirname} TASK={task} bash {shell_script.name}" else: - print( - f"\tcd {sby_dir} && python3 $(SBY_MAIN) -f {sby_file.name} {task}", - file=rules, - ) + command = f"cd {sby_dir} && python3 $(SBY_MAIN) -f {sby_file.name} {task}" + + print(f"\t@python3 make/required_tools.py run {target} {shlex.quote(command)} {shlex.join(required_tools)}", file=rules) print(f".PHONY: clean-{target}", file=rules) print(f"clean-{target}:", file=rules) From 675dc03dfedee8c697d76fd0f0f67fbb6f6b6e8d Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 8 Jun 2022 11:32:35 +0200 Subject: [PATCH 071/220] tests: Remove unused tool list in test Makefile The checks for available tools moved to a python script, so need need to have a copy of the tool list in the Makefile. --- tests/Makefile | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/tests/Makefile b/tests/Makefile index ccb983c2..9b65da77 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -2,23 +2,6 @@ test: .PHONY: test clean refresh help -TOOL_LIST := \ - btorsim \ - yices \ - aigbmc \ - avy \ - bitwuzla \ - boolector \ - btormc \ - cvc4 \ - mathsat \ - pono \ - suprove \ - yices-smt2 \ - yices \ - yosys-abc \ - z3 - help: @cat make/help.txt From d0da57f54f5d3395b341756ec866a39629abaefb Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 8 Jun 2022 13:33:12 +0200 Subject: [PATCH 072/220] Test that cvc4 and cvc5 can be used --- tests/unsorted/demo.sby | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/unsorted/demo.sby b/tests/unsorted/demo.sby index bc40cd68..c6965714 100644 --- a/tests/unsorted/demo.sby +++ b/tests/unsorted/demo.sby @@ -1,6 +1,8 @@ [tasks] btormc pono +cvc4 +cvc5 [options] mode bmc @@ -10,6 +12,8 @@ expect fail [engines] btormc: btor btormc pono: btor pono +cvc4: smtbmc cvc4 +cvc5: smtbmc cvc5 [script] read -formal demo.sv From 41e427640a0f54655f523dae9d53bdb04aacaf84 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Thu, 9 Jun 2022 14:26:17 +1200 Subject: [PATCH 073/220] Adding noskip task Demonstrate failing model check by disabling rskip and wskip. 
--- docs/examples/fifo/fifo.sby | 4 +++- docs/examples/fifo/top.sv | 5 +++++ 2 files changed, 8 insertions(+), 1 deletion(-) diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby index ab063a9c..62e9d858 100644 --- a/docs/examples/fifo/fifo.sby +++ b/docs/examples/fifo/fifo.sby @@ -1,7 +1,8 @@ [tasks] prove_oss prove oss -prove_tabby prove tabby +noskip prove oss cover_oss cover oss +prove_tabby prove tabby cover_tabby cover tabby prove_oss cover_oss : default @@ -20,6 +21,7 @@ prove: abc pdr [script] read -sv fifo.sv tabby: read -define USE_VERIFIC=1 +noskip: read -define NOSKIP=1 read -formal top.sv prep -top fifo diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv index f9ba4753..f5eec519 100644 --- a/docs/examples/fifo/top.sv +++ b/docs/examples/fifo/top.sv @@ -57,10 +57,15 @@ module fifo ( assign empty = (data_count == 0) && rst_n; assign count = data_count; +`ifndef NOSKIP // write while full => overwrite oldest data, move read pointer assign rskip = wen && !ren && data_count >= MAX_DATA; // read while empty => read invalid data, keep write pointer in sync assign wskip = ren && !wen && data_count == 0; +`else + assign rskip = 0; + assign wskip = 0; +`endif // NOSKIP `ifdef FORMAL // observers From 069197aeaa4dace1032ffc8ca515bbc280e2239c Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Thu, 9 Jun 2022 14:29:21 +1200 Subject: [PATCH 074/220] Add section on sby to newstart List tasks and run through failing noskip example. Includes pictures (both fail and pass) plus .gtkw file for setting up. --- docs/examples/fifo/noskip.gtkw | 28 ++++++ docs/source/media/gtkwave_coverskip.png | Bin 0 -> 13487 bytes docs/source/media/gtkwave_noskip.png | Bin 0 -> 13593 bytes docs/source/newstart.rst | 111 ++++++++++++++++++++++++ 4 files changed, 139 insertions(+) create mode 100644 docs/examples/fifo/noskip.gtkw create mode 100644 docs/source/media/gtkwave_coverskip.png create mode 100644 docs/source/media/gtkwave_noskip.png diff --git a/docs/examples/fifo/noskip.gtkw b/docs/examples/fifo/noskip.gtkw new file mode 100644 index 00000000..df81a20a --- /dev/null +++ b/docs/examples/fifo/noskip.gtkw @@ -0,0 +1,28 @@ +[*] +[*] GTKWave Analyzer v3.4.0 (w)1999-2022 BSI +[*] Thu Jun 09 02:02:01 2022 +[*] +[timestart] 0 +[size] 1000 320 +[pos] -1 -1 +*-3.253757 18 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 -1 +[sst_width] 246 +[signals_width] 200 +[sst_expanded] 1 +[sst_vpaned_height] 58 +@28 +fifo.clk +@22 +fifo.data_count[4:0] +fifo.addr_diff[4:0] +@28 +fifo.ren +fifo.wen +@22 +fifo.raddr[3:0] +fifo.waddr[3:0] +@28 +fifo.rskip +fifo.wskip +[pattern_trace] 1 +[pattern_trace] 0 diff --git a/docs/source/media/gtkwave_coverskip.png b/docs/source/media/gtkwave_coverskip.png new file mode 100644 index 0000000000000000000000000000000000000000..a0b4d4a414713738972ea59ce26f099d749fe1e3 GIT binary patch literal 13487 zcmeHucT`hpw|B6Cq9CH6QljFBs1#8t0*PY53Oay*l%Uul1rS0>2ujmIh>8w4G!>Om znxK$CAfYITAc!adk`P0S5J>0&LXvwzQ)ig>z3YD8de^tscmDtaob#Oh>}T)a-ut)r zxp~6UV%d_lOF$sdvcre={|W-lMKJksp%L@JcY1JP-dI6=y<||3?khK@y%58rfRleoo@|}zw8n-@P z($jnPpy5z1=jCeF_HxlG8fSYp+iZ=(Sd<|%2KquDX?qTGXZ%~zd!q*qgdEG3)W?sz zbwqseIDsq1j^dXu=Dw;?(IS_CRZRr`r!J;+}*!P^Qu%qpkP6Kc%a-6q(MNy8Qi>Qy^h{b zJl=2vKTvpq+>9M$iy*V}K@@_uwm?bsLgXuG!~#r5~xT}dy>3xGpA+*kIT zbI4x$dU<7poAi%_D_`-8)-D!fx5r2s& zU-jYPg4N&w_;`Q7O}ViV+-TYJn7B19F3Be1jnYR^@s*bsc4**>pTp((BH4yYbkaC@ zzZ&(H7Ph8}Tsg5(ItZuYCU8&&ft(80(Rr>F1pYPSEn$KpDj;UDnn$R>`SC{Z`KuP{ z*YdY!M3T}KBqo!yD1RTZiXa|qFsre-?A${D3>Fk!wYI&*f_}~0=CrcpvcBc%dksp3 
zP@>XQm48_Og|YSPw)r5qdYldRceo?P<{Hv~il1Z4;`^$)#fP0IPxcj8eNSYWK#WkBcRK1t60F4EK$i36`++BUxS z?O_>L{J}7jF*wkSZMrxI)Vaf)af#=DcPd+^Z!t^}fAe&u$*V5^g9b)+Iz$I_z1cf5 z09TS-y8H3%8RaFDclTyRxY=uKuhT9md`~cwn1-pZmK=`>r#<1e^+D)qUFS=4Ao8TJu^=bNeke0nm8^V+ zN!Zrmm+VSt^C??bLFyBK*w!Ue9=>lY4(oeO_nJr-w({Ezm96UQPr;hf0&{`6)ip~yMy>WJ+ZCz;+r2R@gD9f zCNi+T13L3W?MtM~F5KY~Hp$Kag>PI=~fvb$9BmTXEWpNKbokzMs%9f=z(Qtt0f`pPN60l?``D`eQKx`)IPapC0J( zn!ji{-3`&Q%BhkG9Z6UFwUg1xvTeqq$(tSZKvC$?y!AUyd&i@XU-DjvN?hG_sFhk8 z*(;e`O>Z5>MsD@gbm7E_bY overwrite oldest data, move read pointer + assign rskip = wen && !ren && data_count >= MAX_DATA; + // read while empty => read invalid data, keep write pointer in sync + assign wskip = ren && !wen && data_count == 0; + `endif // NOSKIP + +The last few lines of output for the noskip task should be similar to the +following: + +.. code-block:: text + + SBY [fifo_noskip] engine_0: ## 0:00:00 BMC failed! + SBY [fifo_noskip] engine_0: ## 0:00:00 Assert failed in fifo: a_count_diff + SBY [fifo_noskip] engine_0: ## 0:00:00 Writing trace to VCD file: engine_0/trace.vcd + SBY [fifo_noskip] engine_0: ## 0:00:00 Writing trace to Verilog testbench: engine_0/trace_tb.v + SBY [fifo_noskip] engine_0: ## 0:00:00 Writing trace to constraints file: engine_0/trace.smtc + SBY [fifo_noskip] engine_0: ## 0:00:00 Status: FAILED + SBY [fifo_noskip] engine_0: finished (returncode=1) + SBY [fifo_noskip] summary: Elapsed clock time [H:MM:SS (secs)]: 0:00:01 (1) + SBY [fifo_noskip] summary: Elapsed process time unvailable on Windows + SBY [fifo_noskip] summary: engine_0 (abc pdr) returned FAIL + SBY [fifo_noskip] summary: counterexample trace: fifo_noskip/engine_0/trace.vcd + SBY [fifo_noskip] DONE (FAIL, rc=2) + SBY The following tasks failed: ['noskip'] + +Using the ``noskip.gtkw`` file provided, use the below command to examine the +error trace. + + gtkwave fifo_noskip/engine_0/trace.vcd noskip.gtkw + +This should result in something similar to the below image. We can immediately +see that ``data_count`` and ``addr_diff`` are different. Looking a bit deeper +we can see that in order to reach this state the read enable signal was high in +the first clock cycle while write enable is low. This leads to an underfill +where a value is read while the buffer is empty and the read address increments +to a higher value than the write address. + +.. image:: media/gtkwave_noskip.png + +During correct operation, the ``w_underfill`` witness will cover the underflow +case. Examining ``fifo_cover_oss/logfile.txt`` will reveal which trace file +includes the witness we are looking for. If this file doesn't exist, run the +code below. + + sby fifo.sby fifo_cover_oss + +Searching the file for ``w_underfill`` will reveal the below. + +.. code-block:: text + + $ grep "w_underfill" fifo_cover_oss/logfile.txt -A 1 + SBY [fifo_cover_oss] engine_0: ## 0:00:00 Reached cover statement at w_underfill in step 2. + SBY [fifo_cover_oss] engine_0: ## 0:00:00 Writing trace to VCD file: engine_0/trace2.vcd + +We can then run gtkwave with the trace file indicated to see the correct +operation as in the image below. When the buffer is empty, a read with no write +will result in the ``wksip`` signal going high, incrementing *both* read and +write addresses and avoiding underflow. + + gtkwave fifo_cover_oss/engine_0/trace2.vcd noskip.gtkw + +.. image:: media/gtkwave_coverskip.png + +For more on using the .sby file, see the :ref:`.sby reference page `. 
+ Concurrent assertions ********************* From 4ef02d2c5cf5ea68adee915ab5d2fab261c2dc9c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 8 Jun 2022 12:08:34 +0200 Subject: [PATCH 075/220] Regression test for smtbmc --unroll --noincr --- tests/regression/unroll_noincr_traces.sby | 29 +++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 tests/regression/unroll_noincr_traces.sby diff --git a/tests/regression/unroll_noincr_traces.sby b/tests/regression/unroll_noincr_traces.sby new file mode 100644 index 00000000..e93d18fc --- /dev/null +++ b/tests/regression/unroll_noincr_traces.sby @@ -0,0 +1,29 @@ +[tasks] +boolector +yices +z3 + +[options] +mode bmc +expect fail + +[engines] +boolector: smtbmc boolector -- --noincr +yices: smtbmc --unroll yices -- --noincr +z3: smtbmc --unroll z3 -- --noincr + + +[script] +read -formal top.sv +prep -top top + +[file top.sv] +module top(input clk); + reg [7:0] counter = 0; + wire derived = counter * 7; + + always @(posedge clk) begin + counter <= counter + 1; + assert (counter < 4); + end +endmodule From 499371fd39075915fe1a4ac83164e4e426c40d78 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 13 Jun 2022 13:20:33 +0200 Subject: [PATCH 076/220] Use the test Makefile for all examples * Rename and move sbysrc/demo[123].sby to docs/examples/demos * Make them use multiple tasks for multiple engines * Scan docs/examples for sby files for make test * `make ci` is now `NOSKIP` by default * Skip scripts using `verific` w/o yosys verific support * This does not fail even with NOSKIP set --- .github/workflows/ci.yml | 2 +- Makefile | 67 ++----------------- docs/examples/Makefile | 3 + docs/examples/abstract/Makefile | 3 + docs/examples/demos/Makefile | 3 + .../examples/demos/memory.sby | 0 docs/examples/demos/picorv32_axicheck.sby | 25 +++++++ .../examples/demos/up_down_counter.sby | 9 ++- docs/examples/indinv/Makefile | 3 + docs/examples/multiclk/Makefile | 3 + docs/examples/puzzles/Makefile | 3 + docs/examples/quickstart/Makefile | 3 + sbysrc/demo1.sby | 21 ------ sbysrc/sby.py | 1 + tests/make/collect_tests.py | 1 + tests/make/required_tools.py | 9 +++ tests/make/subdir.mk | 10 +-- tests/make/test_rules.py | 3 + 18 files changed, 77 insertions(+), 92 deletions(-) create mode 100644 docs/examples/Makefile create mode 100644 docs/examples/abstract/Makefile create mode 100644 docs/examples/demos/Makefile rename sbysrc/demo3.sby => docs/examples/demos/memory.sby (100%) create mode 100644 docs/examples/demos/picorv32_axicheck.sby rename sbysrc/demo2.sby => docs/examples/demos/up_down_counter.sby (85%) create mode 100644 docs/examples/indinv/Makefile create mode 100644 docs/examples/multiclk/Makefile create mode 100644 docs/examples/puzzles/Makefile create mode 100644 docs/examples/quickstart/Makefile delete mode 100644 sbysrc/demo1.sby diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 67abe1f6..ea48d06b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -9,4 +9,4 @@ jobs: - uses: actions/checkout@v2 - uses: YosysHQ/setup-oss-cad-suite@v1 - name: Run checks - run: tabbypip install xmlschema && make ci NOSKIP=1 + run: tabbypip install xmlschema && make ci diff --git a/Makefile b/Makefile index 07f85a41..7a86f6be 100644 --- a/Makefile +++ b/Makefile @@ -21,10 +21,10 @@ help: @echo " build documentation in docs/build/html/" @echo "" @echo "make test" - @echo " run tests" + @echo " run tests, skipping tests with missing dependencies" @echo "" @echo "make ci" - @echo " run tests and check 
examples" + @echo " run all tests, failing tests with missing dependencies" @echo " note: this requires a full Tabby CAD Suite or OSS CAD Suite install" @echo "" @echo "make clean" @@ -50,15 +50,8 @@ endif ci: check_cad_suite @$(MAKE) run_ci -run_ci: \ - test_demo1 test_demo2 test_demo3 \ - test_abstract_abstr test_abstract_props \ - test_demos_fib_cover test_demos_fib_prove test_demos_fib_live \ - test_multiclk_dpmem \ - test_puzzles_djb2hash test_puzzles_pour853to4 test_puzzles_wolfgoatcabbage \ - test_puzzles_primegen_primegen test_puzzles_primegen_primes_pass test_puzzles_primegen_primes_fail \ - test_quickstart_demo test_quickstart_cover test_quickstart_prove test_quickstart_memory \ - test +run_ci: + $(MAKE) test NOSKIP=1 if yosys -qp 'read -verific' 2> /dev/null; then set -x; \ YOSYS_NOVERIFIC=1 $(MAKE) run_ci; \ fi @@ -79,58 +72,6 @@ test_demo2: test_demo3: cd sbysrc && python3 sby.py -f demo3.sby -test_abstract_abstr: - @if yosys -qp 'read -verific' 2> /dev/null; then set -x; \ - cd docs/examples/abstract && python3 ../../../sbysrc/sby.py -f abstr.sby; \ - else echo "skipping $@"; fi - -test_abstract_props: - if yosys -qp 'read -verific' 2> /dev/null; then set -x; \ - cd docs/examples/abstract && python3 ../../../sbysrc/sby.py -f props.sby; \ - else echo "skipping $@"; fi - -test_demos_fib_cover: - cd docs/examples/demos && python3 ../../../sbysrc/sby.py -f fib.sby cover - -test_demos_fib_prove: - cd docs/examples/demos && python3 ../../../sbysrc/sby.py -f fib.sby prove - -test_demos_fib_live: - cd docs/examples/demos && python3 ../../../sbysrc/sby.py -f fib.sby live - -test_multiclk_dpmem: - cd docs/examples/multiclk && python3 ../../../sbysrc/sby.py -f dpmem.sby - -test_puzzles_djb2hash: - cd docs/examples/puzzles && python3 ../../../sbysrc/sby.py -f djb2hash.sby - -test_puzzles_pour853to4: - cd docs/examples/puzzles && python3 ../../../sbysrc/sby.py -f pour_853_to_4.sby - -test_puzzles_wolfgoatcabbage: - cd docs/examples/puzzles && python3 ../../../sbysrc/sby.py -f wolf_goat_cabbage.sby - -test_puzzles_primegen_primegen: - cd docs/examples/puzzles && python3 ../../../sbysrc/sby.py -f primegen.sby primegen - -test_puzzles_primegen_primes_pass: - cd docs/examples/puzzles && python3 ../../../sbysrc/sby.py -f primegen.sby primes_pass - -test_puzzles_primegen_primes_fail: - cd docs/examples/puzzles && python3 ../../../sbysrc/sby.py -f primegen.sby primes_fail - -test_quickstart_demo: - cd docs/examples/quickstart && python3 ../../../sbysrc/sby.py -f demo.sby - -test_quickstart_cover: - cd docs/examples/quickstart && python3 ../../../sbysrc/sby.py -f cover.sby - -test_quickstart_prove: - cd docs/examples/quickstart && python3 ../../../sbysrc/sby.py -f prove.sby - -test_quickstart_memory: - cd docs/examples/quickstart && python3 ../../../sbysrc/sby.py -f memory.sby - test: $(MAKE) -C tests test diff --git a/docs/examples/Makefile b/docs/examples/Makefile new file mode 100644 index 00000000..5cffc833 --- /dev/null +++ b/docs/examples/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples +TESTDIR=../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/docs/examples/abstract/Makefile b/docs/examples/abstract/Makefile new file mode 100644 index 00000000..d456aab6 --- /dev/null +++ b/docs/examples/abstract/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/abstract +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/docs/examples/demos/Makefile b/docs/examples/demos/Makefile new file mode 100644 index 00000000..ecd71ac8 --- /dev/null +++ b/docs/examples/demos/Makefile @@ 
-0,0 +1,3 @@ +SUBDIR=../docs/examples/demos +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/sbysrc/demo3.sby b/docs/examples/demos/memory.sby similarity index 100% rename from sbysrc/demo3.sby rename to docs/examples/demos/memory.sby diff --git a/docs/examples/demos/picorv32_axicheck.sby b/docs/examples/demos/picorv32_axicheck.sby new file mode 100644 index 00000000..61b471a0 --- /dev/null +++ b/docs/examples/demos/picorv32_axicheck.sby @@ -0,0 +1,25 @@ +[tasks] +yices +boolector +z3 +abc + +[options] +mode bmc +depth 10 + +[engines] +yices: smtbmc yices +boolector: smtbmc boolector -ack +z3: smtbmc --nomem z3 +abc: abc bmc3 + +[script] +read_verilog -formal -norestrict -assume-asserts picorv32.v +read_verilog -formal axicheck.v +prep -top testbench + +[files] +picorv32.v ../../../extern/picorv32.v +axicheck.v ../../../extern/axicheck.v + diff --git a/sbysrc/demo2.sby b/docs/examples/demos/up_down_counter.sby similarity index 85% rename from sbysrc/demo2.sby rename to docs/examples/demos/up_down_counter.sby index 1d5639c0..cb922eb3 100644 --- a/sbysrc/demo2.sby +++ b/docs/examples/demos/up_down_counter.sby @@ -1,10 +1,13 @@ +[tasks] +suprove +avy + [options] mode prove -wait on [engines] -aiger suprove -aiger avy +suprove: aiger suprove +avy: aiger avy [script] read_verilog -formal demo.v diff --git a/docs/examples/indinv/Makefile b/docs/examples/indinv/Makefile new file mode 100644 index 00000000..c3bf7ac0 --- /dev/null +++ b/docs/examples/indinv/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/indinv +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/docs/examples/multiclk/Makefile b/docs/examples/multiclk/Makefile new file mode 100644 index 00000000..b6c5eb73 --- /dev/null +++ b/docs/examples/multiclk/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/multiclk +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/docs/examples/puzzles/Makefile b/docs/examples/puzzles/Makefile new file mode 100644 index 00000000..45293b1b --- /dev/null +++ b/docs/examples/puzzles/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/puzzles +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/docs/examples/quickstart/Makefile b/docs/examples/quickstart/Makefile new file mode 100644 index 00000000..be061940 --- /dev/null +++ b/docs/examples/quickstart/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/quickstart +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk diff --git a/sbysrc/demo1.sby b/sbysrc/demo1.sby deleted file mode 100644 index 6c89f183..00000000 --- a/sbysrc/demo1.sby +++ /dev/null @@ -1,21 +0,0 @@ - -[options] -mode bmc -depth 10 -wait on - -[engines] -smtbmc yices -smtbmc boolector -ack -smtbmc --nomem z3 -abc bmc3 - -[script] -read_verilog -formal -norestrict -assume-asserts picorv32.v -read_verilog -formal axicheck.v -prep -top testbench - -[files] -picorv32.v ../extern/picorv32.v -axicheck.v ../extern/axicheck.v - diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 19a962b4..d9e0a5c9 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -381,6 +381,7 @@ def find_files(taskname): taskinfo[taskname or ""] = { "mode": cfg.options.get("mode"), "engines": cfg.engines, + "script": cfg.script, } print(json.dumps(taskinfo, indent=2)) sys.exit(0) diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py index cf782b99..89a68eca 100644 --- a/tests/make/collect_tests.py +++ b/tests/make/collect_tests.py @@ -26,6 +26,7 @@ def collect(path): collect(Path(".")) +collect(Path("../docs/examples")) out_file = 
Path("make/rules/collect.mk") out_file.parent.mkdir(exist_ok=True) diff --git a/tests/make/required_tools.py b/tests/make/required_tools.py index 67b5d2b2..203ccd76 100644 --- a/tests/make/required_tools.py +++ b/tests/make/required_tools.py @@ -36,6 +36,15 @@ def found_tools(): with open("make/rules/found_tools") as found_tools_file: found_tools = set(tool.strip() for tool in found_tools_file.readlines()) + if 'verific' in required_tools: + result = subprocess.run(["yosys", "-qp", "read -verific"], capture_output=True) + if result.returncode: + print() + print(f"SKIPPING {target}: requires yosys with verific support") + print() + exit() + required_tools.remove('verific') + missing_tools = sorted( f"`{tool}`" for tool in required_tools if tool not in found_tools ) diff --git a/tests/make/subdir.mk b/tests/make/subdir.mk index b1f367a1..86b680f9 100644 --- a/tests/make/subdir.mk +++ b/tests/make/subdir.mk @@ -1,15 +1,17 @@ +TESTDIR ?= .. + test: - @$(MAKE) -C .. $(SUBDIR)/$@ + @$(MAKE) -C $(TESTDIR) $(SUBDIR)/$@ .PHONY: test refresh IMPLICIT_PHONY IMPLICIT_PHONY: refresh: - @$(MAKE) -C .. refresh + @$(MAKE) -C $(TESTDIR) refresh help: - @$(MAKE) -C .. help + @$(MAKE) -C $(TESTDIR) help %: IMPLICIT_PHONY - @$(MAKE) -C .. $(SUBDIR)/$@ + @$(MAKE) -C $(TESTDIR) $(SUBDIR)/$@ diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 04d2226d..5c18acad 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -56,6 +56,9 @@ def parse_engine(engine): solvers.add(solver) engine_solvers.add((engine, solver)) + if any(line.startswith("read -verific") or line.startswith("verific") for line in info["script"]): + required_tools.add("verific") + required_tools = sorted(required_tools) print(f".PHONY: {target}", file=rules) From b42b6445b8641700e9538aa6df8c5c5297328698 Mon Sep 17 00:00:00 2001 From: Matt Venn Date: Mon, 13 Jun 2022 13:51:04 +0200 Subject: [PATCH 077/220] tristate example --- docs/examples/tristate/README.md | 13 +++++++++++++ docs/examples/tristate/tristate.sby | 19 +++++++++++++++++++ docs/examples/tristate/tristates.v | 18 ++++++++++++++++++ 3 files changed, 50 insertions(+) create mode 100644 docs/examples/tristate/README.md create mode 100644 docs/examples/tristate/tristate.sby create mode 100644 docs/examples/tristate/tristates.v diff --git a/docs/examples/tristate/README.md b/docs/examples/tristate/README.md new file mode 100644 index 00000000..155fab2d --- /dev/null +++ b/docs/examples/tristate/README.md @@ -0,0 +1,13 @@ +# Tristate demo + +Run + + sby -f tristate.sby pass + +to run the pass task. This uses the top module that exclusively enables each of the submodules. + +Run + + sby -f tristate.sby fail + +to run the fail task. This uses the top module that allows submodule to independently enable its tristate outputs. diff --git a/docs/examples/tristate/tristate.sby b/docs/examples/tristate/tristate.sby new file mode 100644 index 00000000..79707742 --- /dev/null +++ b/docs/examples/tristate/tristate.sby @@ -0,0 +1,19 @@ +[tasks] +pass +fail + +[options] +mode prove +depth 5 + +[engines] +smtbmc + +[script] +read -sv tristates.v +pass: prep -top top_pass +fail: prep -top top_fail +flatten; tribuf -formal + +[files] +tristates.v diff --git a/docs/examples/tristate/tristates.v b/docs/examples/tristate/tristates.v new file mode 100644 index 00000000..a6be03e3 --- /dev/null +++ b/docs/examples/tristate/tristates.v @@ -0,0 +1,18 @@ +`default_nettype none +module module1 (input wire active, output wire tri_out); + assign tri_out = active ? 
1'b0 : 1'bz; +endmodule + +module module2 (input wire active, output wire tri_out); + assign tri_out = active ? 1'b0 : 1'bz; +endmodule + +module top_pass (input wire clk, input wire active1, input wire active2, output wire out); + module1 module1 (.active(active1), .tri_out(out)); + module2 module2 (.active(!active1), .tri_out(out)); +endmodule + +module top_fail (input wire clk, input wire active1, input wire active2, output wire out); + module1 module1 (.active(active1), .tri_out(out)); + module2 module2 (.active(active2), .tri_out(out)); +endmodule From 7efabe828abdd1cf447cbd39cde0c4b16200a93a Mon Sep 17 00:00:00 2001 From: Matt Venn Date: Mon, 13 Jun 2022 13:59:12 +0200 Subject: [PATCH 078/220] expect fail --- docs/examples/tristate/tristate.sby | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/examples/tristate/tristate.sby b/docs/examples/tristate/tristate.sby index 79707742..f85e937c 100644 --- a/docs/examples/tristate/tristate.sby +++ b/docs/examples/tristate/tristate.sby @@ -3,6 +3,7 @@ pass fail [options] +fail: expect fail mode prove depth 5 From 687ee0f011289b4f7eced050ca216720ec3ddc0c Mon Sep 17 00:00:00 2001 From: Matt Venn Date: Mon, 13 Jun 2022 14:00:08 +0200 Subject: [PATCH 079/220] remove unused module port --- docs/examples/tristate/tristates.v | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/examples/tristate/tristates.v b/docs/examples/tristate/tristates.v index a6be03e3..a41ffc22 100644 --- a/docs/examples/tristate/tristates.v +++ b/docs/examples/tristate/tristates.v @@ -7,7 +7,7 @@ module module2 (input wire active, output wire tri_out); assign tri_out = active ? 1'b0 : 1'bz; endmodule -module top_pass (input wire clk, input wire active1, input wire active2, output wire out); +module top_pass (input wire clk, input wire active1, output wire out); module1 module1 (.active(active1), .tri_out(out)); module2 module2 (.active(!active1), .tri_out(out)); endmodule From b88d7a13fb6b62ba77cfb90c3e0c34a79e3d4245 Mon Sep 17 00:00:00 2001 From: Matt Venn Date: Tue, 14 Jun 2022 15:35:22 +0200 Subject: [PATCH 080/220] add makefile for test --- docs/examples/tristate/Makefile | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 docs/examples/tristate/Makefile diff --git a/docs/examples/tristate/Makefile b/docs/examples/tristate/Makefile new file mode 100644 index 00000000..11735663 --- /dev/null +++ b/docs/examples/tristate/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/tristate +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk From 05d963b0df224767c8fc0e09e9d93821218e1cfa Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 14 Jun 2022 17:15:32 +0200 Subject: [PATCH 081/220] aiger: check supported modes and aigbmc fixes --- sbysrc/sby_engine_aiger.py | 13 +++++++++++-- sbysrc/sby_mode_bmc.py | 4 ++++ tests/unsorted/bmc_len.sby | 36 ++++++++++++++++++++++++++++++++++++ 3 files changed, 51 insertions(+), 2 deletions(-) create mode 100644 tests/unsorted/bmc_len.sby diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index 46656915..2850d461 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -28,16 +28,25 @@ def run(mode, task, engine_idx, engine): for o, a in opts: task.error("Unexpected AIGER engine options.") + status_2 = "UNKNOWN" + if solver_args[0] == "suprove": + if mode not in ["live", "prove"]: + task.error("The aiger solver 'suprove' is only supported in live and prove modes.") if mode == "live" and (len(solver_args) == 1 or solver_args[1][0] != "+"): solver_args.insert(1, 
"+simple_liveness") solver_cmd = " ".join([task.exe_paths["suprove"]] + solver_args[1:]) elif solver_args[0] == "avy": + if mode != "prove": + task.error("The aiger solver 'avy' is only supported in prove mode.") solver_cmd = " ".join([task.exe_paths["avy"], "--cex", "-"] + solver_args[1:]) elif solver_args[0] == "aigbmc": - solver_cmd = " ".join([task.exe_paths["aigbmc"]] + solver_args[1:]) + if mode != "bmc": + task.error("The aiger solver 'aigbmc' is only supported in bmc mode.") + solver_cmd = " ".join([task.exe_paths["aigbmc"], str(task.opt_depth - 1)] + solver_args[1:]) + status_2 = "PASS" # aigbmc outputs status 2 when BMC passes else: task.error(f"Invalid solver command {solver_args[0]}.") @@ -76,7 +85,7 @@ def output_callback(line): print(line, file=aiw_file) if line == "0": proc_status = "PASS" if line == "1": proc_status = "FAIL" - if line == "2": proc_status = "UNKNOWN" + if line == "2": proc_status = status_2 return None diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index fd128edf..85ef882f 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -39,6 +39,10 @@ def run(task): import sby_engine_abc sby_engine_abc.run("bmc", task, engine_idx, engine) + elif engine[0] == "aiger": + import sby_engine_aiger + sby_engine_aiger.run("bmc", task, engine_idx, engine) + elif engine[0] == "btor": import sby_engine_btor sby_engine_btor.run("bmc", task, engine_idx, engine) diff --git a/tests/unsorted/bmc_len.sby b/tests/unsorted/bmc_len.sby new file mode 100644 index 00000000..938a1bdc --- /dev/null +++ b/tests/unsorted/bmc_len.sby @@ -0,0 +1,36 @@ +[tasks] +smtbmc_pass: smtbmc pass +smtbmc_fail: smtbmc fail +aigbmc_pass: aigbmc pass +aigbmc_fail: aigbmc fail +btormc_pass: btormc pass +btormc_fail: btormc fail +abc_pass: abc pass +abc_fail: abc fail + +[options] +mode bmc +pass: expect pass +fail: expect fail +pass: depth 5 +fail: depth 6 + +[engines] +smtbmc: smtbmc boolector +aigbmc: aiger aigbmc +btormc: btor btormc +abc: abc bmc3 + +[script] +read -formal top.sv +prep -top top + +[file top.sv] +module top(input clk); + reg [7:0] counter = 0; + + always @(posedge clk) begin + counter <= counter + 1; + assert (counter < 4); + end +endmodule From 141ffd34a5b3f08f3f5395f3c588615550df8bd7 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 14 Jun 2022 17:56:54 +0200 Subject: [PATCH 082/220] btor pono: improve option handling Fail on the unsupported skip option and pass solver args to pono. 
--- sbysrc/sby_engine_btor.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index 7985b324..0fe577b4 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -46,7 +46,10 @@ def run(mode, task, engine_idx, engine): elif solver_args[0] == "pono": if random_seed: task.error("Setting the random seed is not available for the pono solver.") + if task.opt_skip is not None: + task.error("The btor engine supports the option skip only for the btormc solver.") solver_cmd = task.exe_paths["pono"] + f" --witness -v 1 -e bmc -k {task.opt_depth - 1}" + solver_cmd += " ".join([""] + solver_args[1:]) else: task.error(f"Invalid solver command {solver_args[0]}.") From e99884e319e7482c442e519cee4e059f6abd7a21 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 15 Jun 2022 12:10:52 +0200 Subject: [PATCH 083/220] SbyProc: New error_callback instead of exit_callback for failing procs --- sbysrc/sby_core.py | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ab10614f..55b582ec 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -87,6 +87,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.output_callback = None self.exit_callback = None + self.error_callback = None def register_dep(self, next_proc): if self.finished: @@ -115,6 +116,14 @@ def handle_exit(self, retcode): if self.exit_callback is not None: self.exit_callback(retcode) + def handle_error(self, retcode): + if self.terminated: + return + if self.logfile is not None: + self.logfile.close() + if self.error_callback is not None: + self.error_callback(retcode) + def terminate(self, timeout=False): if self.task.opt_wait and not timeout: return @@ -185,20 +194,22 @@ def preexec_fn(): self.task.status = "ERROR" if not self.silent: self.task.log(f"{self.info}: COMMAND NOT FOUND. ERROR.") + self.handle_error(self.p.returncode) self.terminated = True self.task.terminate() return - self.handle_exit(self.p.returncode) - if self.checkretcode and self.p.returncode != 0: self.task.status = "ERROR" if not self.silent: self.task.log(f"{self.info}: task failed. 
ERROR.") + self.handle_error(self.p.returncode) self.terminated = True self.task.terminate() return + self.handle_exit(self.p.returncode) + self.finished = True for next_proc in self.notify: next_proc.poll() @@ -503,14 +514,15 @@ def print_common_prep(): proc.checkretcode = True def instance_hierarchy_callback(retcode): - if retcode != 0: - self.precise_prop_status = False - return if self.design_hierarchy == None: with open(f"{self.workdir}/model/design.json") as f: self.design_hierarchy = design_hierarchy(f) + def instance_hierarchy_error_callback(retcode): + self.precise_prop_status = False + proc.exit_callback = instance_hierarchy_callback + proc.error_callback = instance_hierarchy_error_callback return [proc] From d0c59a3155abf9a1adf6564303da6fa909aca0cd Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 14 Jun 2022 17:59:08 +0200 Subject: [PATCH 084/220] Don't use python asserts to handle unexpected solver output --- sbysrc/sby_core.py | 3 ++- sbysrc/sby_engine_abc.py | 13 ++++++++----- sbysrc/sby_engine_aiger.py | 16 ++++++++-------- sbysrc/sby_engine_btor.py | 19 +++++++++---------- 4 files changed, 27 insertions(+), 24 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 55b582ec..eec0fe66 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -52,6 +52,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.finished = False self.terminated = False self.checkretcode = False + self.retcodes = [0] self.task = task self.info = info self.deps = deps @@ -199,7 +200,7 @@ def preexec_fn(): self.task.terminate() return - if self.checkretcode and self.p.returncode != 0: + if self.checkretcode and self.p.returncode not in self.retcodes: self.task.status = "ERROR" if not self.silent: self.task.log(f"{self.info}: task failed. 
ERROR.") diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index 10e12687..4635ee17 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -52,6 +52,7 @@ def run(mode, task, engine_idx, engine): f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w") ) + proc.checkretcode = True proc.noprintregex = re.compile(r"^\.+$") proc_status = None @@ -77,8 +78,8 @@ def output_callback(line): return line def exit_callback(retcode): - assert retcode == 0 - assert proc_status is not None + if proc_status is None: + task.error(f"engine_{engine_idx}: Could not determine engine status.") task.update_status(proc_status) task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}") @@ -112,9 +113,11 @@ def output_callback2(line): return line - def exit_callback2(line): - assert proc2_status is not None - assert proc2_status == "FAIL" + def exit_callback2(retcode): + if proc2_status is None: + task.error(f"engine_{engine_idx}: Could not determine aigsmt status.") + if proc2_status != "FAIL": + task.error(f"engine_{engine_idx}: Unexpected aigsmt status.") if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd") diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index 2850d461..e392932e 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -58,6 +58,8 @@ def run(mode, task, engine_idx, engine): f"cd {task.workdir}; {solver_cmd} model/design_aiger.aig", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w") ) + if solver_args[0] not in ["avy"]: + proc.checkretcode = True proc_status = None produced_cex = False @@ -90,9 +92,8 @@ def output_callback(line): return None def exit_callback(retcode): - if solver_args[0] not in ["avy"]: - assert retcode == 0 - assert proc_status is not None + if proc_status is None: + task.error(f"engine_{engine_idx}: Could not determine engine status.") aiw_file.close() @@ -143,11 +144,10 @@ def output_callback2(line): return line def exit_callback2(line): - assert proc2_status is not None - if mode == "live": - assert proc2_status == "PASS" - else: - assert proc2_status == "FAIL" + if proc2_status is None: + task.error(f"engine_{engine_idx}: Could not determine aigsmt status.") + if proc2_status != ("PASS" if mode == "live" else "FAIL"): + task.error(f"engine_{engine_idx}: Unexpected aigsmt status.") if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd") diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index 0fe577b4..9670a1bd 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -113,8 +113,6 @@ def output_callback2(line): def make_exit_callback(suffix): def exit_callback2(retcode): - assert retcode == 0 - vcdpath = f"{task.workdir}/engine_{engine_idx}/trace{suffix}.vcd" if os.path.exists(vcdpath): common_state.produced_traces.append(f"""{"" if mode == "cover" else "counterexample "}trace: {vcdpath}""") @@ -131,13 +129,15 @@ def output_callback(line): match = re.search(r"calling BMC on ([0-9]+) properties", line) if match: common_state.expected_cex = int(match[1]) - assert common_state.produced_cex == 0 + if common_state.produced_cex != 0: + 
task.error(f"engine_{engine_idx}: Unexpected engine output (property count).") else: task.error(f"engine_{engine_idx}: BTOR solver '{solver_args[0]}' is currently not supported in cover mode.") if (common_state.produced_cex < common_state.expected_cex) and line == "sat": - assert common_state.wit_file == None + if common_state.wit_file != None: + task.error(f"engine_{engine_idx}: Unexpected engine output (sat).") if common_state.expected_cex == 1: common_state.wit_file = open(f"{task.workdir}/engine_{engine_idx}/trace.wit", "w") else: @@ -196,12 +196,9 @@ def output_callback(line): return None def exit_callback(retcode): - if solver_args[0] == "pono": - assert retcode in [0, 1, 255] # UNKNOWN = -1, FALSE = 0, TRUE = 1, ERROR = 2 - else: - assert retcode == 0 if common_state.expected_cex != 0: - assert common_state.solver_status is not None + if common_state.solver_status is None: + task.error(f"engine_{engine_idx}: Could not determine engine status.") if common_state.solver_status == "unsat": if common_state.expected_cex == 1: @@ -222,7 +219,9 @@ def exit_callback(retcode): f"cd {task.workdir}; {solver_cmd} model/design_btor{'_single' if solver_args[0]=='pono' else ''}.btor", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w") ) - + proc.checkretcode = True + if solver_args[0] == "pono": + proc.retcodes = [0, 1, 255] # UNKNOWN = -1, FALSE = 0, TRUE = 1, ERROR = 2 proc.output_callback = output_callback proc.exit_callback = exit_callback common_state.running_procs += 1 From d1c04f79d64a07097556cbccf765bcb0e09dee96 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 25 Apr 2022 15:43:59 +0200 Subject: [PATCH 085/220] Use monotonic clock for timeouts --- sbysrc/sby_core.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index eec0fe66..2e092c42 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -22,7 +22,7 @@ import subprocess from shutil import copyfile, copytree, rmtree from select import select -from time import time, localtime, sleep, strftime +from time import monotonic, localtime, sleep, strftime from sby_design import SbyProperty, SbyModule, design_hierarchy all_procs_running = [] @@ -349,7 +349,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir): self.procs_running = [] self.procs_pending = [] - self.start_clock_time = time() + self.start_clock_time = monotonic() if os.name == "posix": ru = resource.getrusage(resource.RUSAGE_CHILDREN) @@ -392,7 +392,7 @@ def taskloop(self): proc.poll() if self.opt_timeout is not None: - total_clock_time = int(time() - self.start_clock_time) + total_clock_time = int(monotonic() - self.start_clock_time) if total_clock_time > self.opt_timeout: self.log(f"Reached TIMEOUT ({self.opt_timeout} seconds). 
Terminating all subprocesses.") self.status = "TIMEOUT" @@ -734,7 +734,7 @@ def run(self, setupmode): self.taskloop() - total_clock_time = int(time() - self.start_clock_time) + total_clock_time = int(monotonic() - self.start_clock_time) if os.name == "posix": ru = resource.getrusage(resource.RUSAGE_CHILDREN) From 0fe8c223cf6db6f88694c3f703c0864345f0bb49 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 21 Apr 2022 16:22:32 +0200 Subject: [PATCH 086/220] Decouple taskloop from task --- sbysrc/sby.py | 10 ++- sbysrc/sby_core.py | 157 +++++++++++++++++++++++++++++++++------------ 2 files changed, 119 insertions(+), 48 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index d9e0a5c9..f3eca9b5 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -458,13 +458,11 @@ def run_task(taskname): for k, v in exe_paths.items(): task.exe_paths[k] = v - if throw_err: + try: task.run(setupmode) - else: - try: - task.run(setupmode) - except SbyAbort: - pass + except SbyAbort: + if throw_err: + raise if my_opt_tmpdir: task.log(f"Removing directory '{my_workdir}'.") diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 2e092c42..d133786c 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -51,6 +51,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.running = False self.finished = False self.terminated = False + self.exited = False self.checkretcode = False self.retcodes = [0] self.task = task @@ -81,7 +82,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.logstderr = logstderr self.silent = silent - self.task.procs_pending.append(self) + self.task.update_proc_pending(self) for dep in self.deps: dep.register_dep(self) @@ -90,6 +91,9 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.exit_callback = None self.error_callback = None + if self.task.timeout_reached: + self.terminate(True) + def register_dep(self, next_proc): if self.finished: next_proc.poll() @@ -137,12 +141,19 @@ def terminate(self, timeout=False): except PermissionError: pass self.p.terminate() - self.task.procs_running.remove(self) - all_procs_running.remove(self) + self.task.update_proc_stopped(self) + elif not self.finished and not self.terminated and not self.exited: + self.task.update_proc_canceled(self) self.terminated = True - def poll(self): - if self.finished or self.terminated: + def poll(self, force_unchecked=False): + if self.task.task_local_abort and not force_unchecked: + try: + self.poll(True) + except SbyAbort: + self.task.terminate(True) + return + if self.finished or self.terminated or self.exited: return if not self.running: @@ -168,9 +179,7 @@ def preexec_fn(): self.p = subprocess.Popen(self.cmdline, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=(subprocess.STDOUT if self.logstderr else None)) - self.task.procs_pending.remove(self) - self.task.procs_running.append(self) - all_procs_running.append(self) + self.task.update_proc_running(self) self.running = True return @@ -187,26 +196,24 @@ def preexec_fn(): if self.p.poll() is not None: if not self.silent: self.task.log(f"{self.info}: finished (returncode={self.p.returncode})") - self.task.procs_running.remove(self) - all_procs_running.remove(self) + self.task.update_proc_stopped(self) self.running = False + self.exited = True if self.p.returncode == 127: - self.task.status = "ERROR" if not self.silent: self.task.log(f"{self.info}: COMMAND NOT FOUND. 
ERROR.") self.handle_error(self.p.returncode) self.terminated = True - self.task.terminate() + self.task.proc_failed(self) return if self.checkretcode and self.p.returncode not in self.retcodes: - self.task.status = "ERROR" if not self.silent: self.task.log(f"{self.info}: task failed. ERROR.") self.handle_error(self.p.returncode) self.terminated = True - self.task.terminate() + self.task.proc_failed(self) return self.handle_exit(self.p.returncode) @@ -321,8 +328,55 @@ def parse_config(self, f): def error(self, logmessage): raise SbyAbort(logmessage) + +class SbyTaskloop: + def __init__(self): + self.procs_pending = [] + self.procs_running = [] + self.tasks = [] + self.poll_now = False + + def run(self): + for proc in self.procs_pending: + proc.poll() + + while len(self.procs_running) or self.poll_now: + fds = [] + for proc in self.procs_running: + if proc.running: + fds.append(proc.p.stdout) + + if not self.poll_now: + if os.name == "posix": + try: + select(fds, [], [], 1.0) == ([], [], []) + except InterruptedError: + pass + else: + sleep(0.1) + self.poll_now = False + + for proc in self.procs_running: + proc.poll() + + for proc in self.procs_pending: + proc.poll() + + tasks = self.tasks + self.tasks = [] + for task in tasks: + task.check_timeout() + if task.procs_pending or task.procs_running: + self.tasks.append(task) + else: + task.exit_callback() + + for task in self.tasks: + task.exit_callback() + + class SbyTask(SbyConfig): - def __init__(self, sbyconfig, workdir, early_logs, reusedir): + def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None): super().__init__() self.used_options = set() self.models = dict() @@ -333,6 +387,8 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir): self.expect = list() self.design_hierarchy = None self.precise_prop_status = False + self.timeout_reached = False + self.task_local_abort = False yosys_program_prefix = "" ##yosys-program-prefix## self.exe_paths = { @@ -346,6 +402,9 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir): "pono": os.getenv("PONO", "pono"), } + self.taskloop = taskloop or SbyTaskloop() + self.taskloop.tasks.append(self) + self.procs_running = [] self.procs_pending = [] @@ -367,36 +426,34 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir): for line in sbyconfig: print(line, file=f) - def taskloop(self): - for proc in self.procs_pending: - proc.poll() + def check_timeout(self): + if self.opt_timeout is not None: + total_clock_time = int(monotonic() - self.start_clock_time) + if total_clock_time > self.opt_timeout: + self.log(f"Reached TIMEOUT ({self.opt_timeout} seconds). 
Terminating all subprocesses.") + self.status = "TIMEOUT" + self.terminate(timeout=True) - while len(self.procs_running): - fds = [] - for proc in self.procs_running: - if proc.running: - fds.append(proc.p.stdout) + def update_proc_pending(self, proc): + self.procs_pending.append(proc) + self.taskloop.procs_pending.append(proc) - if os.name == "posix": - try: - select(fds, [], [], 1.0) == ([], [], []) - except InterruptedError: - pass - else: - sleep(0.1) + def update_proc_running(self, proc): + self.procs_pending.remove(proc) + self.taskloop.procs_pending.remove(proc) - for proc in self.procs_running: - proc.poll() + self.procs_running.append(proc) + self.taskloop.procs_running.append(proc) + all_procs_running.append(proc) - for proc in self.procs_pending: - proc.poll() + def update_proc_stopped(self, proc): + self.procs_running.remove(proc) + self.taskloop.procs_running.remove(proc) + all_procs_running.remove(proc) - if self.opt_timeout is not None: - total_clock_time = int(monotonic() - self.start_clock_time) - if total_clock_time > self.opt_timeout: - self.log(f"Reached TIMEOUT ({self.opt_timeout} seconds). Terminating all subprocesses.") - self.status = "TIMEOUT" - self.terminate(timeout=True) + def update_proc_canceled(self, proc): + self.procs_pending.remove(proc) + self.taskloop.procs_pending.remove(proc) def log(self, logmessage): tm = localtime() @@ -632,8 +689,17 @@ def model(self, model_name): return self.models[model_name] def terminate(self, timeout=False): + if timeout: + self.timeout_reached = True for proc in list(self.procs_running): proc.terminate(timeout=timeout) + for proc in list(self.procs_pending): + proc.terminate(timeout=timeout) + + def proc_failed(self, proc): + # proc parameter used by autotune override + self.status = "ERROR" + self.terminate() def update_status(self, new_status): assert new_status in ["PASS", "FAIL", "UNKNOWN", "ERROR"] @@ -659,6 +725,11 @@ def update_status(self, new_status): assert 0 def run(self, setupmode): + self.setup_procs(setupmode) + if not setupmode: + self.taskloop.run() + + def setup_procs(self, setupmode): with open(f"{self.workdir}/config.sby", "r") as f: self.parse_config(f) @@ -732,8 +803,7 @@ def run(self, setupmode): if opt not in self.used_options: self.error(f"Unused option: {opt}") - self.taskloop() - + def summarize(self): total_clock_time = int(monotonic() - self.start_clock_time) if os.name == "posix": @@ -772,6 +842,9 @@ def run(self, setupmode): for line in self.summary: print(line, file=f) + def exit_callback(self): + self.summarize() + def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): junit_time = strftime('%Y-%m-%dT%H:%M:%S') if self.precise_prop_status: From 0308142fa4db2ddd23bda4dca038ae6016b2e1fd Mon Sep 17 00:00:00 2001 From: George Rennie Date: Sun, 19 Jun 2022 00:49:12 +0100 Subject: [PATCH 087/220] Use default prefix directory when no task is specified --- sbysrc/sby.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index f3eca9b5..e079ff6c 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -402,7 +402,10 @@ def run_task(taskname): if workdir is not None: my_workdir = workdir elif workdir_prefix is not None: - my_workdir = workdir_prefix + "_" + taskname + if taskname is None: + my_workdir = workdir_prefix + else: + my_workdir = workdir_prefix + "_" + taskname if my_workdir is None and sbyfile is not None and not my_opt_tmpdir: my_workdir = sbyfile[:-4] From d8ebd1eb9d6ae580a18a6c7933cea9c2a137f034 Mon Sep 17 
00:00:00 2001 From: Jannis Harder Date: Mon, 20 Jun 2022 14:59:00 +0200 Subject: [PATCH 088/220] Reflect recent engine updates in the reference docs --- docs/source/reference.rst | 79 +++++++++++++++++++++------------------ 1 file changed, 42 insertions(+), 37 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 9cbf78bc..8d003143 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -213,6 +213,8 @@ The following mode/engine/solver combinations are currently supported: | | ``abc bmc3`` | | | | | | ``abc sim3`` | +| | | +| | ``aiger smtbmc`` | +-----------+--------------------------+ | ``prove`` | ``smtbmc [all solvers]`` | | | | @@ -227,8 +229,6 @@ The following mode/engine/solver combinations are currently supported: | | ``btor btormc`` | +-----------+--------------------------+ | ``live`` | ``aiger suprove`` | -| | | -| | ``aiger avy`` | +-----------+--------------------------+ ``smtbmc`` engine @@ -237,34 +237,37 @@ The following mode/engine/solver combinations are currently supported: The ``smtbmc`` engine supports the ``bmc``, ``prove``, and ``cover`` modes and supports the following options: -+-----------------+---------------------------------------------------------+ -| Option | Description | -+=================+=========================================================+ -| ``--nomem`` | Don't use the SMT theory of arrays to model memories. | -| | Instead synthesize memories to registers and address | -| | logic. | -+-----------------+---------------------------------------------------------+ -| ``--syn`` | Synthesize the circuit to a gate-level representation | -| | instead of using word-level SMT operators. This also | -| | runs some low-level logic optimization on the circuit. | -+-----------------+---------------------------------------------------------+ -| ``--stbv`` | Use large bit vectors (instead of uninterpreted | -| | functions) to represent the circuit state. | -+-----------------+---------------------------------------------------------+ -| ``--stdt`` | Use SMT-LIB 2.6 datatypes to represent states. | -+-----------------+---------------------------------------------------------+ -| ``--nopresat`` | Do not run "presat" SMT queries that make sure that | -| | assumptions are non-conflicting (and potentially | -| | warmup the SMT solver). | -+-----------------+---------------------------------------------------------+ -| ``--unroll``, | Disable/enable unrolling of the SMT problem. The | -| ``--nounroll`` | default value depends on the solver being used. | -+-----------------+---------------------------------------------------------+ -| ``--dumpsmt2`` | Write the SMT2 trace to an additional output file. | -| | (Useful for benchmarking and troubleshooting.) | -+-----------------+---------------------------------------------------------+ -| ``--progress`` | Enable Yosys-SMTBMC timer display. | -+-----------------+---------------------------------------------------------+ ++------------------+---------------------------------------------------------+ +| Option | Description | ++==================+=========================================================+ +| ``--nomem`` | Don't use the SMT theory of arrays to model memories. | +| | Instead synthesize memories to registers and address | +| | logic. | ++------------------+---------------------------------------------------------+ +| ``--syn`` | Synthesize the circuit to a gate-level representation | +| | instead of using word-level SMT operators. 
This also | +| | runs some low-level logic optimization on the circuit. | ++------------------+---------------------------------------------------------+ +| ``--stbv`` | Use large bit vectors (instead of uninterpreted | +| | functions) to represent the circuit state. | ++------------------+---------------------------------------------------------+ +| ``--stdt`` | Use SMT-LIB 2.6 datatypes to represent states. | ++------------------+---------------------------------------------------------+ +| ``--nopresat`` | Do not run "presat" SMT queries that make sure that | +| | assumptions are non-conflicting (and potentially | +| | warmup the SMT solver). | ++------------------+---------------------------------------------------------+ +| ``--keep-going`` | In BMC mode, continue after the first failed assertion | +| | and report further failed assertions. | ++------------------+---------------------------------------------------------+ +| ``--unroll``, | Disable/enable unrolling of the SMT problem. The | +| ``--nounroll`` | default value depends on the solver being used. | ++------------------+---------------------------------------------------------+ +| ``--dumpsmt2`` | Write the SMT2 trace to an additional output file. | +| | (Useful for benchmarking and troubleshooting.) | ++------------------+---------------------------------------------------------+ +| ``--progress`` | Enable Yosys-SMTBMC timer display. | ++------------------+---------------------------------------------------------+ Any SMT2 solver that is compatible with ``yosys-smtbmc`` can be passed as argument to the ``smtbmc`` engine. The solver options are passed to the solver @@ -272,12 +275,13 @@ as additional command line options. The following solvers are currently supported by ``yosys-smtbmc``: - * yices - * boolector - * bitwuzla - * z3 - * mathsat - * cvc4 +* yices +* boolector +* bitwuzla +* z3 +* mathsat +* cvc4 +* cvc5 Any additional options after ``--`` are passed to ``yosys-smtbmc`` as-is. @@ -295,6 +299,7 @@ The engine supports no engine options and supports the following solvers: | ``pono`` | ``bmc`` | +-------------------------------+---------------------------------+ +Solver options are passed to the solver as additional command line options. ``aiger`` engine ~~~~~~~~~~~~~~~~ @@ -310,7 +315,7 @@ solvers: +-------------------------------+---------------------------------+ | ``avy`` | ``prove`` | +-------------------------------+---------------------------------+ -| ``aigbmc`` | ``prove``, ``live`` | +| ``aigbmc`` | ``bmc`` | +-------------------------------+---------------------------------+ Solver options are passed to the solver as additional command line options. From db740839b737ee55b8b39f1b29780872d32d248a Mon Sep 17 00:00:00 2001 From: Jacob Lifshay Date: Wed, 22 Jun 2022 21:17:29 -0700 Subject: [PATCH 089/220] switch to using hierarchy -smtcheck for smtlib2 solvers, allowing smtlib2_module modules. 
Fixes: #168 Depends on: https://github.com/YosysHQ/yosys/pull/3391 --- sbysrc/sby_core.py | 12 ++++++------ tests/unsorted/blackbox.sby | 31 ++++++++++++++++++++++++++++++ tests/unsorted/smtlib2_module.sby | 32 +++++++++++++++++++++++++++++++ 3 files changed, 69 insertions(+), 6 deletions(-) create mode 100644 tests/unsorted/blackbox.sby create mode 100644 tests/unsorted/smtlib2_module.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ab10614f..3592c31a 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -465,7 +465,7 @@ def make_model(self, model_name): if not os.path.isdir(f"{self.workdir}/model"): os.makedirs(f"{self.workdir}/model") - def print_common_prep(): + def print_common_prep(check): if self.opt_multiclock: print("clk2fflogic", file=f) else: @@ -482,7 +482,7 @@ def print_common_prep(): print("setundef -anyseq", file=f) print("opt -keepdc -fast", file=f) print("check", file=f) - print("hierarchy -simcheck", file=f) + print(f"hierarchy {check}", file=f) if model_name == "base": with open(f"""{self.workdir}/model/design.ys""", "w") as f: @@ -490,7 +490,7 @@ def print_common_prep(): for cmd in self.script: print(cmd, file=f) # the user must designate a top module in [script] - print("hierarchy -simcheck", file=f) + print("hierarchy -smtcheck", file=f) print(f"""write_jny -no-connections ../model/design.json""", file=f) print(f"""write_rtlil ../model/design.il""", file=f) @@ -522,7 +522,7 @@ def instance_hierarchy_callback(retcode): print("memory_map", file=f) else: print("memory_nordff", file=f) - print_common_prep() + print_common_prep("-smtcheck") if "_syn" in model_name: print("techmap", file=f) print("opt -fast", file=f) @@ -555,7 +555,7 @@ def instance_hierarchy_callback(retcode): print("memory_map", file=f) else: print("memory_nordff", file=f) - print_common_prep() + print_common_prep("-simcheck") print("flatten", file=f) print("setundef -undriven -anyseq", file=f) if "_syn" in model_name: @@ -587,7 +587,7 @@ def instance_hierarchy_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print("read_ilang design.il", file=f) print("memory_map", file=f) - print_common_prep() + print_common_prep("-simcheck") print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) diff --git a/tests/unsorted/blackbox.sby b/tests/unsorted/blackbox.sby new file mode 100644 index 00000000..ca9400e2 --- /dev/null +++ b/tests/unsorted/blackbox.sby @@ -0,0 +1,31 @@ +[options] +mode bmc +depth 1 +expect error + +[engines] +smtbmc + +[script] +read_verilog -formal test.v +prep -top top + +[file test.v] +(* blackbox *) +module submod(a, b); + input [7:0] a; + output [7:0] b; +endmodule + +module top; + wire [7:0] a = $anyconst, b; + + submod submod( + .a(a), + .b(b) + ); + + always @* begin + assert(~a == b); + end +endmodule diff --git a/tests/unsorted/smtlib2_module.sby b/tests/unsorted/smtlib2_module.sby new file mode 100644 index 00000000..43dfcb28 --- /dev/null +++ b/tests/unsorted/smtlib2_module.sby @@ -0,0 +1,32 @@ +[options] +mode bmc +depth 1 + +[engines] +smtbmc + +[script] +read_verilog -formal test.v +prep -top top + +[file test.v] +(* blackbox *) +(* smtlib2_module *) +module submod(a, b); + input [7:0] a; + (* smtlib2_comb_expr = "(bvnot a)" *) + output [7:0] b; +endmodule + +module top; + wire [7:0] a = $anyconst, b; + + submod submod( + .a(a), + .b(b) + ); + + always @* begin + assert(~a == b); + end +endmodule From 3dcf7766ea716ddddea428268cae26f004afcf9f Mon Sep 17 00:00:00 2001 From: Jannis Harder 
Date: Thu, 23 Jun 2022 13:15:58 +0200 Subject: [PATCH 090/220] smtbmc: Fix induction trace filename with --keep-going for the basecase --keep-going only applies to the basecase and induction runs without that option, so the trace filename for induction should have no placeholder. --- sbysrc/sby_engine_smtbmc.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 4ec365de..09177634 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -131,7 +131,7 @@ def run(mode, task, engine_idx, engine): smtbmc_opts.append("-c") trace_prefix += "%" - if keep_going: + if keep_going and mode != "prove_induction": smtbmc_opts.append("--keep-going") trace_prefix += "%" From 5014d740232c9a09ddea0f5f7d25dd3fe24de93f Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 14 Jun 2022 18:04:24 +0200 Subject: [PATCH 091/220] sby_design: Extract total memory size and forall usage --- sbysrc/sby_core.py | 8 ++++---- sbysrc/sby_design.py | 26 +++++++++++++++++++++----- sbysrc/sby_engine_smtbmc.py | 10 +++++----- 3 files changed, 30 insertions(+), 14 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index d133786c..cab8feb4 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -385,7 +385,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None): self.status = "UNKNOWN" self.total_time = 0 self.expect = list() - self.design_hierarchy = None + self.design = None self.precise_prop_status = False self.timeout_reached = False self.task_local_abort = False @@ -572,9 +572,9 @@ def print_common_prep(): proc.checkretcode = True def instance_hierarchy_callback(retcode): - if self.design_hierarchy == None: + if self.design == None: with open(f"{self.workdir}/model/design.json") as f: - self.design_hierarchy = design_hierarchy(f) + self.design = design_hierarchy(f) def instance_hierarchy_error_callback(retcode): self.precise_prop_status = False @@ -848,7 +848,7 @@ def exit_callback(self): def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): junit_time = strftime('%Y-%m-%dT%H:%M:%S') if self.precise_prop_status: - checks = self.design_hierarchy.get_property_list() + checks = self.design.hierarchy.get_property_list() junit_tests = len(checks) junit_failures = 0 junit_errors = 0 diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 8fc78955..399ea115 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -99,7 +99,16 @@ def find_property_by_cellname(self, cell_name, trans_dict=dict()): return prop raise KeyError(f"No such property: {cell_name}") + +@dataclass +class SbyDesign: + hierarchy: SbyModule = None + memory_bits: int = 0 + forall: bool = False + + def design_hierarchy(filename): + design = SbyDesign(hierarchy=None) design_json = json.load(filename) def make_mod_hier(instance_name, module_name, hierarchy=""): # print(instance_name,":", module_name) @@ -125,13 +134,19 @@ def make_mod_hier(instance_name, module_name, hierarchy=""): if sort["type"][0] != '$' or sort["type"].startswith("$paramod"): for cell in sort["cells"]: mod.submodules[cell["name"]] = make_mod_hier(cell["name"], sort["type"], hierarchy=sub_hierarchy) + if sort["type"] in ["$mem", "$mem_v2"]: + for cell in sort["cells"]: + design.memory_bits += int(cell["parameters"]["WIDTH"], 2) * int(cell["parameters"]["SIZE"], 2) + if sort["type"] in ["$allconst", "$allseq"]: + design.forall = True + return mod for m in design_json["modules"]: attrs = m["attributes"] if 
"top" in attrs and int(attrs["top"]) == 1: - hierarchy = make_mod_hier(m["name"], m["name"]) - return hierarchy + design.hierarchy = make_mod_hier(m["name"], m["name"]) + return design else: raise ValueError("Cannot find top module") @@ -140,10 +155,11 @@ def main(): if len(sys.argv) != 2: print(f"""Usage: {sys.argv[0]} design.json""") with open(sys.argv[1]) as f: - d = design_hierarchy(f) - print("Design Hierarchy:", d) - for p in d.get_property_list(): + design = design_hierarchy(f) + print("Design Hierarchy:", design.hierarchy) + for p in design.hierarchy.get_property_list(): print("Property:", p) + print("Memory Bits:", design.memory_bits) if __name__ == '__main__': main() diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 09177634..8c11388d 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -194,7 +194,7 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Assert failed in (\S+): (\S+) \((\S+)\)", line) if match: cell_name = match[3] - prop = task.design_hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) + prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" last_prop.append(prop) return line @@ -202,7 +202,7 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) \((\S+)\) in step \d+.", line) if match: cell_name = match[2] - prop = task.design_hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) + prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "PASS" last_prop.append(prop) return line @@ -218,7 +218,7 @@ def output_callback(line): match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\).", line) if match: cell_name = match[2] - prop = task.design_hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) + prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" return line @@ -250,7 +250,7 @@ def exit_callback(retcode): if excess_traces > 0: task.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""") elif proc_status == "PASS" and mode == "bmc": - for prop in task.design_hierarchy: + for prop in task.design.hierarchy: if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": prop.status = "PASS" @@ -285,7 +285,7 @@ def exit_callback(retcode): assert False if task.basecase_pass and task.induction_pass: - for prop in task.design_hierarchy: + for prop in task.design.hierarchy: if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": prop.status = "PASS" task.update_status("PASS") From b4458d43d7f94b81636bc058890d5f699aadf127 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 25 Apr 2022 12:27:18 +0200 Subject: [PATCH 092/220] Automatic engine selection --- sbysrc/sby.py | 14 +- sbysrc/sby_autotune.py | 679 +++++++++++++++++++++++++++++++++++++++ sbysrc/sby_core.py | 47 ++- sbysrc/sby_mode_bmc.py | 3 +- sbysrc/sby_mode_cover.py | 3 +- sbysrc/sby_mode_live.py | 3 +- sbysrc/sby_mode_prove.py | 3 +- 7 files changed, 731 insertions(+), 21 deletions(-) create mode 100644 sbysrc/sby_autotune.py diff --git a/sbysrc/sby.py b/sbysrc/sby.py index e079ff6c..b802b7a7 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -46,6 +46,10 @@ def __call__(self, parser, namespace, values, option_string=None): help="add taskname (useful when sby file is read from stdin)") parser.add_argument("-E", action="store_true", 
dest="throw_err", help="throw an exception (incl stack trace) for most errors") +parser.add_argument("--autotune", action="store_true", dest="autotune", + help="automatically find a well performing engine and engine configuration for each task") +parser.add_argument("--autotune-config", dest="autotune_config", + help="read an autotune configuration file (overrides the sby file's autotune options)") parser.add_argument("--yosys", metavar="", action=DictAction, dest="exe_paths") @@ -108,6 +112,8 @@ def __call__(self, parser, namespace, values, option_string=None): dump_files = args.dump_files reusedir = False setupmode = args.setupmode +autotune = args.autotune +autotune_config = args.autotune_config init_config_file = args.init_config_file if sbyfile is not None: @@ -462,7 +468,11 @@ def run_task(taskname): task.exe_paths[k] = v try: - task.run(setupmode) + if autotune: + import sby_autotune + sby_autotune.SbyAutotune(task, autotune_config).run() + else: + task.run(setupmode) except SbyAbort: if throw_err: raise @@ -477,7 +487,7 @@ def run_task(taskname): task.log(f"DONE ({task.status}, rc={task.retcode})") task.logfile.close() - if not my_opt_tmpdir and not setupmode: + if not my_opt_tmpdir and not setupmode and not autotune: with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: task.print_junit_result(f, junit_ts_name, junit_tc_name, junit_format_strict=False) diff --git a/sbysrc/sby_autotune.py b/sbysrc/sby_autotune.py new file mode 100644 index 00000000..9e2f28c0 --- /dev/null +++ b/sbysrc/sby_autotune.py @@ -0,0 +1,679 @@ +# +# SymbiYosys (sby) -- Front-end for Yosys-based formal verification flows +# +# Copyright (C) 2022 Jannis Harder +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# + +import os +import re +import subprocess +from shutil import rmtree, which +from time import monotonic +from sby_core import SbyAbort, SbyTask + + +class SbyAutotuneConfig: + """Autotune configuration parsed from the sby file or an external autotune config + file. 
+ """ + def __init__(self): + self.timeout = None + self.soft_timeout = 60 + self.wait_percentage = 50 + self.wait_seconds = 10 + self.parallel = "auto" + + self.presat = None + self.incr = "auto" + self.incr_threshold = 20 + self.mem = "auto" + self.mem_threshold = 10240 + self.forall = "auto" + + def config_line(self, log, line, file_kind="sby file"): + option, *arg = line.split(None, 1) + if not arg: + log.error(f"{file_kind} syntax error: {line}") + arg = arg[0].strip() + + BOOL_OR_ANY = {"on": True, "off": False, "any": None} + BOOL_ANY_OR_AUTO = {"on": True, "off": False, "any": None, "auto": "auto"} + ON_ANY_OR_AUTO = {"on": True, "any": None, "auto": "auto"} + + def enum_option(values): + if arg not in values: + values_str = ', '.join(repr(value) for value in sorted(values)) + log.error(f"{file_kind}: invalid value '{arg}' for autotune option {option}, valid values are: {values_str}") + return values[arg] + + def int_option(): + try: + return int(arg) + except ValueError: + log.error(f"{file_kind}: invalid value '{arg}' for autotune option {option}, expected an integer value") + + if option == "timeout": + self.timeout = "none" if arg == "none" else int_option() + elif option == "soft_timeout": + self.soft_timeout = int_option() + elif option == "wait": + self.wait_percentage = 0 + self.wait_seconds = 0 + for part in arg.split("+"): + part = part.strip() + if part.endswith("%"): + self.wait_percentage += int(part[:-1].strip()) + else: + self.wait_seconds += int(part) + elif option == "parallel": + self.parallel = "auto" if arg == "auto" else int_option() + elif option == "presat": + self.presat = enum_option(BOOL_OR_ANY) + elif option == "incr": + self.incr = enum_option(BOOL_ANY_OR_AUTO) + elif option == "incr_threshold": + self.incr_threshold = int_option() + elif option == "mem": + self.mem = enum_option(ON_ANY_OR_AUTO) + elif option == "mem_threshold": + self.mem_threshold = int_option() + elif option == "forall": + self.forall = enum_option(ON_ANY_OR_AUTO) + else: + log.error(f"{file_kind} syntax error: {line}") + + def parse_file(self, log, file): + for line in file: + line = re.sub(r"\s*(\s#.*)?$", "", line) + if line == "" or line[0] == "#": + continue + self.config_line(log, line.rstrip(), "autotune configuration file") + +class SbyAutotuneCandidate: + """An engine configuration to try and its current state during autotuning. 
+ """ + def __init__(self, autotune, engine): + self.autotune = autotune + self.engine = engine + + self.state = "pending" + self.engine_idx = None + self.info = f"{' '.join(self.engine)}:" + self.suspended = 0 + self.suspend = 1 + + self.engine_retcode = None + self.engine_status = None + self.total_adjusted_time = None + + self.soft_timeout = self.autotune.config.soft_timeout + + if tuple(self.engine) not in self.autotune.candidate_engines: + self.autotune.active_candidates.append(self) + self.autotune.candidate_engines.add(tuple(self.engine)) + + def set_engine_idx(self, idx): + self.engine_idx = idx + self.info = f"engine_{idx} ({' '.join(self.engine)}):" + + def set_running(self): + assert not self.suspended + assert self.state == "pending" + assert self in self.autotune.active_candidates + self.state = "running" + + def retry_later(self): + assert self.state == "running" + assert self in self.autotune.active_candidates + self.state = "pending" + self.soft_timeout *= 2 + self.suspended = self.suspend + + def timed_out(self): + assert self.state == "running" + self.autotune.active_candidates.remove(self) + self.state = "timeout" + + def failed(self): + assert self.state == "running" + self.autotune.active_candidates.remove(self) + self.autotune.failed_candidates.append(self) + self.state = "failed" + + def finished(self): + assert self.state == "running" + self.autotune.active_candidates.remove(self) + self.autotune.finished_candidates.append(self) + self.state = "finished" + + def threads(self): + if self.autotune.config.mode == "prove" and self.engine[0] == "smtbmc": + return 2 + return 1 + + +class SbyAutotune: + """Performs automatic engine selection for a given task. + """ + def __init__(self, task, config_file=None): + task.exit_callback = lambda: None + task.check_timeout = lambda: None + task.status = "TIMEOUT" + task.retcode = 8 + + task.proc_failed = self.proc_failed + + self.config = None + + if config_file: + with open(config_file) as config: + self.config.parse_file(task, config) + + self.task = task + + self.done = False + self.threads_running = 0 + + self.next_engine_idx = 0 + + self.model_requests = {} + + self.timeout = None + self.best_time = None + self.have_pending_candidates = False + + self.active_candidates = [] + self.finished_candidates = [] + self.failed_candidates = [] + + self.candidate_engines = set() + + def available(self, tool): + if not which(tool): + return False + + if tool == "btorsim": + error_msg = subprocess.run( + ["btorsim", "--vcd"], + capture_output=True, + text=True, + ).stderr + if "invalid command line option" in error_msg: + self.log('found version of "btorsim" is too old and does not support the --vcd option') + return False + + return True + + def candidate(self, *engine): + flat_engine = [] + def flatten(part): + if part is None: + return + elif isinstance(part, (tuple, list)): + for subpart in part: + flatten(subpart) + else: + flat_engine.append(part) + + flatten(engine) + + SbyAutotuneCandidate(self, flat_engine) + + def configure(self): + self.config.mode = self.task.opt_mode + self.config.skip = self.task.opt_skip + + if self.config.incr == "auto": + self.config.incr = None + if self.config.mode != "live": + steps = self.task.opt_depth - (self.config.skip or 0) + if steps > self.config.incr_threshold: + self.log(f"checking more than {self.config.incr_threshold} timesteps ({steps}), disabling nonincremental smtbmc") + self.config.incr = True + + if self.config.mem == "auto": + self.config.mem = None + if self.task.design is None: + 
self.log("warning: unknown amount of memory bits in design") + elif self.task.design.memory_bits > self.config.mem_threshold: + self.log( + f"more than {self.config.mem_threshold} bits of memory in design ({self.task.design.memory_bits} bits), " + "disabling engines without native memory support" + ) + self.config.mem = True + + if self.config.forall == "auto": + self.config.forall = None + if self.task.design.forall: + self.log("design uses $allconst/$allseq, disabling engines without forall support") + self.config.forall = True + + if self.config.mode not in ["bmc", "prove"]: + self.config.presat = None + + if self.config.parallel == "auto": + try: + self.config.parallel = len(os.sched_getaffinity(0)) + except AttributeError: + self.config.parallel = os.cpu_count() # TODO is this correct? + + if self.config.timeout is None: + self.config.timeout = self.task.opt_timeout + elif self.config.timeout == "none": + self.config.timeout = None + + def build_candidates(self): + if self.config.mode == "live": + # Not much point in autotuning here... + self.candidate("aiger", "suprove") + return + + if self.config.presat is None: + presat_flags = [None, "--nopresat"] + elif self.config.presat: + presat_flags = [None] + else: + presat_flags = ["--nopresat"] + + if self.config.incr is None: + noincr_flags = [None, ["--", "--noincr"]] + elif self.config.incr: + noincr_flags = [None] + else: + noincr_flags = [["--", "--noincr"]] + + if self.config.forall: + self.log('disabling engines "smtbmc boolector" and "smtbmc bitwuzla" as they do not support forall') + else: + for solver in ["boolector", "bitwuzla"]: + if not self.available(solver): + self.log(f'disabling engine "smtbmc {solver}" as the solver "{solver}" was not found') + continue + for noincr in noincr_flags: + for presat in presat_flags: + self.candidate("smtbmc", presat, solver, noincr) + + if not self.available("btorsim"): + self.log('disabling engine "btor" as the "btorsim" tool was not found') + elif self.config.forall: + self.log('disabling engine "btor" as it does not support forall') + else: + if self.config.mode in ["bmc", "cover"]: + if not self.available("btormc"): + self.log('disabling engine "btor btormc" as the "btormc" tool was not found') + elif self.config.presat: + self.log('disabling engine "btor btormc" as it does not support presat checking') + else: + self.candidate("btor", "btormc") + + if self.config.mode == "bmc": + if not self.available("pono"): + self.log('disabling engine "btor btormc" as the "btormc" tool was not found') + elif self.config.presat: + self.log('disabling engine "btor pono" as it does not support presat checking') + elif self.config.skip: + self.log('disabling engine "btor pono" as it does not support the "skip" option') + else: + self.candidate("btor", "pono") + + for solver in ["yices", "z3"]: + if not self.available(solver): + self.log(f'disabling engine "smtbmc {solver}" as the solver "{solver}" was not found') + continue + for unroll in ["--unroll", "--nounroll"]: + if solver == "yices" and self.config.forall: + self.log('disabling engine "smtbmc yices" due to limited forall support') + # TODO yices implicitly uses --noincr for forall problems and + # requires --stbv which does not play well with memory, still test it? 
+ continue + + stmode = "--stdt" if self.config.forall else None + + for noincr in noincr_flags: + for presat in presat_flags: + self.candidate("smtbmc", presat, stmode, unroll, solver, noincr) + + if self.config.mode not in ["bmc", "prove"]: + pass + elif self.config.presat: + self.log('disabling engines "abc" and "aiger" as they do not support presat checking') + elif self.config.forall: + self.log('disabling engines "abc" and "aiger" as they do not support forall') + elif self.config.mem: + self.log('disabling engines "abc" and "aiger" as they do not support memory') + elif self.config.skip: + self.log('disabling engines "abc" and "aiger" as they do not support the "skip" option') + elif self.config.mode == "bmc": + self.candidate("abc", "bmc3") + + if not self.available("aigbmc"): + self.log('disabling engine "aiger aigbmc" as the "aigbmc" tool was not found') + else: + self.candidate("aiger", "aigbmc") + # abc sim3 will never finish + elif self.config.mode == "prove": + self.candidate("abc", "pdr") + + if not self.available("suprove"): + self.log('disabling engine "aiger suprove" as the "suprove" tool was not found') + else: + self.candidate("aiger", "suprove") + # avy seems to crash in the presence of assumptions + + def log(self, message): + self.task.log(message) + + def run(self): + self.task.handle_non_engine_options() + self.config = self.task.autotune_config or SbyAutotuneConfig() + + if "expect" not in self.task.options: + self.task.expect = ["PASS", "FAIL"] + # TODO check that solvers produce consistent results? + + if "TIMEOUT" in self.task.expect: + self.task.error("cannot autotune a task with option 'expect timeout'") + + if self.task.reusedir: + rmtree(f"{self.task.workdir}/model", ignore_errors=True) + else: + self.task.copy_src() + + self.model(None, "base") + self.task.taskloop.run() + + if self.task.status == "ERROR": + return + + self.configure() + + self.build_candidates() + if not self.active_candidates: + self.error("no supported engines found for the current configuration and design") + self.log(f"testing {len(self.active_candidates)} engine configurations...") + + self.start_engines() + self.task.taskloop.run() + + self.finished_candidates.sort(key=lambda candidate: candidate.total_adjusted_time) + + if self.failed_candidates: + self.log("failed engines:") + for candidate in self.failed_candidates: + self.log( + f" engine_{candidate.engine_idx}: {' '.join(candidate.engine)}" + f" (returncode={candidate.engine_retcode} status={candidate.engine_status})" + ) + + if self.finished_candidates: + self.log("finished engines:") + for place, candidate in list(enumerate(self.finished_candidates, 1))[::-1]: + self.log( + f" #{place}: engine_{candidate.engine_idx}: {' '.join(candidate.engine)}" + f" ({candidate.total_adjusted_time} seconds, status={candidate.engine_status})" + ) + + if self.finished_candidates: + self.task.status = "AUTOTUNED" + self.task.retcode = 0 + elif self.failed_candidates: + self.task.status = "FAIL" + self.task.retcode = 2 + + def next_candidate(self, peek=False): + # peek=True is used to check whether we need to timeout running candidates to + # give other candidates a chance. 
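+        # Unsuspended pending candidates are preferred; if only suspended ones
+        # remain (they hit their soft timeout earlier), the least-suspended one
+        # is picked and the suspension counters of all pending candidates are
+        # reduced by that amount, unless we are only peeking.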
+ can_retry = None + + for candidate in self.active_candidates: + if candidate.state == "pending": + if not candidate.suspended: + return candidate + if can_retry is None or can_retry.suspended > candidate.suspended: + can_retry = candidate + + if can_retry and not peek: + shift = can_retry.suspended + for candidate in self.active_candidates: + if candidate.state == "pending": + candidate.suspended -= shift + + return can_retry + + def start_engines(self): + self.task.taskloop.poll_now = True + + while self.threads_running < self.config.parallel: + candidate = self.next_candidate() + if candidate is None: + self.have_pending_candidates = False + return + + candidate_threads = candidate.threads() + if self.threads_running: + if self.threads_running + candidate_threads > self.config.parallel: + break + + candidate.set_running() + candidate.set_engine_idx(self.next_engine_idx) + self.next_engine_idx += 1 + + try: + engine_task = SbyAutotuneTask(self, candidate) + pending = sum(c.state == "pending" for c in self.active_candidates) + self.log(f"{candidate.info} starting... ({pending} configurations pending)") + self.threads_running += candidate_threads + engine_task.setup_procs(False) + except SbyAbort: + pass + + self.have_pending_candidates = bool(self.next_candidate(peek=True)) + + def engine_finished(self, engine_task): + self.threads_running -= engine_task.candidate.threads() + + candidate = engine_task.candidate + + time = candidate.total_adjusted_time + + if engine_task.status == "TIMEOUT": + if self.timeout is None or time < self.timeout: + candidate.retry_later() + self.log(f"{candidate.info} timeout ({time} seconds, will be retried if necessary)") + else: + candidate.timed_out() + self.log(f"{candidate.info} timeout ({time} seconds)") + elif engine_task.retcode: + candidate.failed() + self.log(f"{candidate.info} failed (returncode={candidate.engine_retcode} status={candidate.engine_status})") + else: + candidate.finished() + + self.log(f"{candidate.info} succeeded (status={candidate.engine_status})") + + if self.best_time is None: + self.log(f"{candidate.info} took {time} seconds (first engine to finish)") + self.best_time = time + elif time < self.best_time: + self.log(f"{candidate.info} took {time} seconds (best candidate, previous best: {self.best_time} seconds)") + self.best_time = time + else: + self.log(f"{candidate.info} took {time} seconds") + + new_timeout = int(time + self.config.wait_seconds + time * self.config.wait_percentage // 100) + + if self.timeout is None or new_timeout < self.timeout: + self.timeout = new_timeout + + self.start_engines() + + def model(self, engine_task, name): + if self.task not in self.task.taskloop.tasks: + self.task.taskloop.tasks.append(self.task) + if name in self.model_requests: + request = self.model_requests[name] + else: + self.model_requests[name] = request = SbyModelRequest(self, name) + + request.attach_engine_task(engine_task) + + return request.procs + + def proc_failed(self, proc): + for name, request in self.model_requests.items(): + if proc in request.procs: + for task in request.engine_tasks: + task = task or self.task + task.status = "ERROR" + task.log(f"could not prepare model '{name}', see toplevel logfile") + task.terminate() + pass + + +class SbyModelRequest: + """Handles sharing and canceling of model generation from several SbyAutotuneTask + instances. 
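+
+    The model is generated once and shared: each attached engine task has the
+    preparation time added to its own adjusted time, and generation is
+    cancelled (and restarted on demand) if every attached task detaches before
+    the model is ready.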
+ """ + def __init__(self, autotune, name): + self.autotune = autotune + self.name = name + self.engine_tasks = [] + + autotune.log(f"model '{name}': preparing now...") + + self.make_model() + + def make_model(self): + self.start_time = monotonic() + self.total_time = None + self.min_time = 0 + + self.procs = self.autotune.task.model(self.name) + for proc in self.procs: + proc.register_dep(self) + + def attach_engine_task(self, engine_task): + if self.total_time is None: + if engine_task: + if self.start_time is None: + model_time = 0 + extra_time = self.min_time + else: + model_time = monotonic() - self.start_time + extra_time = max(0, self.min_time - model_time) + + engine_task.model_time += model_time + + engine_task.check_timeout(extra_time) + + if self.start_time is None: + self.make_model() + + self.engine_tasks.append(engine_task) + if engine_task: + engine_task.model_requests.append(self) + + else: + if engine_task: + engine_task.model_time += self.total_time + + def detach_engine_task(self, engine_task): + self.engine_tasks.remove(engine_task) + if not self.engine_tasks and self.total_time is None: + self.autotune.log(f"cancelled model '{self.name}'") + del self.autotune.task.models[self.name] + for proc in self.procs: + proc.terminate(True) + + self.min_time = max(self.min_time, monotonic() - self.start_time) + self.start_time = None + + self.procs = [] + + def poll(self): + if self.total_time is None and all(proc.finished for proc in self.procs): + self.autotune.log(f"prepared model '{self.name}'") + + self.total_time = self.min_time = monotonic() - self.start_time + + +class SbyAutotuneTask(SbyTask): + """Task that shares the workdir with a parent task, runs in parallel to other + autotune tasks and can be cancelled independent from other autotune tasks while + sharing model generation with other tasks. 
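+
+    Engine output goes to a per-candidate ``engine_<idx>_autotune.txt`` file,
+    and the timeout check uses the autotune budget (or the candidate's current
+    soft timeout), including the shared model preparation time.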
+ """ + def __init__(self, autotune, candidate): + task = autotune.task + self.autotune = autotune + self.candidate = candidate + super().__init__( + sbyconfig=None, + workdir=task.workdir, + early_logs=[], + reusedir=True, + taskloop=task.taskloop, + logfile=open(f"{task.workdir}/engine_{candidate.engine_idx}_autotune.txt", "a"), + ) + self.task_local_abort = True + self.log_targets = [self.logfile] + self.exe_paths = autotune.task.exe_paths + self.reusedir = False + self.design = autotune.task.design + + self.model_time = 0 + self.model_requests = [] + + + def parse_config(self, f): + super().parse_config(f) + self.engines = [] + + def engine_list(self): + return [(self.candidate.engine_idx, self.candidate.engine)] + + def copy_src(self): + pass + + def model(self, model_name): + self.log(f"using model '{model_name}'") + return self.autotune.model(self, model_name) + + def exit_callback(self): + super().exit_callback() + + self.candidate.total_adjusted_time = int(monotonic() - self.start_clock_time + self.model_time) + self.candidate.engine_retcode = self.retcode + self.candidate.engine_status = self.status + + self.autotune.engine_finished(self) + for request in self.model_requests: + request.detach_engine_task(self) + + def check_timeout(self, extra_time=0): + model_time = self.model_time + extra_time + total_adjusted_time = int(monotonic() - self.start_clock_time + model_time) + + if self.autotune.timeout is not None: + timeout = self.autotune.timeout + else: + if not self.autotune.have_pending_candidates: + return + timeout = self.candidate.soft_timeout + + if not self.timeout_reached and total_adjusted_time >= timeout: + self.log(f"Reached autotune TIMEOUT ({timeout} seconds). Terminating all subprocesses.") + self.status = "TIMEOUT" + self.total_adjusted_time = total_adjusted_time + self.terminate(timeout=True) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index cab8feb4..6be86a0f 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -233,6 +233,7 @@ def __init__(self): self.options = dict() self.engines = list() self.script = list() + self.autotune_config = None self.files = dict() self.verbatim_files = dict() pass @@ -242,7 +243,7 @@ def parse_config(self, f): for line in f: raw_line = line - if mode in ["options", "engines", "files"]: + if mode in ["options", "engines", "files", "autotune"]: line = re.sub(r"\s*(\s#.*)?$", "", line) if line == "" or line[0] == "#": continue @@ -275,6 +276,15 @@ def parse_config(self, f): self.error(f"sby file syntax error: {line}") continue + if entries[0] == "autotune": + mode = "autotune" + if self.autotune_config: + self.error(f"sby file syntax error: {line}") + + import sby_autotune + self.autotune_config = sby_autotune.SbyAutotuneConfig() + continue + if entries[0] == "file": mode = "file" if len(entries) != 2: @@ -300,6 +310,10 @@ def parse_config(self, f): self.options[entries[0]] = entries[1] continue + if mode == "autotune": + self.autotune_config.config_line(self, line) + continue + if mode == "engines": entries = line.split() self.engines.append(entries) @@ -376,7 +390,7 @@ def run(self): class SbyTask(SbyConfig): - def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None): + def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logfile=None): super().__init__() self.used_options = set() self.models = dict() @@ -416,7 +430,8 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None): self.summary = list() - self.logfile = open(f"{workdir}/logfile.txt", "a") + 
self.logfile = logfile or open(f"{workdir}/logfile.txt", "a") + self.log_targets = [sys.stdout, self.logfile] for line in early_logs: print(line, file=self.logfile, flush=True) @@ -426,6 +441,9 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None): for line in sbyconfig: print(line, file=f) + def engine_list(self): + return list(enumerate(self.engines)) + def check_timeout(self): if self.opt_timeout is not None: total_clock_time = int(monotonic() - self.start_clock_time) @@ -457,13 +475,13 @@ def update_proc_canceled(self, proc): def log(self, logmessage): tm = localtime() - print("SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), flush=True) - print("SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), file=self.logfile, flush=True) + line = "SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage) + for target in self.log_targets: + print(line, file=target, flush=True) def error(self, logmessage): tm = localtime() - print("SBY {:2d}:{:02d}:{:02d} [{}] ERROR: {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), flush=True) - print("SBY {:2d}:{:02d}:{:02d} [{}] ERROR: {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage), file=self.logfile, flush=True) + self.log(f"ERROR: {logmessage}") self.status = "ERROR" if "ERROR" not in self.expect: self.retcode = 16 @@ -728,8 +746,9 @@ def run(self, setupmode): self.setup_procs(setupmode) if not setupmode: self.taskloop.run() + self.write_summary_file() - def setup_procs(self, setupmode): + def handle_non_engine_options(self): with open(f"{self.workdir}/config.sby", "r") as f: self.parse_config(f) @@ -747,6 +766,9 @@ def setup_procs(self, setupmode): if s not in ["PASS", "FAIL", "UNKNOWN", "ERROR", "TIMEOUT"]: self.error(f"Invalid expect value: {s}") + if self.opt_mode != "live": + self.handle_int_option("depth", 20) + self.handle_bool_option("multiclock", False) self.handle_bool_option("wait", False) self.handle_int_option("timeout", None) @@ -755,8 +777,10 @@ def setup_procs(self, setupmode): self.handle_int_option("skip", None) self.handle_str_option("tbtop", None) + def setup_procs(self, setupmode): + self.handle_non_engine_options() if self.opt_smtc is not None: - for engine in self.engines: + for engine_idx, engine in self.engine_list(): if engine[0] != "smtbmc": self.error("Option smtc is only valid for smtbmc engine.") @@ -764,11 +788,11 @@ def setup_procs(self, setupmode): if self.opt_skip == 0: self.opt_skip = None else: - for engine in self.engines: + for engine_idx, engine in self.engine_list(): if engine[0] not in ["smtbmc", "btor"]: self.error("Option skip is only valid for smtbmc and btor engines.") - if len(self.engines) == 0: + if len(self.engine_list()) == 0: self.error("Config file is lacking engine configuration.") if self.reusedir: @@ -838,6 +862,7 @@ def summarize(self): if self.status == "TIMEOUT": self.retcode = 8 if self.status == "ERROR": self.retcode = 16 + def write_summary_file(self): with open(f"{self.workdir}/{self.status}", "w") as f: for line in self.summary: print(line, file=f) diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index 85ef882f..78324edb 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -24,8 +24,7 @@ def run(task): task.handle_int_option("append", 0) task.handle_str_option("aigsmt", "yices") - for engine_idx in range(len(task.engines)): - engine = task.engines[engine_idx] + 
for engine_idx, engine in task.engine_list(): assert len(engine) > 0 task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index 858ab9a7..d7705ee3 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -23,8 +23,7 @@ def run(task): task.handle_int_option("depth", 20) task.handle_int_option("append", 0) - for engine_idx in range(len(task.engines)): - engine = task.engines[engine_idx] + for engine_idx, engine in task.engine_list(): assert len(engine) > 0 task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index a6330537..46b556fc 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -24,8 +24,7 @@ def run(task): task.status = "UNKNOWN" - for engine_idx in range(len(task.engines)): - engine = task.engines[engine_idx] + for engine_idx, engine in task.engine_list(): assert len(engine) > 0 task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index 6b446a8a..3abadf75 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -31,8 +31,7 @@ def run(task): task.basecase_procs = list() task.induction_procs = list() - for engine_idx in range(len(task.engines)): - engine = task.engines[engine_idx] + for engine_idx, engine in task.engine_list(): assert len(engine) > 0 task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") From 48a02f9cc46d7d79c3a4f00ad9a7e9eb292ca061 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 23 Jun 2022 16:37:56 +0200 Subject: [PATCH 093/220] Test autotune --- tests/autotune/Makefile | 2 + tests/autotune/autotune_div.sby | 85 +++++++++++++++++++++++++++++ tests/autotune/autotune_div.sh | 3 + tests/autotune/autotune_options.sby | 50 +++++++++++++++++ tests/autotune/autotune_options.sh | 3 + 5 files changed, 143 insertions(+) create mode 100644 tests/autotune/Makefile create mode 100644 tests/autotune/autotune_div.sby create mode 100644 tests/autotune/autotune_div.sh create mode 100644 tests/autotune/autotune_options.sby create mode 100644 tests/autotune/autotune_options.sh diff --git a/tests/autotune/Makefile b/tests/autotune/Makefile new file mode 100644 index 00000000..44a02a73 --- /dev/null +++ b/tests/autotune/Makefile @@ -0,0 +1,2 @@ +SUBDIR=autotune +include ../make/subdir.mk diff --git a/tests/autotune/autotune_div.sby b/tests/autotune/autotune_div.sby new file mode 100644 index 00000000..863e1607 --- /dev/null +++ b/tests/autotune/autotune_div.sby @@ -0,0 +1,85 @@ +[tasks] +bmc +cover +prove + +[options] +bmc: mode bmc +cover: mode cover +prove: mode prove +expect pass + +[engines] +smtbmc boolector + +[script] +read -sv autotune_div.sv +prep -top top + +[file autotune_div.sv] +module top #( + parameter WIDTH = 4 // Reduce this if it takes too long on CI +) ( + input clk, + input load, + input [WIDTH-1:0] a, + input [WIDTH-1:0] b, + output reg [WIDTH-1:0] q, + output reg [WIDTH-1:0] r, + output reg done +); + + reg [WIDTH-1:0] a_reg = 0; + reg [WIDTH-1:0] b_reg = 1; + + initial begin + q <= 0; + r <= 0; + done <= 1; + end + + reg [WIDTH-1:0] q_step = 1; + reg [WIDTH-1:0] r_step = 1; + + // This is not how you design a good divider circuit! 
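+    // It subtracts shifted multiples of b_reg from the remainder: r_step is
+    // kept equal to b_reg * q_step (see the helper invariant below), doubled
+    // while it still fits into the remainder and halved again on the way back
+    // down, until q_step returns to 1 and done is raised.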
+ always @(posedge clk) begin + if (load) begin + a_reg <= a; + b_reg <= b; + q <= 0; + r <= a; + q_step <= 1; + r_step <= b; + done <= b == 0; + end else begin + if (r_step <= r) begin + q <= q + q_step; + r <= r - r_step; + + if (!r_step[WIDTH-1]) begin + r_step <= r_step << 1; + q_step <= q_step << 1; + end + end else begin + if (!q_step[0]) begin + r_step <= r_step >> 1; + q_step <= q_step >> 1; + end else begin + done <= 1; + end + end + end + end + + always @(posedge clk) begin + assert (r_step == b_reg * q_step); // Helper invariant + + assert (q * b_reg + r == a_reg); // Main invariant & correct output relationship + if (done) assert (r <= b_reg - 1); // Output range + + cover (done); + cover (done && b_reg == 0); + cover (r != a_reg); + cover (r == a_reg); + end +endmodule diff --git a/tests/autotune/autotune_div.sh b/tests/autotune/autotune_div.sh new file mode 100644 index 00000000..e22aa5dd --- /dev/null +++ b/tests/autotune/autotune_div.sh @@ -0,0 +1,3 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN --autotune -f $SBY_FILE $TASK diff --git a/tests/autotune/autotune_options.sby b/tests/autotune/autotune_options.sby new file mode 100644 index 00000000..daacb3f6 --- /dev/null +++ b/tests/autotune/autotune_options.sby @@ -0,0 +1,50 @@ +[tasks] +a +b +c +d + +[options] +mode bmc +expect fail + +[engines] +smtbmc boolector + +[script] +read -sv autotune_div.sv +prep -top top + +[autotune] +a: timeout 60 +a: wait 10%+20 +a: parallel 1 +a: presat on +a: incr on +a: mem on +a: forall on + +b: timeout none +b: parallel auto +b: presat off +b: incr off +b: mem auto +b: mem_threshold 20 +b: forall any + +c: presat any +c: incr any +c: mem any +c: forall auto + +d: incr auto +d: incr_threshold 10 + +[file autotune_div.sv] +module top (input clk); + reg [7:0] counter = 0; + always @(posedge clk) begin + counter <= counter + 1; + assert (counter != 4); + end +endmodule diff --git a/tests/autotune/autotune_options.sh b/tests/autotune/autotune_options.sh new file mode 100644 index 00000000..e22aa5dd --- /dev/null +++ b/tests/autotune/autotune_options.sh @@ -0,0 +1,3 @@ +#!/bin/bash +set -e +python3 $SBY_MAIN --autotune -f $SBY_FILE $TASK From d038a7d35c03132e76238a9d6d70d4dd779ec452 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 27 Jun 2022 15:57:22 +0200 Subject: [PATCH 094/220] autotune: Initial documentation --- docs/source/index.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/source/index.rst b/docs/source/index.rst index 0527fb4e..5dc69305 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -22,6 +22,7 @@ at the moment.) install.rst quickstart.rst reference.rst + autotune.rst verilog.rst verific.rst license.rst From 907db48ac90e8118343cd8085bf0e3af998c15a1 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Thu, 30 Jun 2022 12:06:12 +1200 Subject: [PATCH 095/220] Updating from feedback Primarily addressing Nak's comments on the PR first. Of note is the change from separate files to a single file. Changed to boolector engine and bmc by default. Updated install instructions to move z3 to optional and boolector to recommended. Literal code includes use :lines: option. 
--- docs/examples/fifo/fifo.sby | 23 ++--- docs/examples/fifo/fifo.sv | 188 +++++++++++++++++++++++++++++++----- docs/examples/fifo/top.sv | 167 -------------------------------- docs/source/install.rst | 58 +++++------ docs/source/newstart.rst | 119 ++++++++++++----------- 5 files changed, 269 insertions(+), 286 deletions(-) delete mode 100644 docs/examples/fifo/top.sv diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby index 62e9d858..22ebcddf 100644 --- a/docs/examples/fifo/fifo.sby +++ b/docs/examples/fifo/fifo.sby @@ -1,10 +1,8 @@ [tasks] -prove_oss prove oss -noskip prove oss -cover_oss cover oss -prove_tabby prove tabby -cover_tabby cover tabby -prove_oss cover_oss : default +basic bmc +nofullskip prove +cover +basic cover : default [options] cover: @@ -13,18 +11,17 @@ mode cover prove: mode prove -- +bmc: +mode bmc +-- [engines] -cover: smtbmc z3 -prove: abc pdr +smtbmc boolector [script] -read -sv fifo.sv -tabby: read -define USE_VERIFIC=1 -noskip: read -define NOSKIP=1 -read -formal top.sv +nofullskip: read -define NO_FULL_SKIP=1 +read -formal fifo.sv prep -top fifo [files] fifo.sv -top.sv diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 5e7e6c8e..9d32a432 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -1,23 +1,3 @@ -// Define the fifo storage -module storage ( - input wen, ren, clk, rst_n, - input [3:0] waddr, raddr, - input [7:0] wdata, - output [7:0] rdata -); - parameter MAX_DATA = 16; - - // 8 bit data, fifo depth 16 / 4 bit address - // reset not defined - reg [7:0] data [MAX_DATA-1:0]; - always @(posedge clk) begin - if (wen) - data[waddr] <= wdata; - end - - assign rdata = data[raddr]; -endmodule - // address generator/counter module addr_gen ( input en, clk, rst_n, @@ -39,7 +19,171 @@ module addr_gen ( addr <= 0; else addr <= addr + 1; - else - addr <= addr; end endmodule + +// Define our top level fifo entity +module fifo ( + input wen, ren, clk, rst_n, + input [7:0] wdata, + output [7:0] rdata, + output [4:0] count, + output full, empty +); + parameter MAX_DATA = 16; + + // wire up our sub modules + wire [3:0] waddr, raddr; + wire wskip, rskip; + + // fifo storage + // 8 bit data, fifo depth 16 / 4 bit address + // reset not defined + reg [7:0] data [MAX_DATA-1:0]; + always @(posedge clk) begin + if (wen) + data[waddr] <= wdata; + end + assign rdata = data[raddr]; + + addr_gen #(.MAX_DATA(MAX_DATA)) fifo_writer ( + .en (wen || wskip), + .clk (clk ), + .rst_n (rst_n), + .addr (waddr) + ); + + addr_gen #(.MAX_DATA(MAX_DATA)) fifo_reader ( + .en (ren || rskip), + .clk (clk ), + .rst_n (rst_n), + .addr (raddr) + ); + + // internals + reg [4:0] data_count; + initial begin + data_count <= 0; + end + + always @(posedge clk or negedge rst_n) begin + if (~rst_n) + data_count <= 0; + else if (wen && !ren && data_count < MAX_DATA) + data_count <= data_count + 1; + else if (ren && !wen && data_count > 0) + data_count <= data_count - 1; + end + + assign full = data_count == MAX_DATA; + assign empty = (data_count == 0) && rst_n; + assign count = data_count; + +`ifndef NO_FULL_SKIP + // write while full => overwrite oldest data, move read pointer + assign rskip = wen && !ren && data_count >= MAX_DATA; + // read while empty => read invalid data, keep write pointer in sync + assign wskip = ren && !wen && data_count == 0; +`else + assign rskip = 0; + assign wskip = 0; +`endif // NO_FULL_SKIP + +`ifdef FORMAL + // observers + wire [4:0] addr_diff; + assign addr_diff = waddr >= raddr + ? 
waddr - raddr + : waddr + MAX_DATA - raddr; + + reg init = 0; + always @(posedge clk) begin + if (rst_n) + init <= 1; + // if init is low we don't care about the value of rst_n + // if init is high (rst_n has ben high), then rst_n must remain high + assume (!init || init && rst_n); + end + + // tests + always @(posedge clk) begin + if (rst_n) begin + // waddr and raddr can only be non zero if reset is high + w_nreset: cover (waddr || raddr); + + // count never less than zero, or more than max + a_uflow: assert (count >= 0); + a_uflow2: assert (raddr >= 0); + a_oflow: assert (count <= MAX_DATA); + a_oflow2: assert (waddr < MAX_DATA); + + // count should be equal to the difference between writer and reader address + a_count_diff: assert (count == addr_diff + || count == MAX_DATA && addr_diff == 0); + + // count should only be able to increase or decrease by 1 + a_counts: assert (count == 0 + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); + + // read/write addresses can only increase (or stay the same) + a_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + a_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); + + // full and empty work as expected + a_full: assert (!full || full && count == MAX_DATA); + w_full: cover (wen && !ren && count == MAX_DATA-1); + a_empty: assert (!empty || empty && count == 0); + w_empty: cover (ren && !wen && count == 1); + + // can we corrupt our data? + w_overfill: cover ($past(rskip) && raddr); + w_underfill: cover ($past(wskip) && waddr); + end else begin + // waddr and raddr are zero while reset is low + a_reset: assert (!waddr && !raddr); + w_reset: cover (~rst_n); + + // outputs are zero while reset is low + a_zero_out: assert (!empty && !full && !count); + end + end + +`ifdef VERIFIC + // if we have verific we can also do the following additional tests + always @(posedge clk) begin + if (rst_n) begin + // read/write enables enable + ap_raddr2: assert property (ren |=> $changed(raddr)); + ap_waddr2: assert property (wen |=> $changed(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); + ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); + + // can we corrupt our data? 
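+            // (a write while full must advance raddr via rskip, and a read
+            // while empty must advance waddr via wskip)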
+ ap_overfill: assert property (wen && full |=> $changed(raddr)); + ap_underfill: assert property (ren && empty |=> $changed(waddr)); + + // change data when writing (and only when writing) so we can line + // up reads with writes + assume property (wen |=> $changed(wdata)); + assume property (!wen |=> $stable(wdata)); + end + end +`else // !VERIFIC + // without verific we are more limited in describing the above assumption + always @(posedge clk) begin + assume ((wen && wdata != $past(wdata)) + || (!wen && wdata == $past(wdata))); + end +`endif // VERIFIC + +`endif // FORMAL + +endmodule diff --git a/docs/examples/fifo/top.sv b/docs/examples/fifo/top.sv deleted file mode 100644 index f5eec519..00000000 --- a/docs/examples/fifo/top.sv +++ /dev/null @@ -1,167 +0,0 @@ -// Define our top level fifo entity -module fifo ( - input wen, ren, clk, rst_n, - input [7:0] wdata, - output [7:0] rdata, - output [4:0] count, - output full, empty -); - parameter MAX_DATA = 16; - - // internals - reg [4:0] data_count; - initial begin - data_count <= 0; - end - - // wire up our sub modules - wire [3:0] waddr, raddr; - wire wskip, rskip; - storage #(.MAX_DATA(MAX_DATA)) fifo_storage ( - .wen (wen ), - .ren (ren ), - .clk (clk ), - .rst_n (rst_n), - .waddr (waddr), - .raddr (raddr), - .wdata (wdata), - .rdata (rdata) - ); - - addr_gen #(.MAX_DATA(MAX_DATA)) fifo_writer ( - .en (wen || wskip), - .clk (clk ), - .rst_n (rst_n), - .addr (waddr) - ); - - addr_gen #(.MAX_DATA(MAX_DATA)) fifo_reader ( - .en (ren || rskip), - .clk (clk ), - .rst_n (rst_n), - .addr (raddr) - ); - - always @(posedge clk or negedge rst_n) begin - if (~rst_n) - data_count <= 0; - else if (wen && !ren && data_count < MAX_DATA) - data_count <= data_count + 1; - else if (ren && !wen && data_count > 0) - data_count <= data_count - 1; - else - data_count <= data_count; - end - - assign full = data_count == MAX_DATA; - assign empty = (data_count == 0) && rst_n; - assign count = data_count; - -`ifndef NOSKIP - // write while full => overwrite oldest data, move read pointer - assign rskip = wen && !ren && data_count >= MAX_DATA; - // read while empty => read invalid data, keep write pointer in sync - assign wskip = ren && !wen && data_count == 0; -`else - assign rskip = 0; - assign wskip = 0; -`endif // NOSKIP - -`ifdef FORMAL - // observers - wire [4:0] addr_diff; - assign addr_diff = waddr >= raddr - ? 
waddr - raddr - : waddr + MAX_DATA - raddr; - - reg init = 0; - always @(posedge clk) begin - if (rst_n) - init <= 1; - // if init is low we don't care about the value of rst_n - // if init is high (rst_n has ben high), then rst_n must remain high - assume (!init || init && rst_n); - end - - // tests - always @(posedge clk) begin - if (rst_n) begin - // waddr and raddr can only be non zero if reset is high - w_nreset: cover (waddr || raddr); - - // count never less than zero, or more than max - a_uflow: assert (count >= 0); - a_uflow2: assert (raddr >= 0); - a_oflow: assert (count <= MAX_DATA); - a_oflow2: assert (waddr < MAX_DATA); - - // count should be equal to the difference between writer and reader address - a_count_diff: assert (count == addr_diff - || count == MAX_DATA && addr_diff == 0); - - // count should only be able to increase or decrease by 1 - a_counts: assert (count == 0 - || count == $past(count) - || count == $past(count) + 1 - || count == $past(count) - 1); - - // read/write addresses can only increase (or stay the same) - a_raddr: assert (raddr == 0 - || raddr == $past(raddr) - || raddr == $past(raddr + 1)); - a_waddr: assert (waddr == 0 - || waddr == $past(waddr) - || waddr == $past(waddr + 1)); - - // full and empty work as expected - a_full: assert (!full || full && count == MAX_DATA); - w_full: cover (wen && !ren && count == MAX_DATA-1); - a_empty: assert (!empty || empty && count == 0); - w_empty: cover (ren && !wen && count == 1); - - // can we corrupt our data? - w_overfill: cover ($past(rskip) && raddr); - w_underfill: cover ($past(wskip) && waddr); - end else begin - // waddr and raddr are zero while reset is low - a_reset: assert (!waddr && !raddr); - w_reset: cover (~rst_n); - - // outputs are zero while reset is low - a_zero_out: assert (!empty && !full && !count); - end - end - -`ifdef USE_VERIFIC - // if we have verific we can also do the following additional tests - always @(posedge clk) begin - if (rst_n) begin - // read/write enables enable - ap_raddr2: assert property (ren |=> $changed(raddr)); - ap_waddr2: assert property (wen |=> $changed(waddr)); - - // read/write needs enable UNLESS full/empty - ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); - ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); - - // can we corrupt our data? - ap_overfill: assert property (wen && full |=> $changed(raddr)); - ap_underfill: assert property (ren && empty |=> $changed(waddr)); - - // change data when writing (and only when writing) so we can line - // up reads with writes - assume property (wen |=> $changed(wdata)); - assume property (!wen |=> $stable(wdata)); - end - end -`else // !USE_VERIFIC - // without verific we are more limited in describing the above assumption - always @(posedge clk) begin - assume ((wen && wdata != $past(wdata)) - || (!wen && wdata == $past(wdata))); - end -`endif // USE_VERIFIC - -`endif // FORMAL - -endmodule diff --git a/docs/source/install.rst b/docs/source/install.rst index 4110d630..a232c866 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -3,12 +3,12 @@ Installation guide ================== -This document will guide you through the process of installing SymbiYosys. +This document will guide you through the process of installing sby. CAD suite(s) ************ -SymbiYosys (sby) is part of the `Tabby CAD Suite +Sby (SymbiYosys) is part of the `Tabby CAD Suite `_ and the `OSS CAD Suite `_! 
The easiest way to use sby is to install the binary software suite, which contains all required @@ -32,9 +32,9 @@ CAD Suite, please visit https://www.yosyshq.com/tabby-cad-datasheet. Installing from source ********************** -Follow the instructions below to install SymbiYosys and its dependencies. -Yosys, SymbiYosys, and Z3 are non-optional. The other packages are only -required for some engine configurations. +Follow the instructions below to install sby and its dependencies. Yosys and sby +are non-optional. Boolector is recommended to install but not required. The +other packages are only required for some engine configurations. Prerequisites ------------- @@ -59,7 +59,7 @@ https://yosyshq.net/yosys/ https://people.eecs.berkeley.edu/~alanmi/abc/ -Next install Yosys, Yosys-SMTBMC and ABC (``yosys-abc``): +Note that this will install Yosys, Yosys-SMTBMC and ABC (as ``yosys-abc``): .. code-block:: text @@ -68,8 +68,8 @@ Next install Yosys, Yosys-SMTBMC and ABC (``yosys-abc``): make -j$(nproc) sudo make install -SymbiYosys -^^^^^^^^^^ +sby +^^^ https://github.com/YosysHQ/sby @@ -79,25 +79,38 @@ https://github.com/YosysHQ/sby cd sby sudo make install -Z3 -^^ +Recommended components +---------------------- -https://github.com/Z3Prover/z3/wiki +Boolector +^^^^^^^^^ -.. code-block:: text +https://boolector.github.io - git clone https://github.com/Z3Prover/z3 - cd z3 - python scripts/mk_make.py - cd build - make -j$(nproc) - sudo make install +.. code-block:: text + git clone https://github.com/boolector/boolector + cd boolector + ./contrib/setup-btor2tools.sh + ./contrib/setup-lingeling.sh + ./configure.sh + make -C build -j$(nproc) + sudo cp build/bin/{boolector,btor*} /usr/local/bin/ + sudo cp deps/btor2tools/bin/btorsim /usr/local/bin/ + +To use the ``btor`` engine you will need to install btor2tools from +`commit c35cf1c `_ or +newer. Optional components ------------------- Additional solver engines can be installed as per their instructions, links are provided below. +Z3 +^^^ + + https://github.com/Z3Prover/z3 + Yices 2 ^^^^^^^ http://yices.csl.sri.com/ @@ -111,12 +124,3 @@ super_prove Avy ^^^ https://arieg.bitbucket.io/avy/ - -Boolector -^^^^^^^^^ - http://fmv.jku.at/boolector/ - - https://github.com/boolector/boolector - - To use the ``btor`` engine you additionally need a newer version of btorsim - than the boolector setup script builds: https://github.com/boolector/btor2tools diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index eac65a85..ebc443f0 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -2,8 +2,9 @@ Getting started =============== -.. note:: This tutorial assumes sby installation as per the :ref:`install-doc`. - It is also recommended to install +.. note:: + This tutorial assumes sby and boolector installation as per the + :ref:`install-doc`. For this tutorial, it is also recommended to install `GTKWave `_, an open source VCD viewer. First In, First Out (FIFO) buffer @@ -21,33 +22,40 @@ a FIFO is they arrive at the queue's tail. In hardware we can create such a construct by providing two addresses into a -register file. See the Verilog code below for the two main modules of an -example implementation. +register file. This tutorial will use an example implementation provided in +`fifo.sv`. + +First, the address generator module: + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :lines: 1-23 + +This module is instantiated twice; once for the write address and once for the +read address. 
In both cases, the address will start at and reset to 0, and will +increment by 1 when an enable signal is received. When the address pointers +increment from the maximum storage value they reset back to 0, providing a +circular queue. + +Next, the register file: .. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog + :lines: 39-47 Notice that this register design includes a synchronous write and asynchronous -read. Each word is 8 bits, and up to 16 words can be stored in the buffer. The -address generator module will be instantiated twice; once for the write address -and once for the read address. In both cases, the address will start at and -reset to 0, and will increment by 1 when an enable signal is received. When the -address pointers increment from the maximum storage value they reset back to 0, -providing a circular queue. The top level design implemented, can be found in -``top.sv``. +read. Each word is 8 bits, and up to 16 words can be stored in the buffer. Verification properties *********************** In order to verify our design we must first define properties that it must -satisfy. For example, there must never be a negative number of values in the -FIFO. Similarly, there must never be more than there is memory available. By -assigning a signal to count the number of values in the buffer, we can make the -following assertions in the code: +satisfy. For example, there must never be more than there is memory available. +By assigning a signal to count the number of values in the buffer, we can make +the following assertion in the code: .. code-block:: systemverilog - a_uflow: assert (count >= 0); a_oflow: assert (count <= MAX_DATA); It is also possible to use the prior value of a signal for comparison. This can @@ -86,36 +94,30 @@ SymbiYosys SymbiYosys (sby) uses a .sby file to define a set of tasks used for verification. -**prove_oss** - Prove mode (unbounded model check), for use with OSS CAD Suite. - -**noskip** - Demonstration of failing model check with OSS CAD Suite. - -**cover_oss** - Cover mode (testing cover statements), for use with OSS CAD Suite. +**basic** + Bounded model check of design. -**prove_tabby** - Prove mode, for use with Tabby CAD Suite. +**nofullskip** + Demonstration of failing model using an unbounded model check. -**cover_tabby** - Cover mode, for use with Tabby CAD Suite. +**cover** + Cover mode (testing cover statements). -The use of the ``:default`` tag indicates that by default, prove_oss and -cover_oss should be run if no tasks are specified, such as when running the -command below. +The use of the ``:default`` tag indicates that by default, basic and cover +should be run if no tasks are specified, such as when running the command below. sby fifo.sby -.. note:: The default set of tests should all pass. If this is not the case - there may be a problem with the installation of sby or one of its solvers. +.. note:: + The default set of tests should all pass. If this is not the case there may + be a problem with the installation of sby or one of its solvers. To see what happens when a test fails, the below command can be used. Note the use of the ``-f`` flag to automatically overwrite existing task output. While this may not be necessary on the first run, it is quite useful when making adjustments to code and rerunning tests to validate. - sby -f fifo.sby noskip + sby -f fifo.sby nofullskip The noskip task disables the code shown below. 
Because the count signal has been written such that it cannot exceed MAX_DATA, removing this code will lead @@ -125,36 +127,38 @@ overflow occur and the oldest data be written. .. code-block:: systemverilog - `ifndef NOSKIP + `ifndef NO_FULL_SKIP // write while full => overwrite oldest data, move read pointer assign rskip = wen && !ren && data_count >= MAX_DATA; // read while empty => read invalid data, keep write pointer in sync assign wskip = ren && !wen && data_count == 0; - `endif // NOSKIP + `endif // NO_FULL_SKIP The last few lines of output for the noskip task should be similar to the following: .. code-block:: text - SBY [fifo_noskip] engine_0: ## 0:00:00 BMC failed! - SBY [fifo_noskip] engine_0: ## 0:00:00 Assert failed in fifo: a_count_diff - SBY [fifo_noskip] engine_0: ## 0:00:00 Writing trace to VCD file: engine_0/trace.vcd - SBY [fifo_noskip] engine_0: ## 0:00:00 Writing trace to Verilog testbench: engine_0/trace_tb.v - SBY [fifo_noskip] engine_0: ## 0:00:00 Writing trace to constraints file: engine_0/trace.smtc - SBY [fifo_noskip] engine_0: ## 0:00:00 Status: FAILED - SBY [fifo_noskip] engine_0: finished (returncode=1) - SBY [fifo_noskip] summary: Elapsed clock time [H:MM:SS (secs)]: 0:00:01 (1) - SBY [fifo_noskip] summary: Elapsed process time unvailable on Windows - SBY [fifo_noskip] summary: engine_0 (abc pdr) returned FAIL - SBY [fifo_noskip] summary: counterexample trace: fifo_noskip/engine_0/trace.vcd - SBY [fifo_noskip] DONE (FAIL, rc=2) + SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Assert failed in fifo: a_count_diff + SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Assert failed in fifo: ap_underfill + SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Writing trace to VCD file: engine_0/trace.vcd + SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Writing trace to Verilog testbench: engine_0/trace_tb.v + SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Writing trace to constraints file: engine_0/trace.smtc + SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Status: failed + SBY [fifo_nofullskip] engine_0.basecase: finished (returncode=1) + SBY [fifo_nofullskip] engine_0: Status returned by engine for basecase: FAIL + SBY [fifo_nofullskip] engine_0.induction: terminating process + SBY [fifo_nofullskip] summary: Elapsed clock time [H:MM:SS (secs)]: 0:00:02 (2) + SBY [fifo_nofullskip] summary: Elapsed process time unvailable on Windows + SBY [fifo_nofullskip] summary: engine_0 (smtbmc boolector) returned FAIL for basecase + SBY [fifo_nofullskip] summary: counterexample trace: fifo_nofullskip/engine_0/trace.vcd + SBY [fifo_nofullskip] DONE (FAIL, rc=2) SBY The following tasks failed: ['noskip'] Using the ``noskip.gtkw`` file provided, use the below command to examine the error trace. - gtkwave fifo_noskip/engine_0/trace.vcd noskip.gtkw + gtkwave fifo_nofullskip/engine_0/trace.vcd noskip.gtkw This should result in something similar to the below image. We can immediately see that ``data_count`` and ``addr_diff`` are different. Looking a bit deeper @@ -166,26 +170,26 @@ to a higher value than the write address. .. image:: media/gtkwave_noskip.png During correct operation, the ``w_underfill`` witness will cover the underflow -case. Examining ``fifo_cover_oss/logfile.txt`` will reveal which trace file +case. Examining ``fifo_cover/logfile.txt`` will reveal which trace file includes the witness we are looking for. If this file doesn't exist, run the code below. 
-   sby fifo.sby fifo_cover_oss
+   sby fifo.sby cover

 Searching the file for ``w_underfill`` will reveal the below.

 .. code-block:: text

-   $ grep "w_underfill" fifo_cover_oss/logfile.txt -A 1
-   SBY [fifo_cover_oss] engine_0: ## 0:00:00 Reached cover statement at w_underfill in step 2.
-   SBY [fifo_cover_oss] engine_0: ## 0:00:00 Writing trace to VCD file: engine_0/trace2.vcd
+   $ grep "w_underfill" fifo_cover/logfile.txt -A 1
+   SBY [fifo_cover] engine_0: ## 0:00:00 Reached cover statement at w_underfill in step 2.
+   SBY [fifo_cover] engine_0: ## 0:00:00 Writing trace to VCD file: engine_0/trace2.vcd

 We can then run gtkwave with the trace file indicated to see the correct
 operation as in the image below.  When the buffer is empty, a read with no
 write will result in the ``wskip`` signal going high, incrementing *both* read
 and write addresses and avoiding underflow.

-   gtkwave fifo_cover_oss/engine_0/trace2.vcd noskip.gtkw
+   gtkwave fifo_cover/engine_0/trace2.vcd noskip.gtkw

 .. image:: media/gtkwave_coverskip.png

@@ -198,8 +202,9 @@ Until this point, all of the properties described have been *immediate*
 assertions.  As the name suggests, immediate assertions are evaluated
 immediately whereas concurrent assertions allow for the capture of sequences of
 events which occur across time.  The use of concurrent assertions requires a
-more advanced parser, such as Verific.  Verific is included for use in the
-*Tabby CAD Suite*.
+more advanced series of checks.  Using a parser such as Verific supports these
+checks *without* having to write out potentially complicated state machines.
+Verific is included for use in the *Tabby CAD Suite*.

 With concurrent assertions we are able to verify more fully that our enables
 and status flags work as desired.  For example, we can assert that if the read

From 685457915ad80d4828a595e535ea12942016b3f2 Mon Sep 17 00:00:00 2001
From: Jannis Harder
Date: Thu, 30 Jun 2022 17:50:05 +0200
Subject: [PATCH 096/220] docs: add missing autotune.rst

---
 docs/source/autotune.rst | 181 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 181 insertions(+)
 create mode 100644 docs/source/autotune.rst

diff --git a/docs/source/autotune.rst b/docs/source/autotune.rst
new file mode 100644
index 00000000..6fab860b
--- /dev/null
+++ b/docs/source/autotune.rst
@@ -0,0 +1,181 @@
+Autotune: Automatic Engine Selection
+====================================
+
+Selecting the best performing engine for a given verification task often
+requires some amount of trial and error. To reduce the manual work required for
+this, sby offers the ``--autotune`` option which takes an ``.sby`` file and
+runs it using a variety of engines and engine configurations. At the end it
+produces a report listing the fastest engines among these candidates.
+
+Using Autotune
+--------------
+
+To run autotune, you can add the ``--autotune`` option to your usual sby
+invocation. For example, if you usually run ``sby demo.sby`` you would run
+``sby --autotune demo.sby`` instead. When the ``.sby`` file contains multiple
+tasks, autotune is run for each task independently. As when running without
+``--autotune``, it is possible to specify which tasks to run on the command
+line.
+
+Autotune runs without requiring further interaction and will eventually print a
+list of engine configurations and their respective solving times. To
+permanently use an engine configuration, you can copy it from the ``sby
+--autotune`` output into the ``[engines]`` section of your ``.sby`` file.
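+
+For example, if autotune were to report ``smtbmc --unroll yices`` as the
+fastest configuration (a purely illustrative result, the best engine depends
+on the design), the ``[engines]`` section would simply become:
+
+.. code-block:: text
+
+    [engines]
+    smtbmc --unroll yices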
+ +Autotune Log Output +------------------- + +The log output in ``--autotune`` mode differs from the usual sby log output. + +It also starts with preparing the design (this includes running the user +provided ``[script]``) so it can be passed to the solvers. This is only done +once and will be reused to run every candidate engine. + +.. code-block:: text + + SBY [demo] model 'base': preparing now... + SBY [demo] base: starting process "cd demo/src; yosys -ql ../model/design.log ../model/design.ys" + SBY [demo] base: finished (returncode=0) + SBY [demo] prepared model 'base' + +This is followed by selecting the engine candidates to run. For this the design +and sby configuration are analyzed to skip engines that are not compatible or +unlikely to work well. When engines is skipped due to a recommendation, a +corresponding log message is displayed as well as the total number of +candidates to try: + +.. code-block:: text + + SBY [demo] checking more than 20 timesteps (100), disabling nonincremental smtbmc + SBY [demo] testing 16 engine configurations... + +After this, the candidate engine configurations are started. Depending on the +configuration engines can run in parallel. The engine output itself is not +logged to stdout when running autotune, so you will only see messages about +starting an engine: + +.. code-block:: text + + SBY [demo] engine_1 (smtbmc --nopresat boolector): starting... (14 configurations pending) + SBY [demo] engine_2 (smtbmc bitwuzla): starting... (13 configurations pending) + SBY [demo] engine_3 (smtbmc --nopresat bitwuzla): starting... (12 configurations pending) + ... + +The engine log that would normally be printed is instead redirected to files +named ``engine_*_autotune.txt`` within sby's working directory. + +To run an engine, further preparation steps may be necessary. These are cached +and will be reused for every engine requiring them, while properly accounting +for the required prepration time. Below is an example of the log output +produced by such a preparation step. Note that this runs in parallel, so it may +be interspersed with other log output. + +.. code-block:: text + + SBY [demo] model 'smt2': preparing now... + SBY [demo] smt2: starting process "cd demo/model; yosys -ql design_smt2.log design_smt2.ys" + ... + SBY [demo] smt2: finished (returncode=0) + SBY [demo] prepared model 'smt2' + +Whenever an engine finishes a log message is printed: + +.. code-block:: text + + SBY [demo] engine_4 (smtbmc --unroll yices): succeeded (status=PASS) + SBY [demo] engine_4 (smtbmc --unroll yices): took 30 seconds (first engine to finish) + +When an engine takes longer than the current hard timeout, it is stopped: + +.. code-block:: text + + SBY [demo] engine_2 (smtbmc bitwuzla): timeout (150 seconds) + +Depending on the configuration, autotune will also stop an engine earlier when +reaching a soft timeout. In that case, when no other engine finishes in less +time, the engine will be retried later with a longer soft timeout: + +.. code-block:: text + + SBY [demo] engine_0 (smtbmc boolector): timeout (60 seconds, will be retried if necessary) + + +Finally at the end a summary of all finished engines is printed, sorted by +their solving time: + +.. 
code-block:: text + + SBY [demo] finished engines: + SBY [demo] #3: engine_1: smtbmc --nopresat boolector (52 seconds, status=PASS) + SBY [demo] #2: engine_5: smtbmc --nopresat --unroll yices (41 seconds, status=PASS) + SBY [demo] #1: engine_4: smtbmc --unroll yices (30 seconds, status=PASS) + SBY [demo] DONE (AUTOTUNED, rc=0) + +If any tried engine encounters an error or produces an unexpected result, +autotune will also output a list of failed engines. Note that when the sby file +does not contain the ``expect`` option, autotune defaults to ``expect +pass,fail`` to simplify running autotune on a verification task with a +currently unknown outcome. + +Configuring Autotune +-------------------- + +Autotune can be configured by adding an ``[autotune]`` section to the ``.sby`` +file. Each line in that section has the form ``option_name value``, the +possible options and their supported values are described below. In addition +the ``--autotune-config`` command line option can be used to specify a file +containing further autotune options, using the same syntax. When both are used, +the command line option takes precedence. This makes it easy to run autotune +with existing ``.sby`` files without having to modify them. + +Autotune Options +---------------- + ++--------------------+------------------------------------------------------+ +| Autotune Option | Description | ++====================+======================================================+ +| ``timeout`` | Set a different timeout value (in seconds) used only | +| | for autotune. The value ``none`` can be used to | +| | disable the timeout. Default: uses the non-autotune | +| | timeout option. | ++--------------------+------------------------------------------------------+ +| ``soft_timeout`` | Initial timeout value (in seconds) used to interrupt | +| | a candidate engine when other candidates are | +| | pending. Increased every time a candidate is retried | +| | to ensure progress. Default: ``60`` | ++--------------------+------------------------------------------------------+ +| ``wait`` | Additional time to wait past the time taken by the | +| | fastest finished engine candidate so far. Can be an | +| | absolute time in seconds, a percentage of the | +| | fastest candidate or a sum of both. | +| | Default: ``50%+10`` | ++--------------------+------------------------------------------------------+ +| ``parallel`` | Maximal number of engine candidates to try in | +| | parallel. When set to ``auto``, the number of | +| | available CPUs is used. Default: ``auto`` | ++--------------------+------------------------------------------------------+ +| ``presat`` | Filter candidates by whether they perform a presat | +| | check. Values ``on``, ``off``, ``any``. | +| | Default: ``any`` | ++--------------------+------------------------------------------------------+ +| ``incr`` | Filter candidates by whether they use incremental | +| | solving (when applicable). Values ``on``, ``off``, | +| | ``any``, ``auto`` (see next option). | +| | Default: ``auto`` | ++--------------------+------------------------------------------------------+ +| ``incr_threshold`` | Number of timesteps required to only consider | +| | incremental solving when ``incr`` is set to | +| | ``auto``. Default: ``20`` | ++--------------------+------------------------------------------------------+ +| ``mem`` | Filter candidates by whether they have native | +| | support for memory. Values ``on``, ``any``, ``auto`` | +| | (see next option). 
Default: ``auto`` | ++--------------------+------------------------------------------------------+ +| ``mem_threshold`` | Number of memory bits required to only consider | +| | candidates with native memory support when ``mem`` | +| | is set to ``auto``. Default: ``10240`` | ++--------------------+------------------------------------------------------+ +| ``forall`` | Filter candidates by whether they support | +| | ``$allconst``/``$allseq``. Values ``on``, ``any``, | +| | ``auto`` (``on`` when ``$allconst``/``allseq`` are | +| | found in the design). Default: ``auto`` | ++--------------------+------------------------------------------------------+ From 7ba67ef260a901a5d8c209275d3724251e27fa51 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Fri, 1 Jul 2022 11:15:47 +1200 Subject: [PATCH 097/220] Removing unnecessary underflow assertions --- docs/examples/fifo/fifo.sv | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 9d32a432..1a32187f 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -111,9 +111,7 @@ module fifo ( // waddr and raddr can only be non zero if reset is high w_nreset: cover (waddr || raddr); - // count never less than zero, or more than max - a_uflow: assert (count >= 0); - a_uflow2: assert (raddr >= 0); + // count never more than max a_oflow: assert (count <= MAX_DATA); a_oflow2: assert (waddr < MAX_DATA); From aab2c3c2e0360d316f69aa2c519bf56981703203 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Fri, 1 Jul 2022 11:19:01 +1200 Subject: [PATCH 098/220] New exercise section Worked exercise using the MAX_DATA parameter, highlighting its incompleteness. Includes completed examples in /golden subdirectory. Also some formatting changes for spacing and extra links. 
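For readers working through the new exercise, a minimal, hypothetical sketch of
the parameterisation it is nudging towards (the module name and the $clog2
approach here are illustration only and are not taken from this patch; the
golden reference below instead exposes ADDR_BITS as a second parameter):

    // illustration only: derive the address width from the buffer size so the
    // design scales past MAX_DATA=16 without editing every declaration by hand
    module addr_width_demo #(parameter MAX_DATA = 17) ();
        localparam ADDR_BITS = $clog2(MAX_DATA);   // 5 for MAX_DATA=17
        wire [ADDR_BITS-1:0] waddr, raddr;         // address counters
        reg  [ADDR_BITS:0]   data_count;           // one extra bit, can reach MAX_DATA
    endmodule

Scaling the address and count widths together with MAX_DATA should let the
a_count_diff and a_oflow style assertions keep passing for buffer sizes larger
than 16, which is the point of the exercise.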
--- docs/examples/fifo/golden/fifo.sby | 32 +++++ docs/examples/fifo/golden/fifo.sv | 192 +++++++++++++++++++++++++++++ docs/source/newstart.rst | 66 ++++++++-- 3 files changed, 279 insertions(+), 11 deletions(-) create mode 100644 docs/examples/fifo/golden/fifo.sby create mode 100644 docs/examples/fifo/golden/fifo.sv diff --git a/docs/examples/fifo/golden/fifo.sby b/docs/examples/fifo/golden/fifo.sby new file mode 100644 index 00000000..d94789c3 --- /dev/null +++ b/docs/examples/fifo/golden/fifo.sby @@ -0,0 +1,32 @@ +[tasks] +basic bmc +nofullskip prove +cover +bigtest cover +basic cover : default + +[options] +cover: +mode cover +-- +prove: +mode prove +-- +bmc: +mode bmc +-- +bigtest: depth 120 +~bigtest: depth 10 + +[engines] +smtbmc boolector + +[script] +nofullskip: read -define NO_FULL_SKIP=1 +read -formal fifo.sv +bigtest: hierarchy -check -top fifo -chparam MAX_DATA 100 -chparam ADDR_BITS 7 +~bigtest: hierarchy -check -top fifo -chparam MAX_DATA 5 -chparam ADDR_BITS 3 +prep -top fifo + +[files] +fifo.sv diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv new file mode 100644 index 00000000..014563d1 --- /dev/null +++ b/docs/examples/fifo/golden/fifo.sv @@ -0,0 +1,192 @@ +// address generator/counter +module addr_gen ( + input en, clk, rst_n, + output reg [ADDR_BITS-1:0] addr +); + parameter MAX_DATA = 16; + parameter ADDR_BITS = 5; + + initial begin + addr <= 0; + end + + // async reset + // increment address when enabled + always @(posedge clk or negedge rst_n) begin + if (~rst_n) + addr <= 0; + else if (en) + if (addr == MAX_DATA-1) + addr <= 0; + else + addr <= addr + 1; + end +endmodule + +// Define our top level fifo entity +module fifo ( + input wen, ren, clk, rst_n, + input [7:0] wdata, + output [7:0] rdata, + output [ADDR_BITS:0] count, + output full, empty +); + parameter MAX_DATA = 16; + parameter ADDR_BITS = 5; + + // wire up our sub modules + // ADDR_BITS=5 gives 5 bits of address, [4:0] + // supporting MAX_DATA up to 2**5=32 + wire [ADDR_BITS-1:0] waddr, raddr; + wire wskip, rskip; + + // fifo storage + // reset not defined + reg [7:0] data [MAX_DATA-1:0]; + always @(posedge clk) begin + if (wen) + data[waddr] <= wdata; + end + assign rdata = data[raddr]; + + addr_gen #(.MAX_DATA(MAX_DATA), .ADDR_BITS(ADDR_BITS)) + fifo_writer ( + .en (wen || wskip), + .clk (clk ), + .rst_n (rst_n), + .addr (waddr) + ); + + addr_gen #(.MAX_DATA(MAX_DATA), .ADDR_BITS(ADDR_BITS)) + fifo_reader ( + .en (ren || rskip), + .clk (clk ), + .rst_n (rst_n), + .addr (raddr) + ); + + // internals + reg [ADDR_BITS:0] data_count; + initial begin + data_count <= 0; + end + + always @(posedge clk or negedge rst_n) begin + if (~rst_n) + data_count <= 0; + else if (wen && !ren && data_count < MAX_DATA) + data_count <= data_count + 1; + else if (ren && !wen && data_count > 0) + data_count <= data_count - 1; + end + + assign full = data_count == MAX_DATA; + assign empty = (data_count == 0) && rst_n; + assign count = data_count; + +`ifndef NO_FULL_SKIP + // write while full => overwrite oldest data, move read pointer + assign rskip = wen && !ren && data_count >= MAX_DATA; + // read while empty => read invalid data, keep write pointer in sync + assign wskip = ren && !wen && data_count == 0; +`else + assign rskip = 0; + assign wskip = 0; +`endif // NO_FULL_SKIP + +`ifdef FORMAL + // observers + wire [ADDR_BITS:0] addr_diff; + assign addr_diff = waddr >= raddr + ? 
waddr - raddr + : waddr + MAX_DATA - raddr; + + reg init = 0; + always @(posedge clk) begin + if (rst_n) + init <= 1; + // if init is low we don't care about the value of rst_n + // if init is high (rst_n has ben high), then rst_n must remain high + assume (!init || init && rst_n); + end + + // tests + always @(posedge clk) begin + if (rst_n) begin + // waddr and raddr can only be non zero if reset is high + w_nreset: cover (waddr || raddr); + + // count never more than max + a_oflow: assert (count <= MAX_DATA); + a_oflow2: assert (waddr < MAX_DATA); + + // count should be equal to the difference between writer and reader address + a_count_diff: assert (count == addr_diff + || count == MAX_DATA && addr_diff == 0); + + // count should only be able to increase or decrease by 1 + a_counts: assert (count == 0 + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); + + // read/write addresses can only increase (or stay the same) + a_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + a_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); + + // full and empty work as expected + a_full: assert (!full || full && count == MAX_DATA); + w_full: cover (wen && !ren && count == MAX_DATA-1); + a_empty: assert (!empty || empty && count == 0); + w_empty: cover (ren && !wen && count == 1); + + // can we corrupt our data? + w_overfill: cover ($past(rskip) && raddr); + w_underfill: cover ($past(wskip) && waddr); + end else begin + // waddr and raddr are zero while reset is low + a_reset: assert (!waddr && !raddr); + w_reset: cover (~rst_n); + + // outputs are zero while reset is low + a_zero_out: assert (!empty && !full && !count); + end + end + +`ifdef VERIFIC + // if we have verific we can also do the following additional tests + always @(posedge clk) begin + if (rst_n) begin + // read/write enables enable + ap_raddr2: assert property (ren |=> $changed(raddr)); + ap_waddr2: assert property (wen |=> $changed(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); + ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); + + // can we corrupt our data? + ap_overfill: assert property (wen && full |=> $changed(raddr)); + ap_underfill: assert property (ren && empty |=> $changed(waddr)); + + // change data when writing (and only when writing) so we can line + // up reads with writes + assume property (wen |=> $changed(wdata)); + assume property (!wen |=> $stable(wdata)); + end + end +`else // !VERIFIC + // without verific we are more limited in describing the above assumption + always @(posedge clk) begin + assume ((wen && wdata != $past(wdata)) + || (!wen && wdata == $past(wdata))); + end +`endif // VERIFIC + +`endif // FORMAL + +endmodule diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index ebc443f0..2828cb67 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -3,9 +3,13 @@ Getting started =============== .. note:: + This tutorial assumes sby and boolector installation as per the :ref:`install-doc`. For this tutorial, it is also recommended to install `GTKWave `_, an open source VCD viewer. + `Source files used in this tutorial + `_ can be + found on the sby git, under ``docs/examples/fifo``. First In, First Out (FIFO) buffer ********************************* @@ -109,6 +113,7 @@ should be run if no tasks are specified, such as when running the command below. sby fifo.sby .. 
note:: + The default set of tests should all pass. If this is not the case there may be a problem with the installation of sby or one of its solvers. @@ -139,12 +144,12 @@ following: .. code-block:: text - SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Assert failed in fifo: a_count_diff - SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Assert failed in fifo: ap_underfill - SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Writing trace to VCD file: engine_0/trace.vcd - SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Writing trace to Verilog testbench: engine_0/trace_tb.v - SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Writing trace to constraints file: engine_0/trace.smtc - SBY [fifo_nofullskip] engine_0.basecase: ## 0:00:00 Status: failed + SBY [fifo_nofullskip] engine_0.basecase: ## Assert failed in fifo: a_count_diff + SBY [fifo_nofullskip] engine_0.basecase: ## Assert failed in fifo: ap_underfill + SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to VCD file: engine_0/trace.vcd + SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to Verilog testbench: engine_0/trace_tb.v + SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to constraints file: engine_0/trace.smtc + SBY [fifo_nofullskip] engine_0.basecase: ## Status: failed SBY [fifo_nofullskip] engine_0.basecase: finished (returncode=1) SBY [fifo_nofullskip] engine_0: Status returned by engine for basecase: FAIL SBY [fifo_nofullskip] engine_0.induction: terminating process @@ -181,8 +186,8 @@ Searching the file for ``w_underfill`` will reveal the below. .. code-block:: text $ grep "w_underfill" fifo_cover/logfile.txt -A 1 - SBY [fifo_cover] engine_0: ## 0:00:00 Reached cover statement at w_underfill in step 2. - SBY [fifo_cover] engine_0: ## 0:00:00 Writing trace to VCD file: engine_0/trace2.vcd + SBY [fifo_cover] engine_0: ## Reached cover statement at w_underfill in step 2. + SBY [fifo_cover] engine_0: ## Writing trace to VCD file: engine_0/trace2.vcd We can then run gtkwave with the trace file indicated to see the correct operation as in the image below. When the buffer is empty, a read with no write @@ -193,7 +198,45 @@ write addresses and avoiding underflow. .. image:: media/gtkwave_coverskip.png -For more on using the .sby file, see the :ref:`.sby reference page `. +Exercise +******** + +Adjust the ``[script]`` section of ``fifo.sby`` so that it looks like the below. + +.. code-block:: text + + [script] + nofullskip: read -define NO_FULL_SKIP=1 + read -formal fifo.sv + hierarchy -check -top fifo -chparam MAX_DATA 17 + prep -top fifo + +The ``hierarchy`` command we added changes the ``MAX_DATA`` parameter of the top +module to be 17. Now run the ``basic`` task and see what happens. It should +fail and give an error like ``Assert failed in fifo: a_count_diff``. Can you +modify the verilog code so that it works with larger values of ``MAX_DATA`` +while still passing all of the tests? + +.. note:: + + If you need a **hint**, try increasing the width of the address wires. 4 bits + supports up to :math:`2^4=16` addresses. Are there other signals that + need to be wider? Can you make the width parameterisable to support + arbitrarily large buffers? + +Once the tests are passing with ``MAX_DATA=17``, try something bigger, like 64, +or 100. Does the ``basic`` task still pass? What about ``cover``? By default, +``bmc & cover`` modes will run to a depth of 20 cycles. If a maximum of one +value can be loaded in each cycle, how many cycles will it take to load 100 +values? 
Using the :ref:`.sby reference page `, +try to increase the cover mode depth to be at least a few cycles larger than the +``MAX_DATA``. + +.. note:: + + Reference files are provided in the ``fifo/golden`` directory, showing how + the verilog could have been modified and how a ``bigtest`` task could be + added. Concurrent assertions ********************* @@ -223,5 +266,6 @@ requires a skip in the read address, then the read address will *not* change. Further information ******************* For more information on the uses of assertions and the difference between -immediate and concurrent assertions, refer to appnote 109: Property Checking -with SystemVerilog Assertions. +immediate and concurrent assertions, refer to appnote 109: `Property Checking +with SystemVerilog Assertions +`_. From de5b9b782175e7ada189f4c5c9357d449dc3b87f Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Fri, 1 Jul 2022 11:29:33 +1200 Subject: [PATCH 099/220] Changed phrasing to avoid confusion on witnesses --- docs/source/newstart.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 2828cb67..f421fe26 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -174,10 +174,10 @@ to a higher value than the write address. .. image:: media/gtkwave_noskip.png -During correct operation, the ``w_underfill`` witness will cover the underflow +During correct operation, the ``w_underfill`` statement will cover the underflow case. Examining ``fifo_cover/logfile.txt`` will reveal which trace file -includes the witness we are looking for. If this file doesn't exist, run the -code below. +includes the cover statment we are looking for. If this file doesn't exist, run +the code below. sby fifo.sby cover From c9fbfa36841bf6608b3d218d85f54fd7040299ed Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 4 Jul 2022 10:32:55 +1200 Subject: [PATCH 100/220] Adding makefile for fifo --- docs/examples/fifo/Makefile | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 docs/examples/fifo/Makefile diff --git a/docs/examples/fifo/Makefile b/docs/examples/fifo/Makefile new file mode 100644 index 00000000..c22f5f17 --- /dev/null +++ b/docs/examples/fifo/Makefile @@ -0,0 +1,3 @@ +SUBDIR=../docs/examples/fifo +TESTDIR=../../../tests +include $(TESTDIR)/make/subdir.mk From cc27d27c0540799722e26de02d7fb6471febde0b Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 4 Jul 2022 11:53:40 +1200 Subject: [PATCH 101/220] More literalincludes Tidying up of newstart.rst and fifo.sv to include as much code as possible by reference. Should reduce repetition and make it easier if changes occur in source. 
--- docs/examples/fifo/fifo.sv | 71 ++++++++++++++----------------- docs/examples/fifo/golden/fifo.sv | 69 +++++++++++++----------------- docs/source/newstart.rst | 67 +++++++++++++++-------------- 3 files changed, 99 insertions(+), 108 deletions(-) diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 1a32187f..0a35f140 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -1,17 +1,14 @@ // address generator/counter -module addr_gen ( - input en, clk, rst_n, +module addr_gen +#( parameter MAX_DATA=16 +) ( input en, clk, rst_n, output reg [3:0] addr ); - parameter MAX_DATA = 16; - - initial begin - addr <= 0; - end + initial addr <= 0; // async reset // increment address when enabled - always @(posedge clk or negedge rst_n) begin + always @(posedge clk or negedge rst_n) if (~rst_n) addr <= 0; else if (en) @@ -19,52 +16,47 @@ module addr_gen ( addr <= 0; else addr <= addr + 1; - end endmodule // Define our top level fifo entity -module fifo ( - input wen, ren, clk, rst_n, +module fifo +#( parameter MAX_DATA=16 +) ( input wen, ren, clk, rst_n, input [7:0] wdata, output [7:0] rdata, output [4:0] count, output full, empty ); - parameter MAX_DATA = 16; - - // wire up our sub modules - wire [3:0] waddr, raddr; - wire wskip, rskip; - // fifo storage - // 8 bit data, fifo depth 16 / 4 bit address - // reset not defined + // async read, sync write + wire [3:0] waddr, raddr; reg [7:0] data [MAX_DATA-1:0]; - always @(posedge clk) begin + always @(posedge clk) if (wen) data[waddr] <= wdata; - end assign rdata = data[raddr]; + // end storage - addr_gen #(.MAX_DATA(MAX_DATA)) fifo_writer ( + // addr_gen for both write and read addresses + addr_gen #(.MAX_DATA(MAX_DATA)) + fifo_writer ( .en (wen || wskip), .clk (clk ), .rst_n (rst_n), .addr (waddr) ); - addr_gen #(.MAX_DATA(MAX_DATA)) fifo_reader ( + addr_gen #(.MAX_DATA(MAX_DATA)) + fifo_reader ( .en (ren || rskip), .clk (clk ), .rst_n (rst_n), .addr (raddr) ); - // internals + // status signals reg [4:0] data_count; - initial begin - data_count <= 0; - end + initial data_count <= 0; always @(posedge clk or negedge rst_n) begin if (~rst_n) @@ -79,6 +71,8 @@ module fifo ( assign empty = (data_count == 0) && rst_n; assign count = data_count; + // overflow protection + wire wskip, rskip; `ifndef NO_FULL_SKIP // write while full => overwrite oldest data, move read pointer assign rskip = wen && !ren && data_count >= MAX_DATA; @@ -116,22 +110,22 @@ module fifo ( a_oflow2: assert (waddr < MAX_DATA); // count should be equal to the difference between writer and reader address - a_count_diff: assert (count == addr_diff - || count == MAX_DATA && addr_diff == 0); + a_count_diff: assert (count == addr_diff + || count == MAX_DATA && addr_diff == 0); // count should only be able to increase or decrease by 1 a_counts: assert (count == 0 - || count == $past(count) - || count == $past(count) + 1 - || count == $past(count) - 1); + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); // read/write addresses can only increase (or stay the same) - a_raddr: assert (raddr == 0 - || raddr == $past(raddr) - || raddr == $past(raddr + 1)); - a_waddr: assert (waddr == 0 - || waddr == $past(waddr) - || waddr == $past(waddr + 1)); + a_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + a_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); // full and empty work as expected a_full: assert (!full || full && count == MAX_DATA); @@ -165,6 
+159,7 @@ module fifo ( ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); // can we corrupt our data? + // these should already be covered by ap_{r,w}addr2 ap_overfill: assert property (wen && full |=> $changed(raddr)); ap_underfill: assert property (ren && empty |=> $changed(waddr)); diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv index 014563d1..f92b33af 100644 --- a/docs/examples/fifo/golden/fifo.sv +++ b/docs/examples/fifo/golden/fifo.sv @@ -1,18 +1,15 @@ // address generator/counter -module addr_gen ( - input en, clk, rst_n, +module addr_gen +#( parameter MAX_DATA=16, + parameter ADDR_BITS=5 +) ( input en, clk, rst_n, output reg [ADDR_BITS-1:0] addr ); - parameter MAX_DATA = 16; - parameter ADDR_BITS = 5; - - initial begin - addr <= 0; - end + initial addr <= 0; // async reset // increment address when enabled - always @(posedge clk or negedge rst_n) begin + always @(posedge clk or negedge rst_n) if (~rst_n) addr <= 0; else if (en) @@ -20,35 +17,29 @@ module addr_gen ( addr <= 0; else addr <= addr + 1; - end endmodule // Define our top level fifo entity -module fifo ( - input wen, ren, clk, rst_n, +module fifo +#( parameter MAX_DATA=16, + parameter ADDR_BITS=5 +) ( input wen, ren, clk, rst_n, input [7:0] wdata, output [7:0] rdata, output [ADDR_BITS:0] count, output full, empty ); - parameter MAX_DATA = 16; - parameter ADDR_BITS = 5; - - // wire up our sub modules - // ADDR_BITS=5 gives 5 bits of address, [4:0] - // supporting MAX_DATA up to 2**5=32 - wire [ADDR_BITS-1:0] waddr, raddr; - wire wskip, rskip; - // fifo storage - // reset not defined + // async read, sync write + wire [3:0] waddr, raddr; reg [7:0] data [MAX_DATA-1:0]; - always @(posedge clk) begin + always @(posedge clk) if (wen) data[waddr] <= wdata; - end assign rdata = data[raddr]; + // end storage + // addr_gen for both write and read addresses addr_gen #(.MAX_DATA(MAX_DATA), .ADDR_BITS(ADDR_BITS)) fifo_writer ( .en (wen || wskip), @@ -65,11 +56,9 @@ module fifo ( .addr (raddr) ); - // internals + // status signals reg [ADDR_BITS:0] data_count; - initial begin - data_count <= 0; - end + initial data_count <= 0; always @(posedge clk or negedge rst_n) begin if (~rst_n) @@ -84,6 +73,8 @@ module fifo ( assign empty = (data_count == 0) && rst_n; assign count = data_count; + // overflow protection + wire wskip, rskip; `ifndef NO_FULL_SKIP // write while full => overwrite oldest data, move read pointer assign rskip = wen && !ren && data_count >= MAX_DATA; @@ -121,22 +112,22 @@ module fifo ( a_oflow2: assert (waddr < MAX_DATA); // count should be equal to the difference between writer and reader address - a_count_diff: assert (count == addr_diff - || count == MAX_DATA && addr_diff == 0); + a_count_diff: assert (count == addr_diff + || count == MAX_DATA && addr_diff == 0); // count should only be able to increase or decrease by 1 a_counts: assert (count == 0 - || count == $past(count) - || count == $past(count) + 1 - || count == $past(count) - 1); + || count == $past(count) + || count == $past(count) + 1 + || count == $past(count) - 1); // read/write addresses can only increase (or stay the same) - a_raddr: assert (raddr == 0 - || raddr == $past(raddr) - || raddr == $past(raddr + 1)); - a_waddr: assert (waddr == 0 - || waddr == $past(waddr) - || waddr == $past(waddr + 1)); + a_raddr: assert (raddr == 0 + || raddr == $past(raddr) + || raddr == $past(raddr + 1)); + a_waddr: assert (waddr == 0 + || waddr == $past(waddr) + || waddr == $past(waddr + 1)); // full and empty work as 
expected a_full: assert (!full || full && count == MAX_DATA); diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index f421fe26..214a3af7 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -33,7 +33,8 @@ First, the address generator module: .. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog - :lines: 1-23 + :start-at: address generator + :end-at: endmodule This module is instantiated twice; once for the write address and once for the read address. In both cases, the address will start at and reset to 0, and will @@ -45,7 +46,9 @@ Next, the register file: .. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog - :lines: 39-47 + :start-at: fifo storage + :end-before: end storage + :dedent: Notice that this register design includes a synchronous write and asynchronous read. Each word is 8 bits, and up to 16 words can be stored in the buffer. @@ -58,9 +61,11 @@ satisfy. For example, there must never be more than there is memory available. By assigning a signal to count the number of values in the buffer, we can make the following assertion in the code: -.. code-block:: systemverilog - - a_oflow: assert (count <= MAX_DATA); +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: a_oflow + :end-at: ; + :dedent: It is also possible to use the prior value of a signal for comparison. This can be used, for example, to ensure that the count is only able to increase or @@ -69,12 +74,11 @@ decrease by 1. A case must be added to handle resetting the count directly to code; at least one of these conditions must be true at all times if our design is to be correct. -.. code-block:: systemverilog - - a_counts: assert (count == 0 - || count == $past(count) - || count == $past(count) + 1 - || count == $past(count) - 1); +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: a_counts + :end-at: ; + :dedent: As our count signal is used independently of the read and write pointers, we must verify that the count is always correct. While the write pointer will @@ -83,14 +87,17 @@ means that the write *address* could wrap around and appear *less than* the read address. So we must first perform some simple arithmetic to find the absolute difference in addresses, and then compare with the count signal. -.. code-block:: systemverilog +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: assign addr_diff + :end-at: ; + :dedent: - assign addr_diff = waddr >= raddr - ? waddr - raddr - : waddr + MAX_DATA - raddr; - - a_count_diff: assert (count == addr_diff - || count == MAX_DATA && addr_diff == 0); +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: a_count_diff + :end-at: ; + :dedent: SymbiYosys ********** @@ -130,14 +137,11 @@ to the ``a_count_diff`` assertion failing. Without this assertion, there is no guarantee that data will be read in the same order it was written should an overflow occur and the oldest data be written. -.. code-block:: systemverilog - - `ifndef NO_FULL_SKIP - // write while full => overwrite oldest data, move read pointer - assign rskip = wen && !ren && data_count >= MAX_DATA; - // read while empty => read invalid data, keep write pointer in sync - assign wskip = ren && !wen && data_count == 0; - `endif // NO_FULL_SKIP +.. 
literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: NO_FULL_SKIP + :end-at: endif + :lines: 1-5,9 The last few lines of output for the noskip task should be similar to the following: @@ -257,11 +261,12 @@ increment or remain the same we do not need to specify that here. We can also assert that if the enable is low, and the buffer is not full and potentially requires a skip in the read address, then the read address will *not* change. -.. code-block:: systemverilog - - ap_raddr2: assert property (ren |=> $changed(raddr)); - ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); - +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: ap_raddr2 + :end-at: ap_raddr3 + :dedent: + :lines: 1,5 Further information ******************* From e01ac8b84855aa7581d40a69d15c42d1432a8432 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 29 Jun 2022 16:43:07 +0200 Subject: [PATCH 102/220] tests: Test for invalid x-value FF init optimizations --- tests/regression/ff_xinit_opt.sby | 39 +++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 tests/regression/ff_xinit_opt.sby diff --git a/tests/regression/ff_xinit_opt.sby b/tests/regression/ff_xinit_opt.sby new file mode 100644 index 00000000..2078ad1b --- /dev/null +++ b/tests/regression/ff_xinit_opt.sby @@ -0,0 +1,39 @@ +[options] +mode bmc + +[engines] +smtbmc boolector + +[script] +read_verilog -formal ff_xinit_opt.sv +prep -flatten -top top + +opt -fast -keepdc + +[file ff_xinit_opt.sv] +module top( + input clk, + input [7:0] d +); + + (* keep *) + wire [7:0] some_const = $anyconst; + + wire [7:0] q1; + wire [7:0] q2; + + ff ff1(.clk(clk), .d(q1), .q(q1)); + ff ff2(.clk(1'b0), .d(d), .q(q2)); + + initial assume (some_const == q1); + initial assume (some_const == q2); + initial assume (q1 != 0); + initial assume (q2 != 0); + + always @(posedge clk) assert(some_const == q1); + always @(posedge clk) assert(some_const == q2); +endmodule + +module ff(input clk, input [7:0] d, (* keep *) output reg [7:0] q); + always @(posedge clk) q <= d; +endmodule From ff802086b4b1b9d9ca0c74fd5fa521b5316bbe4c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 29 Jun 2022 18:00:52 +0200 Subject: [PATCH 103/220] test uninited FFs with const clks and fix btor script for this --- sbysrc/sby_core.py | 2 +- tests/regression/const_clocks.sby | 43 +++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+), 1 deletion(-) create mode 100644 tests/regression/const_clocks.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 4e57b21b..5580d09c 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -653,7 +653,7 @@ def instance_hierarchy_error_callback(retcode): print("abc", file=f) print("opt_clean", file=f) else: - print("opt -fast", file=f) + print("opt -fast -keepdc", file=f) print("delete -output", file=f) print("dffunmap", file=f) print("stat", file=f) diff --git a/tests/regression/const_clocks.sby b/tests/regression/const_clocks.sby new file mode 100644 index 00000000..245358bf --- /dev/null +++ b/tests/regression/const_clocks.sby @@ -0,0 +1,43 @@ +[tasks] +btor +smt +btor_m btor multiclock +smt_m smt multiclock + +[options] +mode bmc + +multiclock: multiclock on + +[engines] +#smtbmc +btor: btor btormc +smt: smtbmc boolector + +[script] +read_verilog -formal const_clocks.sv +prep -flatten -top top + +[file const_clocks.sv] +module top( + input clk, + input [7:0] d +); + + (* keep *) + wire [7:0] some_const = $anyconst; + + wire [7:0] q; + + ff 
ff1(.clk(1'b0), .d(d), .q(q)); + + initial assume (some_const == q); + initial assume (q != 0); + + + always @(posedge clk) assert(some_const == q); +endmodule + +module ff(input clk, input [7:0] d, (* keep *) output reg [7:0] q); + always @(posedge clk) q <= d; +endmodule From ea7fc7dc2c0d7eea25dabe06beba6ebe56d58c7d Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 5 Jul 2022 15:34:27 +0200 Subject: [PATCH 104/220] tests: Windows fixes Make tests runnable on Windows, as long as a unix like environment as e.g. provided by MSYS2 is available. --- tests/Makefile | 20 ++++++++++++++++++-- tests/make/collect_tests.py | 21 +++++++++++++-------- tests/make/test_rules.py | 21 +++++++++++++++++---- 3 files changed, 48 insertions(+), 14 deletions(-) diff --git a/tests/Makefile b/tests/Makefile index 9b65da77..805d1909 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -2,11 +2,27 @@ test: .PHONY: test clean refresh help +OS_NAME := $(shell python3 -c "import os;print(os.name)") +ifeq (nt,$(OS_NAME)) +ifeq (quoted,$(shell echo "quoted")) +OS_NAME := nt-unix-like +endif +endif + +ifeq (nt,$(OS_NAME)) +$(error This Makefile requires unix-like tools and shell, e.g. MSYS2.) +endif + help: @cat make/help.txt -export SBY_WORKDIR_GITIGNORE=1 -export SBY_MAIN=$(realpath $(dir $(firstword $(MAKEFILE_LIST)))/../sbysrc/sby.py) +export SBY_WORKDIR_GITIGNORE := 1 + +SBY_MAIN := $(realpath $(dir $(firstword $(MAKEFILE_LIST)))/../sbysrc/sby.py) +ifeq (nt-unix-like,$(OS_NAME)) +SBY_MAIN := $(shell cygpath -w $(SBY_MAIN)) +endif +export SBY_MAIN make/rules/collect.mk: make/collect_tests.py python3 make/collect_tests.py diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py index 89a68eca..b5f76996 100644 --- a/tests/make/collect_tests.py +++ b/tests/make/collect_tests.py @@ -4,7 +4,8 @@ tests = [] checked_dirs = [] -SAFE_PATH = re.compile(r"^[a-zA-Z0-9_./]*$") +SAFE_PATH = re.compile(r"^[a-zA-Z0-9_./\\]*$") + def collect(path): # don't pick up any paths that need escaping nor any sby workdirs @@ -15,8 +16,6 @@ def collect(path): for entry in path.glob("*.sby"): filename = str(entry) if not SAFE_PATH.match(filename): - continue - if not re.match(r"^[a-zA-Z0-9_./]*$", filename): print(f"skipping {filename!r}, use only [a-zA-Z0-9_./] in filenames") continue tests.append(entry) @@ -25,6 +24,10 @@ def collect(path): collect(entry) +def unix_path(path): + return "/".join(path.parts) + + collect(Path(".")) collect(Path("../docs/examples")) @@ -33,16 +36,18 @@ def collect(path): with out_file.open("w") as output: - for checked_dir in checked_dirs: print(f"{out_file}: {checked_dir}", file=output) for test in tests: - print(f"make/rules/test/{test}.mk: {test}", file=output) + test_unix = unix_path(test) + print(f"make/rules/test/{test_unix}.mk: {test_unix}", file=output) for ext in [".sh", ".py"]: script_file = test.parent / (test.stem + ext) if script_file.exists(): - print(f"make/rules/test/{test}.mk: {script_file}", file=output) - print(f"make/rules/test/{test}.mk: make/test_rules.py", file=output) + script_file_unix = unix_path(script_file) + print(f"make/rules/test/{test_unix}.mk: {script_file_unix}", file=output) + print(f"make/rules/test/{test_unix}.mk: make/test_rules.py", file=output) for test in tests: - print(f"-include make/rules/test/{test}.mk", file=output) + test_unix = unix_path(test) + print(f"-include make/rules/test/{test_unix}.mk", file=output) diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 5c18acad..9607d814 100644 --- a/tests/make/test_rules.py +++ 
b/tests/make/test_rules.py @@ -7,6 +7,11 @@ from required_tools import REQUIRED_TOOLS + +def unix_path(path): + return "/".join(path.parts) + + sby_file = Path(sys.argv[1]) sby_dir = sby_file.parent @@ -56,7 +61,10 @@ def parse_engine(engine): solvers.add(solver) engine_solvers.add((engine, solver)) - if any(line.startswith("read -verific") or line.startswith("verific") for line in info["script"]): + if any( + line.startswith("read -verific") or line.startswith("verific") + for line in info["script"] + ): required_tools.add("verific") required_tools = sorted(required_tools) @@ -66,12 +74,17 @@ def parse_engine(engine): shell_script = sby_dir / f"{sby_file.stem}.sh" + sby_dir_unix = unix_path(sby_dir) + if shell_script.exists(): - command = f"cd {sby_dir} && SBY_FILE={sby_file.name} WORKDIR={workdirname} TASK={task} bash {shell_script.name}" + command = f"cd {sby_dir_unix} && env SBY_FILE={sby_file.name} WORKDIR={workdirname} TASK={task} bash {shell_script.name}" else: - command = f"cd {sby_dir} && python3 $(SBY_MAIN) -f {sby_file.name} {task}" + command = f"cd {sby_dir_unix} && python3 $(SBY_MAIN) -f {sby_file.name} {task}" - print(f"\t@python3 make/required_tools.py run {target} {shlex.quote(command)} {shlex.join(required_tools)}", file=rules) + print( + f"\t@python3 make/required_tools.py run {target} {shlex.quote(command)} {shlex.join(required_tools)}", + file=rules, + ) print(f".PHONY: clean-{target}", file=rules) print(f"clean-{target}:", file=rules) From 566edad13c369bd4a4f5c7bc61a11b7e5d390493 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 5 Jul 2022 17:20:55 +0200 Subject: [PATCH 105/220] Read config before creating a workdir When using a task name not defined in the config, this now produces an error before creating an unnecessary workdir for that non-existing task. --- sbysrc/sby.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index b802b7a7..0628c6d2 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -402,6 +402,8 @@ def find_files(taskname): sys.exit(1) def run_task(taskname): + sbyconfig, _, _, _ = read_sbyconfig(sbydata, taskname) + my_opt_tmpdir = opt_tmpdir my_workdir = None @@ -461,7 +463,6 @@ def run_task(taskname): else: junit_filename = "junit" - sbyconfig, _, _, _ = read_sbyconfig(sbydata, taskname) task = SbyTask(sbyconfig, my_workdir, early_logmsgs, reusedir) for k, v in exe_paths.items(): From b3b315a473ac6436517ebbfebce24392f2bd3b27 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 6 Jul 2022 11:19:51 +0200 Subject: [PATCH 106/220] Make SbyProc hide Windows differences in retcode handling Without this, we don't properly detect missing solver binaries and do not properly handle the return status of the pono solver. 
--- sbysrc/sby_core.py | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 5580d09c..372cbebf 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -176,7 +176,7 @@ def preexec_fn(): fcntl.fcntl(self.p.stdout, fcntl.F_SETFL, fl | os.O_NONBLOCK) else: - self.p = subprocess.Popen(self.cmdline, shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, + self.p = subprocess.Popen(self.cmdline + " & exit !errorlevel!", shell=True, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=(subprocess.STDOUT if self.logstderr else None)) self.task.update_proc_running(self) @@ -200,23 +200,31 @@ def preexec_fn(): self.running = False self.exited = True - if self.p.returncode == 127: + if os.name == "nt": + if self.p.returncode == 9009: + returncode = 127 + else: + returncode = self.p.returncode & 0xff + else: + returncode = self.p.returncode + + if returncode == 127: if not self.silent: self.task.log(f"{self.info}: COMMAND NOT FOUND. ERROR.") - self.handle_error(self.p.returncode) + self.handle_error(returncode) self.terminated = True self.task.proc_failed(self) return - if self.checkretcode and self.p.returncode not in self.retcodes: + if self.checkretcode and returncode not in self.retcodes: if not self.silent: self.task.log(f"{self.info}: task failed. ERROR.") - self.handle_error(self.p.returncode) + self.handle_error(returncode) self.terminated = True self.task.proc_failed(self) return - self.handle_exit(self.p.returncode) + self.handle_exit(returncode) self.finished = True for next_proc in self.notify: From 92e7eb2e3225d7a3bdf23497d80e4901b6c75e98 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 8 Jul 2022 12:36:44 +0200 Subject: [PATCH 107/220] abc pdr: Enable log output by default This makes it consistent with the other abc solvers and shows whether abc pdr is making progress. --- sbysrc/sby_engine_abc.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index 4635ee17..e96ddb89 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -41,6 +41,7 @@ def run(mode, task, engine_idx, engine): elif abc_command[0] == "pdr": if mode != "prove": task.error("ABC command 'pdr' is only valid in prove mode.") + abc_command[0] += f" -v" else: task.error(f"Invalid ABC command {abc_command[0]}.") From bc2bb5c86316ae0e0be9a5431d806c2ce88583d3 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 8 Jul 2022 14:31:57 +0200 Subject: [PATCH 108/220] docs: Don't use linebreaks within inline code spans. --- docs/source/autotune.rst | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/source/autotune.rst b/docs/source/autotune.rst index 6fab860b..7ef07c19 100644 --- a/docs/source/autotune.rst +++ b/docs/source/autotune.rst @@ -11,15 +11,15 @@ Using Autotune -------------- To run autotune, you can add the ``--autotune`` option to your usual sby -invokation. For example if you usually run ``sby demo.sby`` you would run ``sby ---autotune demo.sby`` instead. When the ``.sby`` file contains multiple tasks, -autotune is run for each task independently. As without ``--autotune``, it is -possible to specify which tasks to run on the command line. +invokation. For example if you usually run ``sby demo.sby`` you would run +``sby --autotune demo.sby`` instead. When the ``.sby`` file contains multiple +tasks, autotune is run for each task independently. 
As without ``--autotune``, +it is possible to specify which tasks to run on the command line. Autotune runs without requiring further interaction and will eventually print a list of engine configurations and their respective solving times. To -permanently use an engine configuration you can copy if from the ``sby ---autotune`` output into the ``[engines]`` section of your ``.sby`` file. +permanently use an engine configuration you can copy if from the +``sby --autotune`` output into the ``[engines]`` section of your ``.sby`` file. Autotune Log Output ------------------- @@ -112,8 +112,8 @@ their solving time: If any tried engine encounters an error or produces an unexpected result, autotune will also output a list of failed engines. Note that when the sby file -does not contain the ``expect`` option, autotune defaults to ``expect -pass,fail`` to simplify running autotune on a verification task with a +does not contain the ``expect`` option, autotune defaults to +``expect pass,fail`` to simplify running autotune on a verification task with a currently unknown outcome. Configuring Autotune From 4ab610ce87e02d3340989c46d92cc513827b53fc Mon Sep 17 00:00:00 2001 From: matt venn Date: Mon, 11 Jul 2022 11:10:45 +0200 Subject: [PATCH 109/220] Update autotune.rst --- docs/source/autotune.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/autotune.rst b/docs/source/autotune.rst index 7ef07c19..d61f9960 100644 --- a/docs/source/autotune.rst +++ b/docs/source/autotune.rst @@ -11,7 +11,7 @@ Using Autotune -------------- To run autotune, you can add the ``--autotune`` option to your usual sby -invokation. For example if you usually run ``sby demo.sby`` you would run +invocation. For example if you usually run ``sby demo.sby`` you would run ``sby --autotune demo.sby`` instead. When the ``.sby`` file contains multiple tasks, autotune is run for each task independently. As without ``--autotune``, it is possible to specify which tasks to run on the command line. From ca3429e3286b363584bd797ba397b9c1f6e4b4b7 Mon Sep 17 00:00:00 2001 From: KrystalDelusion <93062060+KrystalDelusion@users.noreply.github.com> Date: Mon, 11 Jul 2022 21:21:31 +1200 Subject: [PATCH 110/220] Autotune grammar/spelling check --- docs/source/autotune.rst | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/docs/source/autotune.rst b/docs/source/autotune.rst index d61f9960..909b8386 100644 --- a/docs/source/autotune.rst +++ b/docs/source/autotune.rst @@ -3,7 +3,7 @@ Autotune: Automatic Engine Selection Selecting the best performing engine for a given verification task often requires some amount of trial and error. To reduce the manual work required for -this, sby offers the ``--autotune`` option which takes an ``.sby`` file and +this, sby offers the ``--autotune`` option. This takes an ``.sby`` file and runs it using engines and engine configurations. At the end it produces a report listing the fastest engines among these candidates. @@ -11,14 +11,14 @@ Using Autotune -------------- To run autotune, you can add the ``--autotune`` option to your usual sby -invocation. For example if you usually run ``sby demo.sby`` you would run +invocation. For example, if you usually run ``sby demo.sby`` you would run ``sby --autotune demo.sby`` instead. When the ``.sby`` file contains multiple tasks, autotune is run for each task independently. As without ``--autotune``, it is possible to specify which tasks to run on the command line. 
-Autotune runs without requiring further interaction and will eventually print a +Autotune runs without requiring further interaction, and will eventually print a list of engine configurations and their respective solving times. To -permanently use an engine configuration you can copy if from the +permanently use an engine configuration you can copy it from the ``sby --autotune`` output into the ``[engines]`` section of your ``.sby`` file. Autotune Log Output @@ -37,9 +37,9 @@ once and will be reused to run every candidate engine. SBY [demo] base: finished (returncode=0) SBY [demo] prepared model 'base' -This is followed by selecting the engine candidates to run. For this the design +This is followed by selecting the engine candidates to run. The design and sby configuration are analyzed to skip engines that are not compatible or -unlikely to work well. When engines is skipped due to a recommendation, a +unlikely to work well. When an engine is skipped due to a recommendation, a corresponding log message is displayed as well as the total number of candidates to try: @@ -49,7 +49,7 @@ candidates to try: SBY [demo] testing 16 engine configurations... After this, the candidate engine configurations are started. Depending on the -configuration engines can run in parallel. The engine output itself is not +configuration, engines can run in parallel. The engine output itself is not logged to stdout when running autotune, so you will only see messages about starting an engine: @@ -77,7 +77,7 @@ be interspersed with other log output. SBY [demo] smt2: finished (returncode=0) SBY [demo] prepared model 'smt2' -Whenever an engine finishes a log message is printed: +Whenever an engine finishes, a log message is printed: .. code-block:: text @@ -91,7 +91,7 @@ When an engine takes longer than the current hard timeout, it is stopped: SBY [demo] engine_2 (smtbmc bitwuzla): timeout (150 seconds) Depending on the configuration, autotune will also stop an engine earlier when -reaching a soft timeout. In that case, when no other engine finishes in less +reaching a soft timeout. If no other engine finishes in less time, the engine will be retried later with a longer soft timeout: .. code-block:: text @@ -99,7 +99,7 @@ time, the engine will be retried later with a longer soft timeout: SBY [demo] engine_0 (smtbmc boolector): timeout (60 seconds, will be retried if necessary) -Finally at the end a summary of all finished engines is printed, sorted by +Finally, a summary of all finished engines is printed, sorted by their solving time: .. code-block:: text @@ -121,7 +121,7 @@ Configuring Autotune Autotune can be configured by adding an ``[autotune]`` section to the ``.sby`` file. Each line in that section has the form ``option_name value``, the -possible options and their supported values are described below. In addition +possible options and their supported values are described below. In addition, the ``--autotune-config`` command line option can be used to specify a file containing further autotune options, using the same syntax. When both are used, the command line option takes precedence. This makes it easy to run autotune From 5d3f784bebd7f952ccfbe8323f6d97b66f2ebf04 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 13 Jul 2022 15:54:24 +0200 Subject: [PATCH 111/220] Fix a race-condition SbyProc that could truncate output Present for a long time, but was not easy to hit. 
Some of my work in progress changes made this much more likely and running the complete test suite in parallel had a good chance of reproducing this for at least one of the tests. --- sbysrc/sby_core.py | 24 +++++++++++++++--------- 1 file changed, 15 insertions(+), 9 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 372cbebf..391fb16a 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -183,17 +183,12 @@ def preexec_fn(): self.running = True return - while True: - outs = self.p.stdout.readline().decode("utf-8") - if len(outs) == 0: break - if outs[-1] != '\n': - self.linebuffer += outs - break - outs = (self.linebuffer + outs).strip() - self.linebuffer = "" - self.handle_output(outs) + self.read_output() if self.p.poll() is not None: + # The process might have written something since the last time we checked + self.read_output() + if not self.silent: self.task.log(f"{self.info}: finished (returncode={self.p.returncode})") self.task.update_proc_stopped(self) @@ -231,6 +226,17 @@ def preexec_fn(): next_proc.poll() return + def read_output(self): + while True: + outs = self.p.stdout.readline().decode("utf-8") + if len(outs) == 0: break + if outs[-1] != '\n': + self.linebuffer += outs + break + outs = (self.linebuffer + outs).strip() + self.linebuffer = "" + self.handle_output(outs) + class SbyAbort(BaseException): pass From 3ff2c9affc0bf0e96971960dc9c6bb93325a1566 Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Tue, 19 Jul 2022 18:34:43 +0200 Subject: [PATCH 112/220] avoid erroring out when coarse-grain logic loops can be resolved by mapping to fine grain operators --- sbysrc/sby_core.py | 1 + tests/regression/fake_loop.sby | 23 +++++++++++++++++++++++ 2 files changed, 24 insertions(+) create mode 100644 tests/regression/fake_loop.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 391fb16a..3462c36a 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -567,6 +567,7 @@ def make_model(self, model_name): os.makedirs(f"{self.workdir}/model") def print_common_prep(check): + print("scc -select; simplemap; select -clear", file=f) if self.opt_multiclock: print("clk2fflogic", file=f) else: diff --git a/tests/regression/fake_loop.sby b/tests/regression/fake_loop.sby new file mode 100644 index 00000000..419e2674 --- /dev/null +++ b/tests/regression/fake_loop.sby @@ -0,0 +1,23 @@ +[options] +mode cover + +[engines] +smtbmc boolector + +[script] +read -formal fake_loop.sv +hierarchy -top fake_loop +proc + +[file fake_loop.sv] +module fake_loop(input clk, input a, input b, output [9:0] x); + wire [9:0] ripple; + reg [9:0] prev_ripple = 9'b0; + + always @(posedge clk) prev_ripple <= ripple; + + assign ripple = {ripple[8:0], a} ^ prev_ripple; // only cyclic at the coarse-grain level + assign x = ripple[9] + b; + + always @(posedge clk) cover(ripple[9]); +endmodule From 1cf206fc1c73e2323dbaf7e0779b3bb3d0d1a9ce Mon Sep 17 00:00:00 2001 From: "N. 
Engelhardt" Date: Mon, 25 Jul 2022 17:01:17 +0200 Subject: [PATCH 113/220] add cvc5 executable to required tool mapping --- tests/make/required_tools.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/make/required_tools.py b/tests/make/required_tools.py index 203ccd76..ce333564 100644 --- a/tests/make/required_tools.py +++ b/tests/make/required_tools.py @@ -4,6 +4,7 @@ ("smtbmc", "yices"): ["yices-smt2"], ("smtbmc", "z3"): ["z3"], ("smtbmc", "cvc4"): ["cvc4"], + ("smtbmc", "cvc5"): ["cvc5"], ("smtbmc", "mathsat"): ["mathsat"], ("smtbmc", "boolector"): ["boolector"], ("smtbmc", "bitwuzla"): ["bitwuzla"], From 9293e660920f3c2927f8c920f7e3d651b3265d2f Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 26 Jul 2022 16:03:36 +0200 Subject: [PATCH 114/220] example for autotune --- docs/examples/autotune/README.md | 30 +++++++++++ docs/examples/autotune/divider.sby | 24 +++++++++ docs/examples/autotune/divider.sv | 85 ++++++++++++++++++++++++++++++ 3 files changed, 139 insertions(+) create mode 100644 docs/examples/autotune/README.md create mode 100644 docs/examples/autotune/divider.sby create mode 100644 docs/examples/autotune/divider.sv diff --git a/docs/examples/autotune/README.md b/docs/examples/autotune/README.md new file mode 100644 index 00000000..3cdadb72 --- /dev/null +++ b/docs/examples/autotune/README.md @@ -0,0 +1,30 @@ +# Autotune demo + +This directory contains a simple sequential integer divider circuit. The +verilog implementation in [divider.sv](divider.sv) comes with assertions that +this circuit will always produce the correct result and will always finish +within a fixed number of cycles. The circuit has the divider bit-width as +parameter. + +Increasing the WIDTH parameter quickly turns proving those assertions into a +very difficult proof for fully autmated solvers. This makes it a good example +for the `--autotune` option which tries different backend engines to find the +best performing engine configuration for a given verification task. + +The [divider.sby](divider.sby) file defines 3 tasks named `small`, `medium` and +`large` which configure the divider with different bit-widths. To verify the +`small` divider using the default engine run: + + sby -f divider.sby small + +To automatically try different backend engines using autotune, run + + sby --autotune -f divider.sby small + +The `small` task should finish quickly using both the default engine and using +autotune. The `medium` and `large` tasks take significantly longer and show +greater differences between engine configurations. Note that the `large` tasks +can take many minutes to hours, depending on the machine you are using. + +You can learn more about Sby's autotune feature from [Sby's +documentation](https://symbiyosys.readthedocs.io/en/latest/autotune.html). 
diff --git a/docs/examples/autotune/divider.sby b/docs/examples/autotune/divider.sby new file mode 100644 index 00000000..61bed9a5 --- /dev/null +++ b/docs/examples/autotune/divider.sby @@ -0,0 +1,24 @@ +[tasks] +small default +medium +large + +[options] +mode prove +small: depth 11 +medium: depth 15 +large: depth 19 + +[engines] +smtbmc + +[script] +small: read -define WIDTH=8 +medium: read -define WIDTH=12 +large: read -define WIDTH=16 + +read -formal divider.sv +prep -top divider + +[files] +divider.sv diff --git a/docs/examples/autotune/divider.sv b/docs/examples/autotune/divider.sv new file mode 100644 index 00000000..b2ec2add --- /dev/null +++ b/docs/examples/autotune/divider.sv @@ -0,0 +1,85 @@ +`ifndef WIDTH +`define WIDTH 4 +`endif + +module divider #( + parameter WIDTH=`WIDTH +) ( + input wire clk, + input wire start, + input wire [WIDTH-1:0] dividend, + input wire [WIDTH-1:0] divisor, + + output reg done, + output reg [WIDTH-1:0] quotient, + output wire [WIDTH-1:0] remainder +); + + reg [WIDTH-1:0] acc; + + reg [WIDTH*2-1:0] sub; + reg [WIDTH-1:0] pos; + + assign remainder = acc; + + always @(posedge clk) begin + if (start) begin + acc <= dividend; + quotient <= 0; + sub <= divisor << (WIDTH - 1); + pos <= 1 << (WIDTH - 1); + done <= 0; + end else if (!done) begin + if (acc >= sub) begin + acc <= acc - sub[WIDTH-1:0]; + quotient <= quotient + pos; + end + + sub <= sub >> 1; + {pos, done} <= pos; + end + end + + +`ifdef FORMAL + reg [WIDTH-1:0] start_dividend = 0; + reg [WIDTH-1:0] start_divisor = 0; + + reg started = 0; + reg finished = 0; + reg [$clog2(WIDTH + 1):0] counter = 0; + + always @(posedge clk) begin + // Bound the number of cycles until the result is ready + assert (counter <= WIDTH); + + if (started) begin + if (finished || done) begin + finished <= 1; + // Make sure result stays until we start a new division + assert (done); + + // Check the result + if (start_divisor == 0) begin + assert ("ient); + assert (remainder == start_dividend); + end else begin + assert (quotient == start_dividend / start_divisor); + assert (remainder == start_dividend % start_divisor); + end + end else begin + counter <= counter + 1'b1; + end + end + + // Track the requested inputs + if (start) begin + start_divisor <= divisor; + start_dividend <= dividend; + started <= 1; + counter <= 0; + finished <= 0; + end + end +`endif +endmodule From 4ec278e6ecee27601e80b687d069dcf15c69c851 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 26 Jul 2022 16:35:57 +0200 Subject: [PATCH 115/220] Autotune example in docs --- docs/source/autotune.rst | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) diff --git a/docs/source/autotune.rst b/docs/source/autotune.rst index 909b8386..19432186 100644 --- a/docs/source/autotune.rst +++ b/docs/source/autotune.rst @@ -21,6 +21,43 @@ list of engine configurations and their respective solving times. To permanently use an engine configuration you can copy it from the ``sby --autotune`` output into the ``[engines]`` section of your ``.sby`` file. +Example +^^^^^^^ + +The Sby repository contains a `small example`_ in the ``docs/examples/autotune`` +directory. + +The ``divider.sby`` file contains the following ``[engines]`` section: + +.. code-block:: text + + [engines] + smtbmc + +We notice that running ``sby -f divider.sby medium`` takes a long time and want +to see if another engine would speed things up, so we run +``sby --autotune -f divider.sby medium``. After a few minutes this prints: + +.. 
code-block:: text + + SBY [divider_medium] finished engines: + SBY [divider_medium] #4: engine_7: smtbmc --nopresat bitwuzla -- --noincr (32 seconds, status=PASS) + SBY [divider_medium] #3: engine_2: smtbmc boolector -- --noincr (32 seconds, status=PASS) + SBY [divider_medium] #2: engine_3: smtbmc --nopresat boolector -- --noincr (32 seconds, status=PASS) + SBY [divider_medium] #1: engine_6: smtbmc bitwuzla -- --noincr (31 seconds, status=PASS) + SBY [divider_medium] DONE (AUTOTUNED, rc=0) + +This tells us that for the ``medium`` task, the best engine choice (#1) is +``smtbmc bitwuzla -- --noincr``. To use this engine by default we can change +the ``[engines]`` section of ``divider.sby`` to: + +.. code-block:: text + + [engines] + smtbmc bitwuzla -- --noincr + +.. _`small example`: https://github.com/YosysHQ/sby/tree/master/docs/examples/autotune + Autotune Log Output ------------------- From ed9b291d2becc9d8bfa21588fbc6da34de188e34 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 1 Aug 2022 20:36:19 +1200 Subject: [PATCH 116/220] Remove redundancies in certain logic checks A | A' === True, A | (A' & B) === A | B --- docs/examples/fifo/fifo.sv | 11 ++++++----- docs/examples/fifo/golden/fifo.sv | 17 +++-------------- 2 files changed, 9 insertions(+), 19 deletions(-) diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 0a35f140..15522b8a 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -96,7 +96,7 @@ module fifo init <= 1; // if init is low we don't care about the value of rst_n // if init is high (rst_n has ben high), then rst_n must remain high - assume (!init || init && rst_n); + assume (!init || rst_n); end // tests @@ -128,9 +128,9 @@ module fifo || waddr == $past(waddr + 1)); // full and empty work as expected - a_full: assert (!full || full && count == MAX_DATA); + a_full: assert (!full || count == MAX_DATA); w_full: cover (wen && !ren && count == MAX_DATA-1); - a_empty: assert (!empty || empty && count == 0); + a_empty: assert (!empty || count == 0); w_empty: cover (ren && !wen && count == 1); // can we corrupt our data? @@ -165,8 +165,9 @@ module fifo // change data when writing (and only when writing) so we can line // up reads with writes - assume property (wen |=> $changed(wdata)); - assume property (!wen |=> $stable(wdata)); + //TODO: this but with a cover statement + // assume property (wen |=> $changed(wdata)); + // assume property (!wen |=> $stable(wdata)); end end `else // !VERIFIC diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv index f92b33af..18eb9840 100644 --- a/docs/examples/fifo/golden/fifo.sv +++ b/docs/examples/fifo/golden/fifo.sv @@ -98,7 +98,7 @@ module fifo init <= 1; // if init is low we don't care about the value of rst_n // if init is high (rst_n has ben high), then rst_n must remain high - assume (!init || init && rst_n); + assume (!init || rst_n); end // tests @@ -130,9 +130,9 @@ module fifo || waddr == $past(waddr + 1)); // full and empty work as expected - a_full: assert (!full || full && count == MAX_DATA); + a_full: assert (!full || count == MAX_DATA); w_full: cover (wen && !ren && count == MAX_DATA-1); - a_empty: assert (!empty || empty && count == 0); + a_empty: assert (!empty || count == 0); w_empty: cover (ren && !wen && count == 1); // can we corrupt our data? @@ -163,19 +163,8 @@ module fifo // can we corrupt our data? 
ap_overfill: assert property (wen && full |=> $changed(raddr)); ap_underfill: assert property (ren && empty |=> $changed(waddr)); - - // change data when writing (and only when writing) so we can line - // up reads with writes - assume property (wen |=> $changed(wdata)); - assume property (!wen |=> $stable(wdata)); end end -`else // !VERIFIC - // without verific we are more limited in describing the above assumption - always @(posedge clk) begin - assume ((wen && wdata != $past(wdata)) - || (!wen && wdata == $past(wdata))); - end `endif // VERIFIC `endif // FORMAL From b2d0368e2680962419fdc9314f3987a84601ef90 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 1 Aug 2022 22:06:03 +1200 Subject: [PATCH 117/220] Testing fifo things in CI Turns out the bigtest golden ref is failing a_count_diff, need to fix that before removing the default statement. Base example code is fine. New shell script to run default case and then nofullskip. Expects returncode=2 after running nofullskip. --- docs/examples/fifo/fifo.sh | 8 ++++++++ docs/examples/fifo/golden/fifo.sby | 1 + 2 files changed, 9 insertions(+) create mode 100644 docs/examples/fifo/fifo.sh diff --git a/docs/examples/fifo/fifo.sh b/docs/examples/fifo/fifo.sh new file mode 100644 index 00000000..adfe6d28 --- /dev/null +++ b/docs/examples/fifo/fifo.sh @@ -0,0 +1,8 @@ +#!/bin/bash + +python3 $SBY_MAIN -f fifo.sby && python3 $SBY_MAIN -f fifo.sby nofullskip + +if [[ $? -ne 2 ]] ; then + echo "Unexpected result" + exit 1 +fi diff --git a/docs/examples/fifo/golden/fifo.sby b/docs/examples/fifo/golden/fifo.sby index d94789c3..605307b5 100644 --- a/docs/examples/fifo/golden/fifo.sby +++ b/docs/examples/fifo/golden/fifo.sby @@ -17,6 +17,7 @@ mode bmc -- bigtest: depth 120 ~bigtest: depth 10 +nofullskip: expect fail [engines] smtbmc boolector From 59dc27ed735464e9caba9ca39edd38a198e43d9a Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 09:39:41 -0400 Subject: [PATCH 118/220] sby: core: config: cleaned up the error messages to make them less opaque --- sbysrc/sby_core.py | 40 +++++++++++++++++++++++++--------------- 1 file changed, 25 insertions(+), 15 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 3462c36a..481fde1b 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -270,30 +270,38 @@ def parse_config(self, f): if match: entries = match.group(1).split() if len(entries) == 0: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: Expected section header, got '{line}'") if entries[0] == "options": mode = "options" - if len(self.options) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") + if len(self.options) != 0: + self.error(f"sby file syntax error: '[options]' section already defined") + + if len(entries) != 1: + self.error(f"sby file syntax error: '[options]' section accepts no arguments, got '{line}'") continue if entries[0] == "engines": mode = "engines" - if len(self.engines) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") + if len(self.engines) != 0: + self.error(f"sby file syntax error: '[engines]' section already defined") + if len(entries) != 1: + self.error(f"sby file syntax error: '[engines]' section accepts no arguments, got '{line}'") continue if entries[0] == "script": mode = "script" - if len(self.script) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") + if len(self.script) != 0: + self.error(f"sby file syntax error: '[script]' section already defined") + if len(entries) != 1: 
+ self.error(f"sby file syntax error: '[script]' section accepts no arguments, got '{line}'") + continue if entries[0] == "autotune": mode = "autotune" if self.autotune_config: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: '[autotune]' section already defined") import sby_autotune self.autotune_config = sby_autotune.SbyAutotuneConfig() @@ -302,7 +310,7 @@ def parse_config(self, f): if entries[0] == "file": mode = "file" if len(entries) != 2: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: '[file]' section expects a file name argument") current_verbatim_file = entries[1] if current_verbatim_file in self.verbatim_files: self.error(f"duplicate file: {entries[1]}") @@ -312,15 +320,15 @@ def parse_config(self, f): if entries[0] == "files": mode = "files" if len(entries) != 1: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: '[files]' section expects no arguments, got '{line}'") continue - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: unexpected section '{entries[0]}', expected one of 'options, engines, script, autotune, file, files'") if mode == "options": entries = line.split() if len(entries) != 2: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: '[options]' section entry does not have an argument '{line}'") self.options[entries[0]] = entries[1] continue @@ -339,19 +347,21 @@ def parse_config(self, f): if mode == "files": entries = line.split() + if len(entries) < 1 or len(entries) > 2: + self.error(f"sby file syntax error: '[files]' section entry expects up to 2 arguments, {len(entries)} specified") + if len(entries) == 1: self.files[os.path.basename(entries[0])] = entries[0] elif len(entries) == 2: self.files[entries[0]] = entries[1] - else: - self.error(f"sby file syntax error: {line}") + continue if mode == "file": self.verbatim_files[current_verbatim_file].append(raw_line) continue - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: In an incomprehensible mode '{mode}'") def error(self, logmessage): raise SbyAbort(logmessage) From 93d8ef966323bf00c734704f0e4edc14f8a85be4 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 2 Aug 2022 10:12:33 +1200 Subject: [PATCH 119/220] Fixed bigtest Accidentally broke it in cc27d27 (this is why regular testing is important). --- docs/examples/fifo/golden/fifo.sby | 1 - docs/examples/fifo/golden/fifo.sv | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/examples/fifo/golden/fifo.sby b/docs/examples/fifo/golden/fifo.sby index 605307b5..824f3590 100644 --- a/docs/examples/fifo/golden/fifo.sby +++ b/docs/examples/fifo/golden/fifo.sby @@ -3,7 +3,6 @@ basic bmc nofullskip prove cover bigtest cover -basic cover : default [options] cover: diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv index 18eb9840..2ef5cca6 100644 --- a/docs/examples/fifo/golden/fifo.sv +++ b/docs/examples/fifo/golden/fifo.sv @@ -31,7 +31,7 @@ module fifo ); // fifo storage // async read, sync write - wire [3:0] waddr, raddr; + wire [ADDR_BITS-1:0] waddr, raddr; reg [7:0] data [MAX_DATA-1:0]; always @(posedge clk) if (wen) From a76286ed3448e6e9c58f805f78afda3896cf91cd Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 2 Aug 2022 10:28:06 +1200 Subject: [PATCH 120/220] Check output of fifo.sby False positive exit 0 if fifo.sby was giving returncode=2. 
--- docs/examples/fifo/fifo.sh | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/docs/examples/fifo/fifo.sh b/docs/examples/fifo/fifo.sh index adfe6d28..74f05c1b 100644 --- a/docs/examples/fifo/fifo.sh +++ b/docs/examples/fifo/fifo.sh @@ -1,8 +1,13 @@ #!/bin/bash -python3 $SBY_MAIN -f fifo.sby && python3 $SBY_MAIN -f fifo.sby nofullskip +python3 $SBY_MAIN -f fifo.sby + +if [[ $? -ne 0 ]] ; then + exit 1 +fi + +python3 $SBY_MAIN -f fifo.sby nofullskip if [[ $? -ne 2 ]] ; then - echo "Unexpected result" exit 1 fi From cfa4352bae6352df8428d9dfff7d9b3b7d359b46 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 2 Aug 2022 12:11:09 +1200 Subject: [PATCH 121/220] Changes to reset Active high. Removed init. Better over/underfill cover properties for verific. Moved basic cover statement to only be used when there is no verific. Other general tidy up. Also updated/fixed a couple minor things in newstart.rst. --- docs/examples/fifo/fifo.sv | 98 ++++++++++++++----------------- docs/examples/fifo/golden/fifo.sv | 89 ++++++++++++++-------------- docs/source/newstart.rst | 13 ++-- 3 files changed, 94 insertions(+), 106 deletions(-) diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 15522b8a..9beafdfe 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -1,32 +1,36 @@ // address generator/counter module addr_gen #( parameter MAX_DATA=16 -) ( input en, clk, rst_n, +) ( input en, clk, rst, output reg [3:0] addr ); initial addr <= 0; // async reset // increment address when enabled - always @(posedge clk or negedge rst_n) - if (~rst_n) + always @(posedge clk or posedge rst) + if (rst) addr <= 0; - else if (en) + else if (en) begin if (addr == MAX_DATA-1) addr <= 0; else addr <= addr + 1; + end endmodule // Define our top level fifo entity module fifo #( parameter MAX_DATA=16 -) ( input wen, ren, clk, rst_n, +) ( input wen, ren, clk, rst, input [7:0] wdata, output [7:0] rdata, output [4:0] count, output full, empty ); + wire wskip, rskip; + reg [4:0] data_count; + // fifo storage // async read, sync write wire [3:0] waddr, raddr; @@ -42,7 +46,7 @@ module fifo fifo_writer ( .en (wen || wskip), .clk (clk ), - .rst_n (rst_n), + .rst (rst), .addr (waddr) ); @@ -50,16 +54,15 @@ module fifo fifo_reader ( .en (ren || rskip), .clk (clk ), - .rst_n (rst_n), + .rst (rst), .addr (raddr) ); // status signals - reg [4:0] data_count; initial data_count <= 0; - always @(posedge clk or negedge rst_n) begin - if (~rst_n) + always @(posedge clk or posedge rst) begin + if (rst) data_count <= 0; else if (wen && !ren && data_count < MAX_DATA) data_count <= data_count + 1; @@ -68,11 +71,10 @@ module fifo end assign full = data_count == MAX_DATA; - assign empty = (data_count == 0) && rst_n; + assign empty = (data_count == 0) && ~rst; assign count = data_count; // overflow protection - wire wskip, rskip; `ifndef NO_FULL_SKIP // write while full => overwrite oldest data, move read pointer assign rskip = wen && !ren && data_count >= MAX_DATA; @@ -90,19 +92,10 @@ module fifo ? 
waddr - raddr : waddr + MAX_DATA - raddr; - reg init = 0; - always @(posedge clk) begin - if (rst_n) - init <= 1; - // if init is low we don't care about the value of rst_n - // if init is high (rst_n has ben high), then rst_n must remain high - assume (!init || rst_n); - end - // tests always @(posedge clk) begin - if (rst_n) begin - // waddr and raddr can only be non zero if reset is high + if (~rst) begin + // waddr and raddr can only be non zero if reset is low w_nreset: cover (waddr || raddr); // count never more than max @@ -132,49 +125,44 @@ module fifo w_full: cover (wen && !ren && count == MAX_DATA-1); a_empty: assert (!empty || count == 0); w_empty: cover (ren && !wen && count == 1); - - // can we corrupt our data? - w_overfill: cover ($past(rskip) && raddr); - w_underfill: cover ($past(wskip) && waddr); + + // reading/writing non zero values + w_nzero_write: cover (wen && wdata); + w_nzero_read: cover (ren && rdata); end else begin - // waddr and raddr are zero while reset is low + // waddr and raddr are zero while reset is high a_reset: assert (!waddr && !raddr); - w_reset: cover (~rst_n); + w_reset: cover (rst); - // outputs are zero while reset is low + // outputs are zero while reset is high a_zero_out: assert (!empty && !full && !count); end end `ifdef VERIFIC // if we have verific we can also do the following additional tests - always @(posedge clk) begin - if (rst_n) begin - // read/write enables enable - ap_raddr2: assert property (ren |=> $changed(raddr)); - ap_waddr2: assert property (wen |=> $changed(waddr)); - - // read/write needs enable UNLESS full/empty - ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); - ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); - - // can we corrupt our data? - // these should already be covered by ap_{r,w}addr2 - ap_overfill: assert property (wen && full |=> $changed(raddr)); - ap_underfill: assert property (ren && empty |=> $changed(waddr)); - - // change data when writing (and only when writing) so we can line - // up reads with writes - //TODO: this but with a cover statement - // assume property (wen |=> $changed(wdata)); - // assume property (!wen |=> $stable(wdata)); - end - end + // read/write enables enable + ap_raddr2: assert property (@(posedge clk) disable iff (rst) ren |=> $changed(raddr)); + ap_waddr2: assert property (@(posedge clk) disable iff (rst) wen |=> $changed(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (@(posedge clk) disable iff (rst) !ren && !full |=> $stable(raddr)); + ap_waddr3: assert property (@(posedge clk) disable iff (rst) !wen && !empty |=> $stable(waddr)); + + // can we corrupt our data? 
+ w_underfill: cover property (@(posedge clk) disable iff (rst) !wen |=> $changed(waddr)); + // look for an overfill where the value in memory changes + let d_change = (wdata != rdata); + w_overfill: cover property (@(posedge clk) disable iff (rst) !ren && d_change |=> $changed(raddr)); `else // !VERIFIC - // without verific we are more limited in describing the above assumption always @(posedge clk) begin - assume ((wen && wdata != $past(wdata)) - || (!wen && wdata == $past(wdata))); + if (~rst) begin + // this is less reliable because $past() can sometimes give false + // positives in the first cycle + w_overfill: cover ($past(rskip) && raddr); + w_underfill: cover ($past(wskip) && waddr); + + end end `endif // VERIFIC diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv index 2ef5cca6..194db627 100644 --- a/docs/examples/fifo/golden/fifo.sv +++ b/docs/examples/fifo/golden/fifo.sv @@ -2,33 +2,37 @@ module addr_gen #( parameter MAX_DATA=16, parameter ADDR_BITS=5 -) ( input en, clk, rst_n, +) ( input en, clk, rst, output reg [ADDR_BITS-1:0] addr ); initial addr <= 0; // async reset // increment address when enabled - always @(posedge clk or negedge rst_n) - if (~rst_n) + always @(posedge clk or posedge rst) + if (rst) addr <= 0; - else if (en) + else if (en) begin if (addr == MAX_DATA-1) addr <= 0; else addr <= addr + 1; + end endmodule // Define our top level fifo entity module fifo #( parameter MAX_DATA=16, parameter ADDR_BITS=5 -) ( input wen, ren, clk, rst_n, +) ( input wen, ren, clk, rst, input [7:0] wdata, output [7:0] rdata, output [ADDR_BITS:0] count, output full, empty ); + wire wskip, rskip; + reg [ADDR_BITS:0] data_count; + // fifo storage // async read, sync write wire [ADDR_BITS-1:0] waddr, raddr; @@ -44,7 +48,7 @@ module fifo fifo_writer ( .en (wen || wskip), .clk (clk ), - .rst_n (rst_n), + .rst (rst), .addr (waddr) ); @@ -52,16 +56,15 @@ module fifo fifo_reader ( .en (ren || rskip), .clk (clk ), - .rst_n (rst_n), + .rst (rst), .addr (raddr) ); // status signals - reg [ADDR_BITS:0] data_count; initial data_count <= 0; - always @(posedge clk or negedge rst_n) begin - if (~rst_n) + always @(posedge clk or posedge rst) begin + if (rst) data_count <= 0; else if (wen && !ren && data_count < MAX_DATA) data_count <= data_count + 1; @@ -70,11 +73,10 @@ module fifo end assign full = data_count == MAX_DATA; - assign empty = (data_count == 0) && rst_n; + assign empty = (data_count == 0) && ~rst; assign count = data_count; // overflow protection - wire wskip, rskip; `ifndef NO_FULL_SKIP // write while full => overwrite oldest data, move read pointer assign rskip = wen && !ren && data_count >= MAX_DATA; @@ -89,22 +91,13 @@ module fifo // observers wire [ADDR_BITS:0] addr_diff; assign addr_diff = waddr >= raddr - ? waddr - raddr - : waddr + MAX_DATA - raddr; - - reg init = 0; - always @(posedge clk) begin - if (rst_n) - init <= 1; - // if init is low we don't care about the value of rst_n - // if init is high (rst_n has ben high), then rst_n must remain high - assume (!init || rst_n); - end + ? 
waddr - raddr + : waddr + MAX_DATA - raddr; // tests always @(posedge clk) begin - if (rst_n) begin - // waddr and raddr can only be non zero if reset is high + if (~rst) begin + // waddr and raddr can only be non zero if reset is low w_nreset: cover (waddr || raddr); // count never more than max @@ -134,35 +127,43 @@ module fifo w_full: cover (wen && !ren && count == MAX_DATA-1); a_empty: assert (!empty || count == 0); w_empty: cover (ren && !wen && count == 1); - - // can we corrupt our data? - w_overfill: cover ($past(rskip) && raddr); - w_underfill: cover ($past(wskip) && waddr); + + // reading/writing non zero values + w_nzero_write: cover (wen && wdata); + w_nzero_read: cover (ren && rdata); end else begin - // waddr and raddr are zero while reset is low + // waddr and raddr are zero while reset is high a_reset: assert (!waddr && !raddr); - w_reset: cover (~rst_n); + w_reset: cover (rst); - // outputs are zero while reset is low + // outputs are zero while reset is high a_zero_out: assert (!empty && !full && !count); end end `ifdef VERIFIC // if we have verific we can also do the following additional tests + // read/write enables enable + ap_raddr2: assert property (@(posedge clk) disable iff (rst) ren |=> $changed(raddr)); + ap_waddr2: assert property (@(posedge clk) disable iff (rst) wen |=> $changed(waddr)); + + // read/write needs enable UNLESS full/empty + ap_raddr3: assert property (@(posedge clk) disable iff (rst) !ren && !full |=> $stable(raddr)); + ap_waddr3: assert property (@(posedge clk) disable iff (rst) !wen && !empty |=> $stable(waddr)); + + // can we corrupt our data? + w_underfill: cover property (@(posedge clk) disable iff (rst) !wen |=> $changed(waddr)); + // look for an overfill where the value in memory changes + let d_change = (wdata != rdata); + w_overfill: cover property (@(posedge clk) disable iff (rst) !ren && d_change |=> $changed(raddr)); +`else // !VERIFIC always @(posedge clk) begin - if (rst_n) begin - // read/write enables enable - ap_raddr2: assert property (ren |=> $changed(raddr)); - ap_waddr2: assert property (wen |=> $changed(waddr)); - - // read/write needs enable UNLESS full/empty - ap_raddr3: assert property (!ren && !full |=> $stable(raddr)); - ap_waddr3: assert property (!wen && !empty |=> $stable(waddr)); - - // can we corrupt our data? - ap_overfill: assert property (wen && full |=> $changed(raddr)); - ap_underfill: assert property (ren && empty |=> $changed(waddr)); + if (~rst) begin + // this is less reliable because $past() can sometimes give false + // positives in the first cycle + w_overfill: cover ($past(rskip) && raddr); + w_underfill: cover ($past(wskip) && waddr); + end end `endif // VERIFIC diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 214a3af7..98468ae3 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -131,7 +131,7 @@ adjustments to code and rerunning tests to validate. sby -f fifo.sby nofullskip -The noskip task disables the code shown below. Because the count signal has +The nofullskip task disables the code shown below. Because the count signal has been written such that it cannot exceed MAX_DATA, removing this code will lead to the ``a_count_diff`` assertion failing. Without this assertion, there is no guarantee that data will be read in the same order it was written should an @@ -143,13 +143,12 @@ overflow occur and the oldest data be written. 
:end-at: endif :lines: 1-5,9 -The last few lines of output for the noskip task should be similar to the +The last few lines of output for the nofullskip task should be similar to the following: .. code-block:: text SBY [fifo_nofullskip] engine_0.basecase: ## Assert failed in fifo: a_count_diff - SBY [fifo_nofullskip] engine_0.basecase: ## Assert failed in fifo: ap_underfill SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to VCD file: engine_0/trace.vcd SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to Verilog testbench: engine_0/trace_tb.v SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to constraints file: engine_0/trace.smtc @@ -162,7 +161,7 @@ following: SBY [fifo_nofullskip] summary: engine_0 (smtbmc boolector) returned FAIL for basecase SBY [fifo_nofullskip] summary: counterexample trace: fifo_nofullskip/engine_0/trace.vcd SBY [fifo_nofullskip] DONE (FAIL, rc=2) - SBY The following tasks failed: ['noskip'] + SBY The following tasks failed: ['nofullskip'] Using the ``noskip.gtkw`` file provided, use the below command to examine the error trace. @@ -189,16 +188,16 @@ Searching the file for ``w_underfill`` will reveal the below. .. code-block:: text - $ grep "w_underfill" fifo_cover/logfile.txt -A 1 + $ grep "w_underfill" fifo_cover/logfile.txt -A 2 SBY [fifo_cover] engine_0: ## Reached cover statement at w_underfill in step 2. - SBY [fifo_cover] engine_0: ## Writing trace to VCD file: engine_0/trace2.vcd + SBY [fifo_cover] engine_0: ## Writing trace to VCD file: engine_0/trace4.vcd We can then run gtkwave with the trace file indicated to see the correct operation as in the image below. When the buffer is empty, a read with no write will result in the ``wksip`` signal going high, incrementing *both* read and write addresses and avoiding underflow. - gtkwave fifo_cover/engine_0/trace2.vcd noskip.gtkw + gtkwave fifo_cover/engine_0/trace4.vcd noskip.gtkw .. 
image:: media/gtkwave_coverskip.png From 523b7a252e654c2121a0734cd03c84d30accc21b Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 2 Aug 2022 20:22:27 +0200 Subject: [PATCH 122/220] Regression test for YosysHQ/yosys#3433 --- tests/regression/smt_dynamic_index_assign.sby | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 tests/regression/smt_dynamic_index_assign.sby diff --git a/tests/regression/smt_dynamic_index_assign.sby b/tests/regression/smt_dynamic_index_assign.sby new file mode 100644 index 00000000..993d75a5 --- /dev/null +++ b/tests/regression/smt_dynamic_index_assign.sby @@ -0,0 +1,22 @@ +[options] +mode cover +depth 36 + +[engines] +smtbmc boolector + +[script] +read -formal top.sv +prep -top top + +[file top.sv] +module top(input clk); + reg [33:0] bits = 0; + reg [5:0] counter = 0; + + always @(posedge clk) begin + counter <= counter + 1; + bits[counter] <= 1; + cover (&bits); + end +endmodule From 8133aaa8f831cc8306525d286e0dc81c902cc3b2 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Tue, 2 Aug 2022 08:15:27 -0400 Subject: [PATCH 123/220] sby: core: changed how the sections and their arguments are handled and cleared up the strangly worded error messages related to that --- sbysrc/sby_core.py | 47 +++++++++++++++++++++++++++------------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 481fde1b..677f249d 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -268,37 +268,41 @@ def parse_config(self, f): continue match = re.match(r"^\s*\[(.*)\]\s*$", line) if match: - entries = match.group(1).split() + entries = match.group(1).split(" ", maxsplit = 1) if len(entries) == 0: self.error(f"sby file syntax error: Expected section header, got '{line}'") + elif len(entries) == 1: + section, args = (*entries, None) + else: + section, args = entries - if entries[0] == "options": + if section == "options": mode = "options" if len(self.options) != 0: self.error(f"sby file syntax error: '[options]' section already defined") - if len(entries) != 1: - self.error(f"sby file syntax error: '[options]' section accepts no arguments, got '{line}'") + if args is not None: + self.error(f"sby file syntax error: '[options]' section does not accept any arguments. got {args}") continue - if entries[0] == "engines": + if section == "engines": mode = "engines" if len(self.engines) != 0: self.error(f"sby file syntax error: '[engines]' section already defined") - if len(entries) != 1: - self.error(f"sby file syntax error: '[engines]' section accepts no arguments, got '{line}'") + if args is not None: + self.error(f"sby file syntax error: '[engines]' section does not accept any arguments. got {args}") continue - if entries[0] == "script": + if section == "script": mode = "script" if len(self.script) != 0: self.error(f"sby file syntax error: '[script]' section already defined") - if len(entries) != 1: - self.error(f"sby file syntax error: '[script]' section accepts no arguments, got '{line}'") + if args is not None: + self.error(f"sby file syntax error: '[script]' section does not accept any arguments. 
got {args}") continue - if entries[0] == "autotune": + if section == "autotune": mode = "autotune" if self.autotune_config: self.error(f"sby file syntax error: '[autotune]' section already defined") @@ -307,23 +311,28 @@ def parse_config(self, f): self.autotune_config = sby_autotune.SbyAutotuneConfig() continue - if entries[0] == "file": + if section == "file": mode = "file" - if len(entries) != 2: + if args is None: self.error(f"sby file syntax error: '[file]' section expects a file name argument") - current_verbatim_file = entries[1] + + section_args = args.split() + + if len(section_args) > 1: + self.error(f"sby file syntax error: '[file]' section expects exactly one file name argument, got {len(section_args)}") + current_verbatim_file = section_args[0] if current_verbatim_file in self.verbatim_files: - self.error(f"duplicate file: {entries[1]}") + self.error(f"duplicate file: {current_verbatim_file}") self.verbatim_files[current_verbatim_file] = list() continue - if entries[0] == "files": + if section == "files": mode = "files" - if len(entries) != 1: - self.error(f"sby file syntax error: '[files]' section expects no arguments, got '{line}'") + if args is not None: + self.error(f"sby file syntax error: '[files]' section does not accept any arguments. got {args}") continue - self.error(f"sby file syntax error: unexpected section '{entries[0]}', expected one of 'options, engines, script, autotune, file, files'") + self.error(f"sby file syntax error: unexpected section '{section}', expected one of 'options, engines, script, autotune, file, files'") if mode == "options": entries = line.split() From 10234fef0041e62f71e28e0165017c089fbaac1a Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Tue, 2 Aug 2022 08:55:35 -0400 Subject: [PATCH 124/220] sby: core: changed how the split for the section header and arguments are done, with a prior strip to remove and extra whitespace --- sbysrc/sby_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 677f249d..e3cf922d 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -268,7 +268,7 @@ def parse_config(self, f): continue match = re.match(r"^\s*\[(.*)\]\s*$", line) if match: - entries = match.group(1).split(" ", maxsplit = 1) + entries = match.group(1).strip().split(maxsplit = 1) if len(entries) == 0: self.error(f"sby file syntax error: Expected section header, got '{line}'") elif len(entries) == 1: From 9368f3f987c6927c50c5d26d952a5276e14fc422 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 05:52:59 -0400 Subject: [PATCH 125/220] sby: core: explicitly split the the entries for the `[options]` section params to be at most one, --- sbysrc/sby_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index e3cf922d..ad877b7e 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -335,7 +335,7 @@ def parse_config(self, f): self.error(f"sby file syntax error: unexpected section '{section}', expected one of 'options, engines, script, autotune, file, files'") if mode == "options": - entries = line.split() + entries = line.split(maxsplit = 1) if len(entries) != 2: self.error(f"sby file syntax error: '[options]' section entry does not have an argument '{line}'") self.options[entries[0]] = entries[1] From 46ca20f8ec007d4f517c40bb17454a7f33185176 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 05:58:52 -0400 Subject: [PATCH 126/220] sby: core: ensured to strip the line of any uneeded whitespace 
--- sbysrc/sby_core.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ad877b7e..790d64d8 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -335,7 +335,7 @@ def parse_config(self, f): self.error(f"sby file syntax error: unexpected section '{section}', expected one of 'options, engines, script, autotune, file, files'") if mode == "options": - entries = line.split(maxsplit = 1) + entries = line.strip().split(maxsplit = 1) if len(entries) != 2: self.error(f"sby file syntax error: '[options]' section entry does not have an argument '{line}'") self.options[entries[0]] = entries[1] From edb068bff4b8f67b3c130c63d9f51b88b3761aec Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 2 Aug 2022 17:11:38 +0200 Subject: [PATCH 127/220] Fix print_junit_results failure during some error conditions There is a small window between setting self.precise_prop_status and initializing self.design. I've only managed to produce an error within that windows during development, but getting unrelated stacktraces from print_junit_result failing distracts from debugging the issue at hand. --- sbysrc/sby_core.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 790d64d8..ce69f9d7 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -906,6 +906,8 @@ def exit_callback(self): def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): junit_time = strftime('%Y-%m-%dT%H:%M:%S') + if not self.design: + self.precise_prop_status = False if self.precise_prop_status: checks = self.design.hierarchy.get_property_list() junit_tests = len(checks) From 5265a52b65df0d196d48d30029bf3de1bbcff36a Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 25 Jul 2022 11:35:10 +0200 Subject: [PATCH 128/220] Refactor flow to use a common prep model The goal of this is to make sure that all backend flows are compatible and we can map between them, so that e.g. the aiger model can be used to minimize a counterexample trace produced by smtbmc. Reducing the parts that differ per backend (including parts that receive different input depending on the used backend) also makes testing more effective as the common parts are easier to cover. 
--- sbysrc/sby_autotune.py | 2 +- sbysrc/sby_core.py | 81 +++++++++++++++++++++++++----------------- tests/unsorted/mixed.v | 13 ++++--- 3 files changed, 55 insertions(+), 41 deletions(-) diff --git a/sbysrc/sby_autotune.py b/sbysrc/sby_autotune.py index 9e2f28c0..c7d741c7 100644 --- a/sbysrc/sby_autotune.py +++ b/sbysrc/sby_autotune.py @@ -391,7 +391,7 @@ def run(self): else: self.task.copy_src() - self.model(None, "base") + self.model(None, "prep") self.task.taskloop.run() if self.task.status == "ERROR": diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ce69f9d7..8ef528dd 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -585,25 +585,44 @@ def make_model(self, model_name): if not os.path.isdir(f"{self.workdir}/model"): os.makedirs(f"{self.workdir}/model") - def print_common_prep(check): - print("scc -select; simplemap; select -clear", file=f) - if self.opt_multiclock: - print("clk2fflogic", file=f) - else: - print("async2sync", file=f) - print("chformal -assume -early", file=f) - if self.opt_mode in ["bmc", "prove"]: - print("chformal -live -fair -cover -remove", file=f) - if self.opt_mode == "cover": - print("chformal -live -fair -remove", file=f) - if self.opt_mode == "live": - print("chformal -assert2assume", file=f) - print("chformal -cover -remove", file=f) - print("opt_clean", file=f) - print("setundef -anyseq", file=f) - print("opt -keepdc -fast", file=f) - print("check", file=f) - print(f"hierarchy {check}", file=f) + if model_name == "prep": + with open(f"""{self.workdir}/model/design_prep.ys""", "w") as f: + print(f"# running in {self.workdir}/model/", file=f) + print(f"""read_ilang design.il""", file=f) + print("scc -select; simplemap; select -clear", file=f) + print("memory_nordff", file=f) + if self.opt_multiclock: + print("clk2fflogic", file=f) + else: + print("async2sync", file=f) + print("chformal -assume -early", file=f) + print("formalff -clk2ff -ff2anyinit", file=f) + if self.opt_mode in ["bmc", "prove"]: + print("chformal -live -fair -cover -remove", file=f) + if self.opt_mode == "cover": + print("chformal -live -fair -remove", file=f) + if self.opt_mode == "live": + print("chformal -assert2assume", file=f) + print("chformal -cover -remove", file=f) + print("opt_clean", file=f) + print("check", file=f) # can't detect undriven wires past this point + print("setundef -undriven -anyseq", file=f) + print("opt -fast", file=f) + # running opt before the renames below results in fewer unnamed witness signals + for celltype in ["anyconst", "anyseq", "anyinit", "allconst", "allseq"]: + print(f"rename -enumerate -pattern _sby_witness_{celltype}_% t:${celltype} %co w:* %i", file=f) + print("opt_clean", file=f) + print(f"""write_rtlil ../model/design_prep.il""", file=f) + + proc = SbyProc( + self, + model_name, + self.model("base"), + "cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name) + ) + proc.checkretcode = True + + return [proc] if model_name == "base": with open(f"""{self.workdir}/model/design.ys""", "w") as f: @@ -639,12 +658,10 @@ def instance_hierarchy_error_callback(retcode): if re.match(r"^smt2(_syn)?(_nomem)?(_stbv|_stdt)?$", model_name): with open(f"{self.workdir}/model/design_{model_name}.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) - print(f"""read_ilang design.il""", file=f) + print(f"""read_ilang design_prep.il""", file=f) + print("hierarchy -smtcheck", file=f) if "_nomem" in model_name: print("memory_map", file=f) - else: - print("memory_nordff", file=f) - 
print_common_prep("-smtcheck") if "_syn" in model_name: print("techmap", file=f) print("opt -fast", file=f) @@ -662,7 +679,7 @@ def instance_hierarchy_error_callback(retcode): proc = SbyProc( self, model_name, - self.model("base"), + self.model("prep"), "cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name) ) proc.checkretcode = True @@ -672,12 +689,10 @@ def instance_hierarchy_error_callback(retcode): if re.match(r"^btor(_syn)?(_nomem)?$", model_name): with open(f"{self.workdir}/model/design_{model_name}.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) - print(f"""read_ilang design.il""", file=f) + print(f"""read_ilang design_prep.il""", file=f) + print("hierarchy -simcheck", file=f) if "_nomem" in model_name: print("memory_map", file=f) - else: - print("memory_nordff", file=f) - print_common_prep("-simcheck") print("flatten", file=f) print("setundef -undriven -anyseq", file=f) if "_syn" in model_name: @@ -687,7 +702,7 @@ def instance_hierarchy_error_callback(retcode): print("abc", file=f) print("opt_clean", file=f) else: - print("opt -fast -keepdc", file=f) + print("opt -fast", file=f) print("delete -output", file=f) print("dffunmap", file=f) print("stat", file=f) @@ -697,7 +712,7 @@ def instance_hierarchy_error_callback(retcode): proc = SbyProc( self, model_name, - self.model("base"), + self.model("prep"), "cd {}/model; {} -ql design_{s}.log design_{s}.ys".format(self.workdir, self.exe_paths["yosys"], s=model_name) ) proc.checkretcode = True @@ -707,9 +722,9 @@ def instance_hierarchy_error_callback(retcode): if model_name == "aig": with open(f"{self.workdir}/model/design_aiger.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) - print("read_ilang design.il", file=f) + print("read_ilang design_prep.il", file=f) + print("hierarchy -simcheck", file=f) print("memory_map", file=f) - print_common_prep("-simcheck") print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) @@ -726,7 +741,7 @@ def instance_hierarchy_error_callback(retcode): proc = SbyProc( self, "aig", - self.model("base"), + self.model("prep"), f"""cd {self.workdir}/model; {self.exe_paths["yosys"]} -ql design_aiger.log design_aiger.ys""" ) proc.checkretcode = True diff --git a/tests/unsorted/mixed.v b/tests/unsorted/mixed.v index fa3cf2c4..26bf3c9e 100644 --- a/tests/unsorted/mixed.v +++ b/tests/unsorted/mixed.v @@ -1,17 +1,16 @@ -module test (input CP, CN, CX, input A, B, output reg XP, XN, YP, YN); +module test (input CP, CN, input A, B, output reg XP, XN); + reg [7:0] counter = 0; always @* begin assume (A || B); assume (!A || !B); assert (A != B); - cover (A); - cover (B); + cover (counter == 3 && A); + cover (counter == 3 && B); end + always @(posedge CP) + counter <= counter + 1; always @(posedge CP) XP <= A; always @(negedge CN) XN <= B; - always @(posedge CX) - YP <= A; - always @(negedge CX) - YN <= B; endmodule From acaf6ef0c2799e5ff506610bef23840c1f819853 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 2 Aug 2022 16:34:25 +0200 Subject: [PATCH 129/220] Use new memory_map -formal for aiger/_nomem --- sbysrc/sby_core.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 8ef528dd..08fd89fa 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -661,7 +661,8 @@ def instance_hierarchy_error_callback(retcode): print(f"""read_ilang design_prep.il""", file=f) print("hierarchy -smtcheck", file=f) if 
"_nomem" in model_name: - print("memory_map", file=f) + print("memory_map -formal", file=f) + print("formalff -clk2ff -ff2anyinit", file=f) if "_syn" in model_name: print("techmap", file=f) print("opt -fast", file=f) @@ -692,7 +693,8 @@ def instance_hierarchy_error_callback(retcode): print(f"""read_ilang design_prep.il""", file=f) print("hierarchy -simcheck", file=f) if "_nomem" in model_name: - print("memory_map", file=f) + print("memory_map -formal", file=f) + print("formalff -clk2ff -ff2anyinit", file=f) print("flatten", file=f) print("setundef -undriven -anyseq", file=f) if "_syn" in model_name: @@ -724,7 +726,8 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print("read_ilang design_prep.il", file=f) print("hierarchy -simcheck", file=f) - print("memory_map", file=f) + print("memory_map -formal", file=f) + print("formalff -clk2ff -ff2anyinit", file=f) print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) From d3520037b9f7c95380c3f0ecffc640011c835532 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 2 Aug 2022 18:19:01 +0200 Subject: [PATCH 130/220] Write native yosys witness traces --- sbysrc/sby_core.py | 2 +- sbysrc/sby_engine_aiger.py | 4 ++-- sbysrc/sby_engine_smtbmc.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 08fd89fa..3b7a5feb 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -739,7 +739,7 @@ def instance_hierarchy_error_callback(retcode): print("abc -g AND -fast", file=f) print("opt_clean", file=f) print("stat", file=f) - print("write_aiger -I -B -zinit -no-startoffset -map design_aiger.aim design_aiger.aig", file=f) + print("write_aiger -I -B -zinit -no-startoffset -map design_aiger.aim -ywmap design_aiger.ywa design_aiger.aig", file=f) proc = SbyProc( self, diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index e392932e..d59105a0 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -111,7 +111,7 @@ def exit_callback(retcode): f"engine_{engine_idx}", task.model("smt2"), ("cd {}; {} -g -s {}{} --noprogress --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " + - "--dump-smtc engine_{i}/trace.smtc --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format + "--dump-smtc engine_{i}/trace.smtc --dump-yw engine_{i}/trace.yw --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", i=engine_idx), @@ -123,7 +123,7 @@ def exit_callback(retcode): f"engine_{engine_idx}", task.model("smt2"), ("cd {}; {} -s {}{} --noprogress --append {} --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " + - "--dump-smtc engine_{i}/trace.smtc --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format + "--dump-smtc engine_{i}/trace.smtc --dump-yw engine_{i}/trace.yw --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", task.opt_append, i=engine_idx), diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 8c11388d..9dd92c3c 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -152,7 +152,7 @@ def run(mode, task, engine_idx, engine): task, 
procname, task.model(model_name), - f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --dump-vcd {trace_prefix}.vcd --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""", + f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --dump-vcd {trace_prefix}.vcd --dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""", logfile=open(logfile_prefix + ".txt", "w"), logstderr=(not progress) ) From 22585b33dc2c3a2067a601fe9d8d63a12e7144b5 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 3 Aug 2022 17:33:04 +0200 Subject: [PATCH 131/220] Use 'rename -witness' instead of multiple 'rename -enumerate' --- sbysrc/sby_core.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 3b7a5feb..d855abf3 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -608,9 +608,7 @@ def make_model(self, model_name): print("check", file=f) # can't detect undriven wires past this point print("setundef -undriven -anyseq", file=f) print("opt -fast", file=f) - # running opt before the renames below results in fewer unnamed witness signals - for celltype in ["anyconst", "anyseq", "anyinit", "allconst", "allseq"]: - print(f"rename -enumerate -pattern _sby_witness_{celltype}_% t:${celltype} %co w:* %i", file=f) + print("rename -witness", file=f) print("opt_clean", file=f) print(f"""write_rtlil ../model/design_prep.il""", file=f) From 231f0b80aa15bea98987ebc47527cac8f9eb2067 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 2 Aug 2022 17:08:53 +0200 Subject: [PATCH 132/220] Add make_model option to generate models not required by the task Useful to do custom things (like counter example minimization) but still use sby's flow to prepare models. --- docs/source/reference.rst | 6 ++++++ sbysrc/sby_core.py | 14 ++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 8d003143..447673ac 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -153,6 +153,12 @@ options are: | ``tbtop`` | All | The top module for generated Verilog test benches, as | | | | hierarchical path relative to the design top module. | +------------------+------------+---------------------------------------------------------+ +| ``make_model`` | All | Force generation of the named formal models. Takes a | +| | | comma-separated list of model names. For a model | +| | | ```` this will generate the | +| | | ``model/design_.*`` files within the working | +| | | directory, even when not required to run the task. | ++------------------+------------+---------------------------------------------------------+ | ``smtc`` | ``bmc``, | Pass this ``.smtc`` file to the smtbmc engine. All | | | ``prove``, | other engines are disabled when this option is used. 
| | | ``cover`` | Default: None | diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index d855abf3..2a1a9948 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -81,6 +81,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.linebuffer = "" self.logstderr = logstderr self.silent = silent + self.wait = False self.task.update_proc_pending(self) @@ -130,7 +131,7 @@ def handle_error(self, retcode): self.error_callback(retcode) def terminate(self, timeout=False): - if self.task.opt_wait and not timeout: + if (self.task.opt_wait or self.wait) and not timeout: return if self.running: if not self.silent: @@ -749,7 +750,7 @@ def instance_hierarchy_error_callback(retcode): return [proc] - assert False + self.error(f"Invalid model name: {model_name}") def model(self, model_name): if model_name not in self.models: @@ -827,6 +828,8 @@ def handle_non_engine_options(self): self.handle_int_option("skip", None) self.handle_str_option("tbtop", None) + self.handle_str_option("make_model", None) + def setup_procs(self, setupmode): self.handle_non_engine_options() if self.opt_smtc is not None: @@ -854,6 +857,13 @@ def setup_procs(self, setupmode): self.retcode = 0 return + if self.opt_make_model is not None: + for name in self.opt_make_model.split(","): + self.model(name.strip()) + + for proc in self.procs_pending: + proc.wait = True + if self.opt_mode == "bmc": import sby_mode_bmc sby_mode_bmc.run(self) From 3412ea859ba6510fda22aa3de140af78929f9d14 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 5 Aug 2022 15:51:11 +0200 Subject: [PATCH 133/220] New "none" engine to be used with the "make_model" option --- docs/source/reference.rst | 9 +++++++++ sbysrc/sby_core.py | 9 +++++---- sbysrc/sby_mode_bmc.py | 3 +++ sbysrc/sby_mode_cover.py | 3 +++ sbysrc/sby_mode_live.py | 3 +++ sbysrc/sby_mode_prove.py | 3 +++ 6 files changed, 26 insertions(+), 4 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 447673ac..710ba7bc 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -346,6 +346,15 @@ solvers: Solver options are passed as additional arguments to the ABC command implementing the solver. + +``none`` engine +~~~~~~~~~~~~~~~ + +The ``none`` engine does not run any solver. It can be used together with the +``make_model`` option to manually generate any model supported by one of the +other engines. This makes it easier to use the same models outside of sby. 
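+
+For example, a minimal ``.sby`` configuration using the ``none`` engine
+together with the ``make_model`` option might look like the sketch below
+(the listed model names are only illustrative, and the usual ``[script]``
+and ``[files]`` sections are omitted for brevity):
+
+.. code-block:: text
+
+    [options]
+    mode bmc
+    make_model smt2,btor
+
+    [engines]
+    none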
+ + Script section -------------- diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 2a1a9948..7eab2773 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -597,7 +597,8 @@ def make_model(self, model_name): else: print("async2sync", file=f) print("chformal -assume -early", file=f) - print("formalff -clk2ff -ff2anyinit", file=f) + print("opt_clean", file=f) + print("formalff -setundef -clk2ff -ff2anyinit", file=f) if self.opt_mode in ["bmc", "prove"]: print("chformal -live -fair -cover -remove", file=f) if self.opt_mode == "cover": @@ -661,7 +662,7 @@ def instance_hierarchy_error_callback(retcode): print("hierarchy -smtcheck", file=f) if "_nomem" in model_name: print("memory_map -formal", file=f) - print("formalff -clk2ff -ff2anyinit", file=f) + print("formalff -setundef -clk2ff -ff2anyinit", file=f) if "_syn" in model_name: print("techmap", file=f) print("opt -fast", file=f) @@ -693,7 +694,7 @@ def instance_hierarchy_error_callback(retcode): print("hierarchy -simcheck", file=f) if "_nomem" in model_name: print("memory_map -formal", file=f) - print("formalff -clk2ff -ff2anyinit", file=f) + print("formalff -setundef -clk2ff -ff2anyinit", file=f) print("flatten", file=f) print("setundef -undriven -anyseq", file=f) if "_syn" in model_name: @@ -726,7 +727,7 @@ def instance_hierarchy_error_callback(retcode): print("read_ilang design_prep.il", file=f) print("hierarchy -simcheck", file=f) print("memory_map -formal", file=f) - print("formalff -clk2ff -ff2anyinit", file=f) + print("formalff -setundef -clk2ff -ff2anyinit", file=f) print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index 78324edb..cc4eba78 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -46,5 +46,8 @@ def run(task): import sby_engine_btor sby_engine_btor.run("bmc", task, engine_idx, engine) + elif engine[0] == "none": + pass + else: task.error(f"Invalid engine '{engine[0]}' for bmc mode.") diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index d7705ee3..8fb1da99 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -37,5 +37,8 @@ def run(task): import sby_engine_btor sby_engine_btor.run("cover", task, engine_idx, engine) + elif engine[0] == "none": + pass + else: task.error(f"Invalid engine '{engine[0]}' for cover mode.") diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index 46b556fc..6746200e 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -34,5 +34,8 @@ def run(task): import sby_engine_aiger sby_engine_aiger.run("live", task, engine_idx, engine) + elif engine[0] == "none": + pass + else: task.error(f"Invalid engine '{engine[0]}' for live mode.") diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index 3abadf75..1f62bb94 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -49,5 +49,8 @@ def run(task): import sby_engine_abc sby_engine_abc.run("prove", task, engine_idx, engine) + elif engine[0] == "none": + pass + else: task.error(f"Invalid engine '{engine[0]}' for prove mode.") From ad8730fa44e73cb7b6f9566873db71228ca60e30 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 8 Aug 2022 21:30:31 +1200 Subject: [PATCH 134/220] Fix typo --- docs/source/newstart.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 98468ae3..79604f7b 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ 
-229,7 +229,7 @@ while still passing all of the tests? Once the tests are passing with ``MAX_DATA=17``, try something bigger, like 64, or 100. Does the ``basic`` task still pass? What about ``cover``? By default, -``bmc & cover`` modes will run to a depth of 20 cycles. If a maximum of one +``bmc`` & ``cover`` modes will run to a depth of 20 cycles. If a maximum of one value can be loaded in each cycle, how many cycles will it take to load 100 values? Using the :ref:`.sby reference page `, try to increase the cover mode depth to be at least a few cycles larger than the From 1d4716a5f9e701814dad0d928b753b7d752f83b6 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 9 Aug 2022 11:29:19 +1200 Subject: [PATCH 135/220] Add noverific task to test the non verific code Mostly for CI to ensure fallback code still functions as intended.a Also reverted the change in the grep command to 1 line after. --- docs/examples/fifo/fifo.sby | 2 ++ docs/examples/fifo/fifo.sh | 2 +- docs/examples/fifo/golden/fifo.sby | 2 ++ docs/source/newstart.rst | 4 +++- 4 files changed, 8 insertions(+), 2 deletions(-) diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/fifo.sby index 22ebcddf..4ca4bc69 100644 --- a/docs/examples/fifo/fifo.sby +++ b/docs/examples/fifo/fifo.sby @@ -2,6 +2,7 @@ basic bmc nofullskip prove cover +noverific cover basic cover : default [options] @@ -20,6 +21,7 @@ smtbmc boolector [script] nofullskip: read -define NO_FULL_SKIP=1 +noverific: read -noverific read -formal fifo.sv prep -top fifo diff --git a/docs/examples/fifo/fifo.sh b/docs/examples/fifo/fifo.sh index 74f05c1b..10242228 100644 --- a/docs/examples/fifo/fifo.sh +++ b/docs/examples/fifo/fifo.sh @@ -1,6 +1,6 @@ #!/bin/bash -python3 $SBY_MAIN -f fifo.sby +python3 $SBY_MAIN -f fifo.sby basic cover noverific if [[ $? -ne 0 ]] ; then exit 1 diff --git a/docs/examples/fifo/golden/fifo.sby b/docs/examples/fifo/golden/fifo.sby index 824f3590..10f2d85a 100644 --- a/docs/examples/fifo/golden/fifo.sby +++ b/docs/examples/fifo/golden/fifo.sby @@ -2,6 +2,7 @@ basic bmc nofullskip prove cover +noverific cover bigtest cover [options] @@ -23,6 +24,7 @@ smtbmc boolector [script] nofullskip: read -define NO_FULL_SKIP=1 +noverific: read -noverific read -formal fifo.sv bigtest: hierarchy -check -top fifo -chparam MAX_DATA 100 -chparam ADDR_BITS 7 ~bigtest: hierarchy -check -top fifo -chparam MAX_DATA 5 -chparam ADDR_BITS 3 diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 79604f7b..9792bd8f 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -114,6 +114,7 @@ verification. **cover** Cover mode (testing cover statements). + The use of the ``:default`` tag indicates that by default, basic and cover should be run if no tasks are specified, such as when running the command below. @@ -188,7 +189,7 @@ Searching the file for ``w_underfill`` will reveal the below. .. code-block:: text - $ grep "w_underfill" fifo_cover/logfile.txt -A 2 + $ grep "w_underfill" fifo_cover/logfile.txt -A 1 SBY [fifo_cover] engine_0: ## Reached cover statement at w_underfill in step 2. SBY [fifo_cover] engine_0: ## Writing trace to VCD file: engine_0/trace4.vcd @@ -210,6 +211,7 @@ Adjust the ``[script]`` section of ``fifo.sby`` so that it looks like the below. 
[script] nofullskip: read -define NO_FULL_SKIP=1 + noverific: read -noverific read -formal fifo.sv hierarchy -check -top fifo -chparam MAX_DATA 17 prep -top fifo From d6d7119cd5edd99aa494b6df5d042db2ba4249ea Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Tue, 9 Aug 2022 11:33:29 +1200 Subject: [PATCH 136/220] Rewrite of non verific underfill/overfill w_underfill should provide identical results regardless of whether or not Verific is used. w_overfill doesn't have the extra check for prettiness without Verific because I'm too lazy to do it. Replaced $past function with past_nwen register to ensure correct operation. Expanded w_underfill under Verific to use a property block to more easily compare the two versions side by side. Changed Concurrent assertions section of doc to compare the two implementations of w_underfill. Should provide a better example for why using verific makes it easier. --- docs/examples/fifo/fifo.sv | 42 +++++++++++++++++----- docs/examples/fifo/golden/fifo.sv | 43 ++++++++++++++++++----- docs/source/newstart.rst | 58 +++++++++++++++++++++++-------- 3 files changed, 112 insertions(+), 31 deletions(-) diff --git a/docs/examples/fifo/fifo.sv b/docs/examples/fifo/fifo.sv index 9beafdfe..ba4d8e79 100644 --- a/docs/examples/fifo/fifo.sv +++ b/docs/examples/fifo/fifo.sv @@ -149,19 +149,45 @@ module fifo ap_raddr3: assert property (@(posedge clk) disable iff (rst) !ren && !full |=> $stable(raddr)); ap_waddr3: assert property (@(posedge clk) disable iff (rst) !wen && !empty |=> $stable(waddr)); - // can we corrupt our data? - w_underfill: cover property (@(posedge clk) disable iff (rst) !wen |=> $changed(waddr)); + // use block formatting for w_underfill so it's easier to describe in docs + // and is more readily comparable with the non SVA implementation + property write_skip; + @(posedge clk) disable iff (rst) + !wen |=> $changed(waddr); + endproperty + w_underfill: cover property (write_skip); + // look for an overfill where the value in memory changes + // the change in data makes certain that the value is overriden let d_change = (wdata != rdata); - w_overfill: cover property (@(posedge clk) disable iff (rst) !ren && d_change |=> $changed(raddr)); + property read_skip; + @(posedge clk) disable iff (rst) + !ren && d_change |=> $changed(raddr); + endproperty + w_overfill: cover property (read_skip); `else // !VERIFIC + // implementing w_underfill without properties + // can't use !$past(wen) since it will always trigger in the first cycle + reg past_nwen; + initial past_nwen <= 0; always @(posedge clk) begin - if (~rst) begin - // this is less reliable because $past() can sometimes give false - // positives in the first cycle - w_overfill: cover ($past(rskip) && raddr); - w_underfill: cover ($past(wskip) && waddr); + if (rst) past_nwen <= 0; + if (!rst) begin + w_underfill: cover (past_nwen && $changed(waddr)); + past_nwen <= !wen; + end + end + // end w_underfill + // w_overfill does the same, but has been separated so that w_underfill + // can be included in the docs more cleanly + reg past_nren; + initial past_nren <= 0; + always @(posedge clk) begin + if (rst) past_nren <= 0; + if (!rst) begin + w_overfill: cover (past_nren && $changed(raddr)); + past_nren <= !ren; end end `endif // VERIFIC diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv index 194db627..1d44dae7 100644 --- a/docs/examples/fifo/golden/fifo.sv +++ b/docs/examples/fifo/golden/fifo.sv @@ -151,19 +151,44 @@ module fifo ap_raddr3: assert property (@(posedge clk) 
disable iff (rst) !ren && !full |=> $stable(raddr)); ap_waddr3: assert property (@(posedge clk) disable iff (rst) !wen && !empty |=> $stable(waddr)); - // can we corrupt our data? - w_underfill: cover property (@(posedge clk) disable iff (rst) !wen |=> $changed(waddr)); + // use block formatting for w_underfill so it's easier to describe in docs + // and is more readily comparable with the non SVA implementation + property write_skip; + @(posedge clk) disable iff (rst) + !wen |=> $changed(waddr); + endproperty + w_underfill: cover property (write_skip); + // look for an overfill where the value in memory changes - let d_change = (wdata != rdata); - w_overfill: cover property (@(posedge clk) disable iff (rst) !ren && d_change |=> $changed(raddr)); + // the change in data makes certain that the value is overriden + property read_skip; + @(posedge clk) disable iff (rst) + !ren && d_change |=> $changed(raddr); + endproperty + w_overfill: cover property (read_skip); `else // !VERIFIC + // implementing w_underfill without properties + // can't use !$past(wen) since it will always trigger in the first cycle + reg past_nwen; + initial past_nwen <= 0; always @(posedge clk) begin - if (~rst) begin - // this is less reliable because $past() can sometimes give false - // positives in the first cycle - w_overfill: cover ($past(rskip) && raddr); - w_underfill: cover ($past(wskip) && waddr); + if (rst) past_nwen <= 0; + if (!rst) begin + w_underfill: cover (past_nwen && $changed(waddr)); + past_nwen <= !wen; + end + end + // end w_underfill + // w_overfill does the same, but has been separated so that w_underfill + // can be included in the docs more cleanly + reg past_nren; + initial past_nren <= 0; + always @(posedge clk) begin + if (rst) past_nren <= 0; + if (!rst) begin + w_overfill: cover (past_nren && $changed(raddr)); + past_nren <= !ren; end end `endif // VERIFIC diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 9792bd8f..6b4c1ef6 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -114,6 +114,8 @@ verification. **cover** Cover mode (testing cover statements). +**noverific** + Test fallback to default Verilog frontend. The use of the ``:default`` tag indicates that by default, basic and cover should be run if no tasks are specified, such as when running the command below. @@ -202,6 +204,12 @@ write addresses and avoiding underflow. .. image:: media/gtkwave_coverskip.png +.. note:: + + Implementation of the ``w_underfill`` cover statement depends on whether + Verific is used or not. See the `Concurrent assertions`_ section for more + detail. + Exercise ******** @@ -250,24 +258,46 @@ Until this point, all of the properties described have been *immediate* assertions. As the name suggests, immediate assertions are evaluated immediately whereas concurrent assertions allow for the capture of sequences of events which occur across time. The use of concurrent assertions requires a -more advanced series of checks. Using a parser such as Verific supports these -checks *without* having to write out potentially complicated state machines. -Verific is included for use in the *Tabby CAD Suite*. - -With concurrent assertions we are able to verify more fully that our enables and -status flags work as desired. For example, we can assert that if the read -enable signal is high then the address of the read pointer *must* change. -Because of our earlier *immediate* assertions that the pointer address can only -increment or remain the same we do not need to specify that here. 
We can also -assert that if the enable is low, and the buffer is not full and potentially -requires a skip in the read address, then the read address will *not* change. +more advanced series of checks. + +Compare the difference in implementation of ``w_underfill`` depending on the +presence of Verific. ``w_underfill`` looks for a sequence of events where the +write enable is low but the write address changes in the following cycle. This +is the expected behaviour for reading while empty and implies that the +``w_skip`` signal went high. Verific enables elaboration of SystemVerilog +Assertions (SVA) properties. Here we use such a property, ``write_skip``. + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: property write_skip + :end-at: w_underfill + :dedent: + +This property describes a *sequence* of events which occurs on the ``clk`` +signal and are disabled/restarted when the ``rst`` signal is high. The property +first waits for a low ``wen`` signal, and then a change in ``waddr`` in the +following cycle. ``w_underfill`` is then a cover of this property to verify +that it is possible. Now look at the implementation without Verific. .. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog - :start-at: ap_raddr2 - :end-at: ap_raddr3 + :start-at: reg past_nwen; + :end-before: end w_underfill :dedent: - :lines: 1,5 + +In this case we do not have access to SVA properties and are more limited in the +tools available to us. Ideally we would use ``$past`` to read the value of +``wen`` in the previous cycle and then check for a change in ``waddr``. However, +in the first cycle of simulation, reading ``$past`` will return a value of +``X``. This results in false triggers of the property so we instead implement +the ``past_nwen`` register which we can initialise to ``0`` and ensure it does +not trigger in the first cycle. + +As verification properties become more complex and check longer sequences, the +additional effort of hand-coding without SVA properties becomes much more +difficult. Using a parser such as Verific supports these checks *without* +having to write out potentially complicated state machines. Verific is included +for use in the *Tabby CAD Suite*. Further information ******************* From 0aebf0b4d0b0693917560972b5fb022f8f081d1c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 8 Aug 2022 14:25:48 +0200 Subject: [PATCH 137/220] aig model: Call memory_map late to avoid performance issues This requires running simplemap on the output as memory_map produces coarse-grained cells even though we already have a fine-grained design. 
--- sbysrc/sby_core.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 7eab2773..366817f7 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -726,8 +726,6 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print("read_ilang design_prep.il", file=f) print("hierarchy -simcheck", file=f) - print("memory_map -formal", file=f) - print("formalff -setundef -clk2ff -ff2anyinit", file=f) print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) @@ -735,6 +733,9 @@ def instance_hierarchy_error_callback(retcode): print("opt -full", file=f) print("techmap", file=f) print("opt -fast", file=f) + print("memory_map -formal", file=f) + print("formalff -clk2ff -ff2anyinit", file=f) + print("simplemap", file=f) print("dffunmap", file=f) print("abc -g AND -fast", file=f) print("opt_clean", file=f) From 4cccbf77fa7f3e7df0c3d12993fb4edda869461e Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 31 Mar 2022 08:50:49 -0400 Subject: [PATCH 138/220] sby: core: Added preliminary support for the `[setup]` section --- sbysrc/sby_core.py | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 366817f7..599e7bcc 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -247,6 +247,7 @@ class SbyConfig: def __init__(self): self.options = dict() self.engines = list() + self.setup = dict() self.script = list() self.autotune_config = None self.files = dict() @@ -294,6 +295,12 @@ def parse_config(self, f): self.error(f"sby file syntax error: '[engines]' section does not accept any arguments. got {args}") continue + if entries[0] == "setup": + mode = "setup" + if len(self.setup) != 0 or len(entries) != 1: + self.error(f"sby file syntax error: {line}") + continue + if section == "script": mode = "script" if len(self.script) != 0: @@ -351,6 +358,28 @@ def parse_config(self, f): self.engines.append(entries) continue + if mode == "setup": + self.error("[setup] section not yet supported") + kvp = line.split() + if kvp[0] not in ("cutpoint", "disable", "enable", "assume", "define"): + self.error(f"sby file syntax error: {line}") + else: + stmt = kvp[0] + if stmt == 'define': + if 'define' not in self.setup: + self.setup['define'] = {} + + if len(kvp[1:]) < 2: + self.error(f"sby file syntax error: {line}") + elif kvp[1][0] != '@': + self.error(f"sby file syntax error: {line}") + else: + name = kvp[1][1:] + self.setup['define'][name] = kvp[2:] + else: + self.setup[key] = kvp[1:] + continue + if mode == "script": self.script.append(line) continue From ed82c78accdf25dc2e249ec4249fd9f86dcb907f Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 31 Mar 2022 10:06:02 -0400 Subject: [PATCH 139/220] sby: core: Added preliminary support for `[stage]` sections --- sbysrc/sby_core.py | 56 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 56 insertions(+) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 599e7bcc..b12723e4 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -248,6 +248,7 @@ def __init__(self): self.options = dict() self.engines = list() self.setup = dict() + self.stage = dict() self.script = list() self.autotune_config = None self.files = dict() @@ -301,6 +302,27 @@ def parse_config(self, f): self.error(f"sby file syntax error: {line}") continue + if entries[0] == "stage": + mode = "stage" + if len(entries) > 3 or len(entries) < 2: + 
self.error(f"sby file syntax error: {line}") + + if len(entries) == 2: + parent = None + else: + parent = entries[2] + + key = entries[1] + + if key in self.stage: + self.error(f"stage {key} already defined") + + self.stage[key] = { + 'parent': parent + } + + continue + if section == "script": mode = "script" if len(self.script) != 0: @@ -380,6 +402,35 @@ def parse_config(self, f): self.setup[key] = kvp[1:] continue + if mode == "stage": + self.error("[stage] section not yet supported") + kvp = line.split() + if key is None or key == '': + self.error(f"sby file syntax error: in stage mode but unknown key") + + if len(kvp) == 0: + continue + + if kvp[0] not in ("mode", "depth", "timeout", "expect", "engine", + "cutpoint", "enable", "disable", "assume", "skip", + "check", "prove", "abstract", "setsel") or len(kvp) < 2: + self.error(f"sby file syntax error: {line}") + else: + stmt = kvp[0] + if stmt == 'setsel': + if len(kvp[1:]) < 2: + self.error(f"sby file syntax error: {line}") + elif kvp[1][0] != '@': + self.error(f"sby file syntax error: {line}") + else: + name = kvp[1][1:] + self.stage[key][stmt] = { + 'name': name, 'pattern': kvp[2:] + } + else: + self.stage[key][stmt] = kvp[1:] + continue + if mode == "script": self.script.append(line) continue @@ -834,6 +885,11 @@ def handle_non_engine_options(self): with open(f"{self.workdir}/config.sby", "r") as f: self.parse_config(f) + if len(self.stage) == 0: + self.stage['default'] = { + 'enable', '*' + } + self.handle_str_option("mode", None) if self.opt_mode not in ["bmc", "prove", "cover", "live"]: From 0ab158eea1de1b24afd9d66f0a95864934f2ea4a Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Tue, 28 Jun 2022 22:54:49 -0400 Subject: [PATCH 140/220] sby: core: minor update to the stage parsing --- sbysrc/sby_core.py | 50 +++++++++++++++++++++++----------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index b12723e4..9264569b 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -399,37 +399,37 @@ def parse_config(self, f): name = kvp[1][1:] self.setup['define'][name] = kvp[2:] else: - self.setup[key] = kvp[1:] + self.setup[stmt] = kvp[1:] continue - if mode == "stage": + if mode == "stage": self.error("[stage] section not yet supported") - kvp = line.split() - if key is None or key == '': - self.error(f"sby file syntax error: in stage mode but unknown key") + kvp = line.split() + if key is None or key == '': + self.error(f"sby file syntax error: in stage mode but unknown key") - if len(kvp) == 0: - continue + if len(kvp) == 0: + continue - if kvp[0] not in ("mode", "depth", "timeout", "expect", "engine", - "cutpoint", "enable", "disable", "assume", "skip", - "check", "prove", "abstract", "setsel") or len(kvp) < 2: - self.error(f"sby file syntax error: {line}") - else: - stmt = kvp[0] - if stmt == 'setsel': - if len(kvp[1:]) < 2: - self.error(f"sby file syntax error: {line}") - elif kvp[1][0] != '@': - self.error(f"sby file syntax error: {line}") - else: - name = kvp[1][1:] - self.stage[key][stmt] = { - 'name': name, 'pattern': kvp[2:] - } + if kvp[0] not in ("mode", "depth", "timeout", "expect", "engine", + "cutpoint", "enable", "disable", "assume", "skip", + "check", "prove", "abstract", "setsel") or len(kvp) < 2: + self.error(f"sby file syntax error: {line}") + else: + stmt = kvp[0] + if stmt == 'setsel': + if len(kvp[1:]) < 2: + self.error(f"sby file syntax error: {line}") + elif kvp[1][0] != '@': + self.error(f"sby file syntax error: {line}") else: - 
self.stage[key][stmt] = kvp[1:] - continue + name = kvp[1][1:] + self.stage[key][stmt] = { + 'name': name, 'pattern': kvp[2:] + } + else: + self.stage[key][stmt] = kvp[1:] + continue if mode == "script": self.script.append(line) From a0d366e58a94e163c6cb9f097727ce000681d182 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 09:21:45 -0400 Subject: [PATCH 141/220] some cleanup, added some rough parser tests, and started altering the engines section --- sbysrc/sby_core.py | 25 +++++++++++++++++-------- tests/parser/.gitignore | 4 ++++ tests/parser/Makefile | 2 ++ tests/parser/engines.sby | 13 +++++++++++++ tests/parser/options.sby | 4 ++++ tests/parser/setup.sby | 8 ++++++++ tests/parser/stage.sby | 12 ++++++++++++ 7 files changed, 60 insertions(+), 8 deletions(-) create mode 100644 tests/parser/.gitignore create mode 100644 tests/parser/Makefile create mode 100644 tests/parser/engines.sby create mode 100644 tests/parser/options.sby create mode 100644 tests/parser/setup.sby create mode 100644 tests/parser/stage.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 9264569b..8f988a62 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -246,7 +246,8 @@ class SbyAbort(BaseException): class SbyConfig: def __init__(self): self.options = dict() - self.engines = list() + # Define a default case for the engine block + self.engines = list() # { None: list() } self.setup = dict() self.stage = dict() self.script = list() @@ -257,6 +258,7 @@ def __init__(self): def parse_config(self, f): mode = None + engine_mode = None for line in f: raw_line = line @@ -290,18 +292,26 @@ def parse_config(self, f): if section == "engines": mode = "engines" - if len(self.engines) != 0: - self.error(f"sby file syntax error: '[engines]' section already defined") - if args is not None: - self.error(f"sby file syntax error: '[engines]' section does not accept any arguments. 
got {args}") - continue + if len(entries) > 2: + self.error(f"sby file syntax error: [engine] sections expects at most 1 argument, got more '{line}'") + + if len(entries) == 2 and entries[1] not in ("bmc", "prove", "cover", "live"): + self.error(f"sby file syntax error: Expected one of 'bmc, prove, cover, live' not '{entries[1]}'") + elif len(entries) == 2: + pass + # if entries[1] not in self.engines: + # self.engines[entries[1]] = list() + # else: + # self.error(f"Already defined engine block for mode '{entries[1]}'") + # [setup] if entries[0] == "setup": mode = "setup" if len(self.setup) != 0 or len(entries) != 1: self.error(f"sby file syntax error: {line}") continue + # [stage (PARENTS...)] if entries[0] == "stage": mode = "stage" if len(entries) > 3 or len(entries) < 2: @@ -377,11 +387,11 @@ def parse_config(self, f): if mode == "engines": entries = line.split() + # self.engines[engine_mode].append(entries) self.engines.append(entries) continue if mode == "setup": - self.error("[setup] section not yet supported") kvp = line.split() if kvp[0] not in ("cutpoint", "disable", "enable", "assume", "define"): self.error(f"sby file syntax error: {line}") @@ -403,7 +413,6 @@ def parse_config(self, f): continue if mode == "stage": - self.error("[stage] section not yet supported") kvp = line.split() if key is None or key == '': self.error(f"sby file syntax error: in stage mode but unknown key") diff --git a/tests/parser/.gitignore b/tests/parser/.gitignore new file mode 100644 index 00000000..b87b1902 --- /dev/null +++ b/tests/parser/.gitignore @@ -0,0 +1,4 @@ +* +!Makefile +!.gitignore +!*.sby diff --git a/tests/parser/Makefile b/tests/parser/Makefile new file mode 100644 index 00000000..7827c43e --- /dev/null +++ b/tests/parser/Makefile @@ -0,0 +1,2 @@ +SUBDIR=parser +include ../make/subdir.mk diff --git a/tests/parser/engines.sby b/tests/parser/engines.sby new file mode 100644 index 00000000..22771514 --- /dev/null +++ b/tests/parser/engines.sby @@ -0,0 +1,13 @@ +[options] +mode bmc +depth 1 +expect error + +[engines] +smtbmc + +[engines bmc] +smtbmc + +[engines cover] +smtbmc diff --git a/tests/parser/options.sby b/tests/parser/options.sby new file mode 100644 index 00000000..666953da --- /dev/null +++ b/tests/parser/options.sby @@ -0,0 +1,4 @@ +[options] +mode bmc +depth 1 +expect error diff --git a/tests/parser/setup.sby b/tests/parser/setup.sby new file mode 100644 index 00000000..6ca49e30 --- /dev/null +++ b/tests/parser/setup.sby @@ -0,0 +1,8 @@ +[options] +mode bmc +depth 1 +expect error + + +[setup] +enable * diff --git a/tests/parser/stage.sby b/tests/parser/stage.sby new file mode 100644 index 00000000..b69a1860 --- /dev/null +++ b/tests/parser/stage.sby @@ -0,0 +1,12 @@ +[options] +mode bmc +depth 1 +expect error + + +[stage stage_1] +mode prove +depth 20 +timeout 60 +expect error +enable * From f1a645bb187c3b84b9a6910abd102672ce22302f Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 09:28:37 -0400 Subject: [PATCH 142/220] sby: core: config: Updated the `[stage]` section to use commas for the parents --- sbysrc/sby_core.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 8f988a62..417548c6 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -311,7 +311,7 @@ def parse_config(self, f): self.error(f"sby file syntax error: {line}") continue - # [stage (PARENTS...)] + # [stage (PARENTS,...)] if entries[0] == "stage": mode = "stage" if len(entries) > 3 or len(entries) < 2: @@ -320,7 +320,7 @@ def 
parse_config(self, f): if len(entries) == 2: parent = None else: - parent = entries[2] + parent = entries[2].split(',') key = entries[1] From 9293081308c64027b2b27c594ac6f722bb78866e Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 10:21:48 -0400 Subject: [PATCH 143/220] modified the mode runners to accept the modified engine layout in preperation for the per-mode engine sections --- sbysrc/sby_core.py | 7 +++---- sbysrc/sby_mode_bmc.py | 9 +++++++-- sbysrc/sby_mode_cover.py | 10 ++++++++-- sbysrc/sby_mode_live.py | 9 +++++++-- sbysrc/sby_mode_prove.py | 9 +++++++-- 5 files changed, 32 insertions(+), 12 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 417548c6..7651f095 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -247,7 +247,7 @@ class SbyConfig: def __init__(self): self.options = dict() # Define a default case for the engine block - self.engines = list() # { None: list() } + self.engines = { None: list() } self.setup = dict() self.stage = dict() self.script = list() @@ -387,8 +387,7 @@ def parse_config(self, f): if mode == "engines": entries = line.split() - # self.engines[engine_mode].append(entries) - self.engines.append(entries) + self.engines[engine_mode].append(entries) continue if mode == "setup": @@ -565,7 +564,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logf print(line, file=f) def engine_list(self): - return list(enumerate(self.engines)) + return list(enumerate(self.engines.items())) def check_timeout(self): if self.opt_timeout is not None: diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index cc4eba78..e8f4f711 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -24,10 +24,15 @@ def run(task): task.handle_int_option("append", 0) task.handle_str_option("aigsmt", "yices") - for engine_idx, engine in task.engine_list(): + for engine_idx, engine_section in task.engine_list(): + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: + engine_name = engine_idx + assert len(engine) > 0 - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index 8fb1da99..f9182477 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -23,10 +23,16 @@ def run(task): task.handle_int_option("depth", 20) task.handle_int_option("append", 0) - for engine_idx, engine in task.engine_list(): + for engine_idx, engine_section in task.engine_list(): + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: + engine_name = engine_idx + + assert len(engine) > 0 - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index 6746200e..d713215f 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -24,10 +24,15 @@ def run(task): task.status = "UNKNOWN" - for engine_idx, engine in task.engine_list(): + for engine_idx, engine_section in task.engine_list(): + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: + engine_name = engine_idx + assert len(engine) > 0 - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"""engine_{engine_name}: {" 
".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "aiger": diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index 1f62bb94..c4dcb0da 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -31,10 +31,15 @@ def run(task): task.basecase_procs = list() task.induction_procs = list() - for engine_idx, engine in task.engine_list(): + for engine_idx, engine_section in task.engine_list(): + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: + engine_name = engine_idx + assert len(engine) > 0 - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": From 204869bfedbcaec09465580654367da50ba7c5de Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 10:34:36 -0400 Subject: [PATCH 144/220] sby: core: config: updated the error messages for the new setctions to make them more descriptive --- sbysrc/sby_core.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 7651f095..d5de81fa 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -393,7 +393,7 @@ def parse_config(self, f): if mode == "setup": kvp = line.split() if kvp[0] not in ("cutpoint", "disable", "enable", "assume", "define"): - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: found '{kvp[0]}' but expected one of 'cutpoint', 'disable', 'enable', 'assume', or 'define'") else: stmt = kvp[0] if stmt == 'define': @@ -401,9 +401,9 @@ def parse_config(self, f): self.setup['define'] = {} if len(kvp[1:]) < 2: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: 'define' statement takes 2 arguments, got {len(kvp[1:])}") elif kvp[1][0] != '@': - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: 'define' statement expects an '@' prefixed name as the first parameter, got {line}") else: name = kvp[1][1:] self.setup['define'][name] = kvp[2:] @@ -427,9 +427,9 @@ def parse_config(self, f): stmt = kvp[0] if stmt == 'setsel': if len(kvp[1:]) < 2: - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: 'setsel' statement takes 2 arguments, got {len(kvp[1:])}") elif kvp[1][0] != '@': - self.error(f"sby file syntax error: {line}") + self.error(f"sby file syntax error: 'setsel' statement expects an '@' prefixed name as the first parameter, got {line}") else: name = kvp[1][1:] self.stage[key][stmt] = { From 4abd8a7d6944e48b96fcddd193f850852baa20c7 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 10:50:57 -0400 Subject: [PATCH 145/220] tests: parser: updated the parser tests that caused a failure due to the lack of engines section --- tests/parser/setup.sby | 2 ++ tests/parser/stage.sby | 2 ++ 2 files changed, 4 insertions(+) diff --git a/tests/parser/setup.sby b/tests/parser/setup.sby index 6ca49e30..9b7e468a 100644 --- a/tests/parser/setup.sby +++ b/tests/parser/setup.sby @@ -3,6 +3,8 @@ mode bmc depth 1 expect error +[engines] +smtbmc [setup] enable * diff --git a/tests/parser/stage.sby b/tests/parser/stage.sby index b69a1860..7048b60d 100644 --- a/tests/parser/stage.sby +++ b/tests/parser/stage.sby @@ -3,6 +3,8 @@ mode bmc depth 1 expect error +[engines] +smtbmc [stage stage_1] mode prove From 987e439967d5894bf7525e6a44db1b6f1d585c7d Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 
29 Jul 2022 10:51:25 -0400 Subject: [PATCH 146/220] tests: parser: added the stages option to the options test file --- tests/parser/options.sby | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tests/parser/options.sby b/tests/parser/options.sby index 666953da..bf6a5536 100644 --- a/tests/parser/options.sby +++ b/tests/parser/options.sby @@ -2,3 +2,7 @@ mode bmc depth 1 expect error +stages foo,bar,nya + +[engines] +smtbmc From e4a7f624c1483451a9dd331b518ba96a87587767 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 29 Jul 2022 10:52:49 -0400 Subject: [PATCH 147/220] sby: core: config: fixed the engines section parsing where it was not setting the engine mode when parsing the section --- sbysrc/sby_core.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index d5de81fa..b4c95739 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -301,6 +301,7 @@ def parse_config(self, f): pass # if entries[1] not in self.engines: # self.engines[entries[1]] = list() + # engine_mode = entries[1] # else: # self.error(f"Already defined engine block for mode '{entries[1]}'") From 2f841e5d55f2c0a0d408b4cee38ee92bb92657ef Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Tue, 2 Aug 2022 08:38:24 -0400 Subject: [PATCH 148/220] sby: core: updated the parsing to match the changes in PR #206 --- sbysrc/sby_core.py | 37 ++++++++++++++++++++++--------------- 1 file changed, 22 insertions(+), 15 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index b4c95739..81b0da18 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -290,6 +290,7 @@ def parse_config(self, f): self.error(f"sby file syntax error: '[options]' section does not accept any arguments. got {args}") continue + # [engines (MODE)] if section == "engines": mode = "engines" if len(entries) > 2: @@ -306,30 +307,36 @@ def parse_config(self, f): # self.error(f"Already defined engine block for mode '{entries[1]}'") # [setup] - if entries[0] == "setup": + if section == "setup": mode = "setup" - if len(self.setup) != 0 or len(entries) != 1: - self.error(f"sby file syntax error: {line}") - continue + if len(self.setup) != 0: + self.error(f"sby file syntax error: '[setup]' section already defined") + + if args is not None: + self.error(f"sby file syntax error: '[setup]' section does not accept any arguments. 
got {args}") # [stage (PARENTS,...)] - if entries[0] == "stage": + if section == "stage": mode = "stage" - if len(entries) > 3 or len(entries) < 2: - self.error(f"sby file syntax error: {line}") - if len(entries) == 2: - parent = None + if args is None: + self.error(f"sby file syntax error: '[stage]' section expects arguments, got none") + + section_args = args.split(" ", maxsplit = 1) + + + if len(section_args) == 1: + parents = None else: - parent = entries[2].split(',') + parents = list(map(lambda a: a.trim(), section_args[1].split(','))) - key = entries[1] + stage_name = section_args[0] - if key in self.stage: - self.error(f"stage {key} already defined") + if stage_name in self.stage: + self.error(f"stage {stage_name} already defined") - self.stage[key] = { - 'parent': parent + self.stage[stage_name] = { + 'parents': parents } continue From ad4f506d2a1e4b8354dc2ec43e4e9a8166ab83e8 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 06:32:32 -0400 Subject: [PATCH 149/220] sby: core: fixed up the `engines` section parser --- sbysrc/sby_core.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 81b0da18..9f4b48bc 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -293,18 +293,22 @@ def parse_config(self, f): # [engines (MODE)] if section == "engines": mode = "engines" - if len(entries) > 2: - self.error(f"sby file syntax error: [engine] sections expects at most 1 argument, got more '{line}'") - if len(entries) == 2 and entries[1] not in ("bmc", "prove", "cover", "live"): - self.error(f"sby file syntax error: Expected one of 'bmc, prove, cover, live' not '{entries[1]}'") - elif len(entries) == 2: - pass - # if entries[1] not in self.engines: - # self.engines[entries[1]] = list() - # engine_mode = entries[1] - # else: - # self.error(f"Already defined engine block for mode '{entries[1]}'") + if args is not None: + section_args = args.split() + + if len(section_args) > 1: + self.error(f"sby file syntax error: '[engine]' sections expects at most 1 argument, got '{len(section_args)}'") + + if section_args[0] not in ("bmc", "prove", "cover", "live"): + self.error(f"sby file syntax error: Expected one of 'bmc, prove, cover, live' as '[engine]` argument, not '{section_args[0]}'") + + if section_args[0] in self.engines: + self.error(f"Already defined engine block for mode '{section_args[0]}'") + else: + self.engines[section_args[0]] = list() + engine_mode = section_args[0] + continue # [setup] if section == "setup": From 6c959577f39462e733f8376de1514b38c55a20fe Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 06:56:11 -0400 Subject: [PATCH 150/220] sby: core: cleaned up the `[stage]` section parsing --- sbysrc/sby_core.py | 54 +++++++++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 22 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 9f4b48bc..711e331a 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -259,6 +259,7 @@ def __init__(self): def parse_config(self, f): mode = None engine_mode = None + stage_name = None for line in f: raw_line = line @@ -326,7 +327,7 @@ def parse_config(self, f): if args is None: self.error(f"sby file syntax error: '[stage]' section expects arguments, got none") - section_args = args.split(" ", maxsplit = 1) + section_args = args.strip().split(maxsplit = 1) if len(section_args) == 1: @@ -424,31 +425,40 @@ def parse_config(self, f): continue if mode == "stage": - kvp = line.split() - if 
key is None or key == '': - self.error(f"sby file syntax error: in stage mode but unknown key") + _valid_options = ( + "mode", "depth", "timeout", "expect", "engine", + "cutpoint", "enable", "disable", "assume", "skip", + "check", "prove", "abstract", "setsel" + ) - if len(kvp) == 0: - continue + args = line.strip().split(maxsplit = 1) + + if args is None: + self.error(f"sby file syntax error: unknown key in '[stage]' section") + + if len(args) < 2: + self.error(f"sby file syntax error: entry in '[stage]' must have an argument, got {' '.join(args)}") - if kvp[0] not in ("mode", "depth", "timeout", "expect", "engine", - "cutpoint", "enable", "disable", "assume", "skip", - "check", "prove", "abstract", "setsel") or len(kvp) < 2: - self.error(f"sby file syntax error: {line}") + if args[0] not in _valid_options: + self.error(f"sby file syntax error: expected on of '{', '.join(_valid_options)}' in '[stage]' section, got '{args[0]}'") else: - stmt = kvp[0] - if stmt == 'setsel': - if len(kvp[1:]) < 2: - self.error(f"sby file syntax error: 'setsel' statement takes 2 arguments, got {len(kvp[1:])}") - elif kvp[1][0] != '@': - self.error(f"sby file syntax error: 'setsel' statement expects an '@' prefixed name as the first parameter, got {line}") - else: - name = kvp[1][1:] - self.stage[key][stmt] = { - 'name': name, 'pattern': kvp[2:] - } + opt_key = args[0] + opt_args = args[1].strip().split() + if opt_key == 'setsel': + + if len(opt_args) != 2: + self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section takes exactly 2 arguments, got {len(opt_args)}") + + if opt_args[0][0] != '@': + self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section expects an '@' prefixed name as the first parameter, got {opt_args[0]}") + + name = opt_args[0][1:] + self.stage[stage_name][opt_key] = { + 'name': name, 'pattern': opt_args[2:] + } + else: - self.stage[key][stmt] = kvp[1:] + self.stage[stage_name][opt_key] = opt_args[1:] continue if mode == "script": From 98fdcd7772bad802d5d30563f992ad445926250f Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 07:05:32 -0400 Subject: [PATCH 151/220] sby: core: fixed up the `[setup]` section --- sbysrc/sby_core.py | 54 +++++++++++++++++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 13 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 711e331a..ddc342f8 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -320,6 +320,8 @@ def parse_config(self, f): if args is not None: self.error(f"sby file syntax error: '[setup]' section does not accept any arguments. 
got {args}") + continue + # [stage (PARENTS,...)] if section == "stage": mode = "stage" @@ -404,24 +406,40 @@ def parse_config(self, f): continue if mode == "setup": - kvp = line.split() - if kvp[0] not in ("cutpoint", "disable", "enable", "assume", "define"): - self.error(f"sby file syntax error: found '{kvp[0]}' but expected one of 'cutpoint', 'disable', 'enable', 'assume', or 'define'") + _valid_options = ( + "cutpoint", "disable", "enable", "assume", "define" + ) + + args = line.strip().split(maxsplit = 1) + + + if args is None: + self.error(f"sby file syntax error: unknown key in '[setup]' section") + + if len(args) < 2: + self.error(f"sby file syntax error: entry in '[setup]' must have an argument, got {' '.join(args)}") + + if args[0] not in _valid_options: + self.error(f"sby file syntax error: expected on of '{', '.join(_valid_options)}' in '[setup]' section, got '{args[0]}'") + else: - stmt = kvp[0] - if stmt == 'define': + opt_key = args[0] + opt_args = args[1].strip().split() + + if opt_key == 'define': if 'define' not in self.setup: self.setup['define'] = {} - if len(kvp[1:]) < 2: - self.error(f"sby file syntax error: 'define' statement takes 2 arguments, got {len(kvp[1:])}") - elif kvp[1][0] != '@': - self.error(f"sby file syntax error: 'define' statement expects an '@' prefixed name as the first parameter, got {line}") - else: - name = kvp[1][1:] - self.setup['define'][name] = kvp[2:] + if len(opt_args) != 2: + self.error(f"sby file syntax error: 'define' statement in '[setup]' section takes exactly 2 arguments, got {len(opt_args)}") + + if opt_args[0][0] != '@': + self.error(f"sby file syntax error: 'define' statement in '[setup]' section expects an '@' prefixed name as the first parameter, got {opt_args[0]}") + + name = opt_args[0][1:] + self.setup['define'][name] = opt_args[2:] else: - self.setup[stmt] = kvp[1:] + self.setup[opt_key] = opt_args[1:] continue if mode == "stage": @@ -453,11 +471,18 @@ def parse_config(self, f): self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section expects an '@' prefixed name as the first parameter, got {opt_args[0]}") name = opt_args[0][1:] + + if stage_name not in self.stage: + self.stage[stage_name] = dict() + self.stage[stage_name][opt_key] = { 'name': name, 'pattern': opt_args[2:] } else: + if stage_name not in self.stage: + self.stage[stage_name] = dict() + self.stage[stage_name][opt_key] = opt_args[1:] continue @@ -483,6 +508,9 @@ def parse_config(self, f): self.error(f"sby file syntax error: In an incomprehensible mode '{mode}'") + if len(self.stage.keys()) == 0: + self.stage['default'] = { 'enable': '*' } + def error(self, logmessage): raise SbyAbort(logmessage) From da56a3c6d175571c51e0005d380169550df6e1a9 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 07:39:00 -0400 Subject: [PATCH 152/220] docs: started working on a rough draft of the docs for the new sections and changes to existing sections --- docs/source/reference.rst | 58 +++++++++++++++++++++++++++++++++++++++ tests/parser/options.sby | 8 ------ 2 files changed, 58 insertions(+), 8 deletions(-) delete mode 100644 tests/parser/options.sby diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 710ba7bc..4af5e0ad 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -178,6 +178,9 @@ options are: | | ``prove``, | specified number of cycles at the end of the trace. 
| | | ``cover`` | Default: ``0`` | +------------------+------------+---------------------------------------------------------+ +| ``stage`` | All | The stages to select to run. | +| | | Default: All | ++------------------+------------+---------------------------------------------------------+ Engines section --------------- @@ -205,6 +208,19 @@ solver options. In the 2nd line ``abc`` is the engine, there are no engine options, ``sim3`` is the solver, and ``-W 15`` are solver options. + +The ``[engines]`` section also takes an argument as to what mode that block applies to. +Meaning you can specify an engine block for a given mode. + +Example: + +.. code-block:: text + + [engines bmc] + btor pono + abc sim3 + + The following mode/engine/solver combinations are currently supported: +-----------+--------------------------+ @@ -391,6 +407,48 @@ Run ``yosys`` in a terminal window and enter ``help`` on the Yosys prompt for a command list. Run ``help `` for a detailed description of the command, for example ``help prep``. +Setup section +------------- + +The ``[setup]`` section provides a way to add global cutpoints, and enable/disable/assume/define patterns. +By default properties that are unspecified default to enabled, and a ``disable *`` at the end of the settings +will set the default to be disabled. + +For example: + +.. code-block:: text + + [setup] + enable * + +The following options are available for the ``[setup]`` section: + +.. todo:: + + Document better + ++------------------+---------------------------------------------------------+ +| Option | Description | ++==================+=========================================================+ +| ``cutpoint`` | Defines a cutpoint pattern. | ++------------------+---------------------------------------------------------+ +| ``disable`` | Defines a disable pattern. | ++------------------+---------------------------------------------------------+ +| ``enable`` | Defines an enable pattern. | ++------------------+---------------------------------------------------------+ +| ``assume`` | Defines an assume pattern. | ++------------------+---------------------------------------------------------+ +| ``define`` | Define a value. | ++------------------+---------------------------------------------------------+ + + +Stage section +------------- + +.. 
todo:: + + Document + Files section ------------- diff --git a/tests/parser/options.sby b/tests/parser/options.sby deleted file mode 100644 index bf6a5536..00000000 --- a/tests/parser/options.sby +++ /dev/null @@ -1,8 +0,0 @@ -[options] -mode bmc -depth 1 -expect error -stages foo,bar,nya - -[engines] -smtbmc From 637095a8ecd2014df1b0a998003911fe29349914 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 08:51:35 -0400 Subject: [PATCH 153/220] sby: fixed the sby task execution to accept the new engine internal layout --- sbysrc/sby_core.py | 4 ++-- sbysrc/sby_mode_bmc.py | 10 ++++++++-- sbysrc/sby_mode_cover.py | 9 +++++++-- sbysrc/sby_mode_live.py | 9 +++++++-- sbysrc/sby_mode_prove.py | 9 +++++++-- 5 files changed, 31 insertions(+), 10 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ddc342f8..7ea25fc6 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -401,8 +401,8 @@ def parse_config(self, f): continue if mode == "engines": - entries = line.split() - self.engines[engine_mode].append(entries) + args = line.strip().split() + self.engines[engine_mode].append(args) continue if mode == "setup": diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index e8f4f711..e9afbc5d 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -25,13 +25,19 @@ def run(task): task.handle_str_option("aigsmt", "yices") for engine_idx, engine_section in task.engine_list(): - engine = engine_section[1][0] - engine_name = engine_section[0] + if isinstance(engine_section, list): + engine = engine_section + engine_name = None + else: + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: engine_name = engine_idx assert len(engine) > 0 + task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index f9182477..f1e6ff5f 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -24,8 +24,13 @@ def run(task): task.handle_int_option("append", 0) for engine_idx, engine_section in task.engine_list(): - engine = engine_section[1][0] - engine_name = engine_section[0] + if isinstance(engine_section, list): + engine = engine_section + engine_name = None + else: + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: engine_name = engine_idx diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index d713215f..bdc772c0 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -25,8 +25,13 @@ def run(task): task.status = "UNKNOWN" for engine_idx, engine_section in task.engine_list(): - engine = engine_section[1][0] - engine_name = engine_section[0] + if isinstance(engine_section, list): + engine = engine_section + engine_name = None + else: + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: engine_name = engine_idx diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index c4dcb0da..36f9929f 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -32,8 +32,13 @@ def run(task): task.induction_procs = list() for engine_idx, engine_section in task.engine_list(): - engine = engine_section[1][0] - engine_name = engine_section[0] + if isinstance(engine_section, list): + engine = engine_section + engine_name = None + else: + engine = engine_section[1][0] + engine_name = engine_section[0] + if engine_name is None: engine_name = engine_idx From 
841e0cb797b920067d23918df138c12d8bbfdf58 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 4 Aug 2022 09:41:24 -0400 Subject: [PATCH 154/220] sby: core: Added unsupported messages to the new sections --- sbysrc/sby_core.py | 6 ++++-- tests/parser/engines.sby | 13 ------------- tests/parser/setup.sby | 10 ---------- tests/parser/stage.sby | 14 -------------- 4 files changed, 4 insertions(+), 39 deletions(-) delete mode 100644 tests/parser/engines.sby delete mode 100644 tests/parser/setup.sby delete mode 100644 tests/parser/stage.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 7ea25fc6..a1e42a20 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -291,7 +291,6 @@ def parse_config(self, f): self.error(f"sby file syntax error: '[options]' section does not accept any arguments. got {args}") continue - # [engines (MODE)] if section == "engines": mode = "engines" @@ -311,8 +310,9 @@ def parse_config(self, f): engine_mode = section_args[0] continue - # [setup] if section == "setup": + self.error(f"sby file syntax error: the '[setup]' section is not yet supported") + mode = "setup" if len(self.setup) != 0: self.error(f"sby file syntax error: '[setup]' section already defined") @@ -324,6 +324,8 @@ def parse_config(self, f): # [stage (PARENTS,...)] if section == "stage": + self.error(f"sby file syntax error: the '[stage]' section is not yet supported") + mode = "stage" if args is None: diff --git a/tests/parser/engines.sby b/tests/parser/engines.sby deleted file mode 100644 index 22771514..00000000 --- a/tests/parser/engines.sby +++ /dev/null @@ -1,13 +0,0 @@ -[options] -mode bmc -depth 1 -expect error - -[engines] -smtbmc - -[engines bmc] -smtbmc - -[engines cover] -smtbmc diff --git a/tests/parser/setup.sby b/tests/parser/setup.sby deleted file mode 100644 index 9b7e468a..00000000 --- a/tests/parser/setup.sby +++ /dev/null @@ -1,10 +0,0 @@ -[options] -mode bmc -depth 1 -expect error - -[engines] -smtbmc - -[setup] -enable * diff --git a/tests/parser/stage.sby b/tests/parser/stage.sby deleted file mode 100644 index 7048b60d..00000000 --- a/tests/parser/stage.sby +++ /dev/null @@ -1,14 +0,0 @@ -[options] -mode bmc -depth 1 -expect error - -[engines] -smtbmc - -[stage stage_1] -mode prove -depth 20 -timeout 60 -expect error -enable * From a6c220dd5d73aca3eab0ccc43865510063291232 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Fri, 5 Aug 2022 08:22:55 -0400 Subject: [PATCH 155/220] docs: Cut out the in-progress docs in preperation for a merge --- docs/source/reference.rst | 56 --------------------------------------- sbysrc/sby_core.py | 5 ---- 2 files changed, 61 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 4af5e0ad..370001ec 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -178,9 +178,6 @@ options are: | | ``prove``, | specified number of cycles at the end of the trace. | | | ``cover`` | Default: ``0`` | +------------------+------------+---------------------------------------------------------+ -| ``stage`` | All | The stages to select to run. | -| | | Default: All | -+------------------+------------+---------------------------------------------------------+ Engines section --------------- @@ -209,18 +206,6 @@ In the 2nd line ``abc`` is the engine, there are no engine options, ``sim3`` is solver, and ``-W 15`` are solver options. -The ``[engines]`` section also takes an argument as to what mode that block applies to. -Meaning you can specify an engine block for a given mode. - -Example: - -.. 
code-block:: text - - [engines bmc] - btor pono - abc sim3 - - The following mode/engine/solver combinations are currently supported: +-----------+--------------------------+ @@ -407,47 +392,6 @@ Run ``yosys`` in a terminal window and enter ``help`` on the Yosys prompt for a command list. Run ``help `` for a detailed description of the command, for example ``help prep``. -Setup section -------------- - -The ``[setup]`` section provides a way to add global cutpoints, and enable/disable/assume/define patterns. -By default properties that are unspecified default to enabled, and a ``disable *`` at the end of the settings -will set the default to be disabled. - -For example: - -.. code-block:: text - - [setup] - enable * - -The following options are available for the ``[setup]`` section: - -.. todo:: - - Document better - -+------------------+---------------------------------------------------------+ -| Option | Description | -+==================+=========================================================+ -| ``cutpoint`` | Defines a cutpoint pattern. | -+------------------+---------------------------------------------------------+ -| ``disable`` | Defines a disable pattern. | -+------------------+---------------------------------------------------------+ -| ``enable`` | Defines an enable pattern. | -+------------------+---------------------------------------------------------+ -| ``assume`` | Defines an assume pattern. | -+------------------+---------------------------------------------------------+ -| ``define`` | Define a value. | -+------------------+---------------------------------------------------------+ - - -Stage section -------------- - -.. todo:: - - Document Files section ------------- diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index a1e42a20..c94e9881 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -945,11 +945,6 @@ def handle_non_engine_options(self): with open(f"{self.workdir}/config.sby", "r") as f: self.parse_config(f) - if len(self.stage) == 0: - self.stage['default'] = { - 'enable', '*' - } - self.handle_str_option("mode", None) if self.opt_mode not in ["bmc", "prove", "cover", "live"]: From e8b8816143cd7eef51f79d1927c9021de75e3aaa Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Wed, 17 Aug 2022 09:02:13 -0400 Subject: [PATCH 156/220] docs: removed empty line --- docs/source/reference.rst | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 370001ec..589f9978 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -205,7 +205,6 @@ solver options. In the 2nd line ``abc`` is the engine, there are no engine options, ``sim3`` is the solver, and ``-W 15`` are solver options. 
- The following mode/engine/solver combinations are currently supported: +-----------+--------------------------+ From 8f5508142d02a9d373b613225ae37c765fbc886f Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Wed, 17 Aug 2022 09:03:34 -0400 Subject: [PATCH 157/220] sby: core: minor error message cleanups for consistency --- sbysrc/sby_core.py | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index c94e9881..8c1ebac3 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -298,10 +298,10 @@ def parse_config(self, f): section_args = args.split() if len(section_args) > 1: - self.error(f"sby file syntax error: '[engine]' sections expects at most 1 argument, got '{len(section_args)}'") + self.error(f"sby file syntax error: '[engines]' section expects at most 1 argument, got '{' '.join(section_args)}'") if section_args[0] not in ("bmc", "prove", "cover", "live"): - self.error(f"sby file syntax error: Expected one of 'bmc, prove, cover, live' as '[engine]` argument, not '{section_args[0]}'") + self.error(f"sby file syntax error: Expected one of 'bmc', 'prove', 'cover', 'live' as '[engines]' argument, got '{section_args[0]}'") if section_args[0] in self.engines: self.error(f"Already defined engine block for mode '{section_args[0]}'") @@ -318,7 +318,7 @@ def parse_config(self, f): self.error(f"sby file syntax error: '[setup]' section already defined") if args is not None: - self.error(f"sby file syntax error: '[setup]' section does not accept any arguments. got {args}") + self.error(f"sby file syntax error: '[setup]' section does not accept any arguments, got '{args}'") continue @@ -337,12 +337,12 @@ def parse_config(self, f): if len(section_args) == 1: parents = None else: - parents = list(map(lambda a: a.trim(), section_args[1].split(','))) + parents = list(map(lambda a: a.strip(), section_args[1].split(','))) stage_name = section_args[0] if stage_name in self.stage: - self.error(f"stage {stage_name} already defined") + self.error(f"stage '{stage_name}' already defined") self.stage[stage_name] = { 'parents': parents @@ -419,10 +419,10 @@ def parse_config(self, f): self.error(f"sby file syntax error: unknown key in '[setup]' section") if len(args) < 2: - self.error(f"sby file syntax error: entry in '[setup]' must have an argument, got {' '.join(args)}") + self.error(f"sby file syntax error: entry in '[setup]' must have an argument, got '{' '.join(args)}'") if args[0] not in _valid_options: - self.error(f"sby file syntax error: expected on of '{', '.join(_valid_options)}' in '[setup]' section, got '{args[0]}'") + self.error(f"sby file syntax error: expected one of '{', '.join(_valid_options)}' in '[setup]' section, got '{args[0]}'") else: opt_key = args[0] @@ -433,10 +433,10 @@ def parse_config(self, f): self.setup['define'] = {} if len(opt_args) != 2: - self.error(f"sby file syntax error: 'define' statement in '[setup]' section takes exactly 2 arguments, got {len(opt_args)}") + self.error(f"sby file syntax error: 'define' statement in '[setup]' section takes exactly 2 arguments, got '{' '.join(opt_args)}'") if opt_args[0][0] != '@': - self.error(f"sby file syntax error: 'define' statement in '[setup]' section expects an '@' prefixed name as the first parameter, got {opt_args[0]}") + self.error(f"sby file syntax error: 'define' statement in '[setup]' section expects an '@' prefixed name as the first parameter, got '{opt_args[0]}'") name = opt_args[0][1:] self.setup['define'][name] = opt_args[2:] @@ -460,17 
+460,17 @@ def parse_config(self, f): self.error(f"sby file syntax error: entry in '[stage]' must have an argument, got {' '.join(args)}") if args[0] not in _valid_options: - self.error(f"sby file syntax error: expected on of '{', '.join(_valid_options)}' in '[stage]' section, got '{args[0]}'") + self.error(f"sby file syntax error: expected one of '{', '.join(map(repr, _valid_options))}' in '[stage]' section, got '{args[0]}'") else: opt_key = args[0] opt_args = args[1].strip().split() if opt_key == 'setsel': if len(opt_args) != 2: - self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section takes exactly 2 arguments, got {len(opt_args)}") + self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section takes exactly 2 arguments, got '{' '.join(opt_args)}'") if opt_args[0][0] != '@': - self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section expects an '@' prefixed name as the first parameter, got {opt_args[0]}") + self.error(f"sby file syntax error: 'setsel' statement in '[stage]' section expects an '@' prefixed name as the first parameter, got '{opt_args[0]}'") name = opt_args[0][1:] From 41b4ce5a7e19134d4d82f88b906d0d4d246e7086 Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 18 Aug 2022 05:51:03 -0400 Subject: [PATCH 158/220] sby: fixed issue where engine index would be out of range --- sbysrc/sby_core.py | 7 ++++--- sbysrc/sby_mode_bmc.py | 3 +-- sbysrc/sby_mode_cover.py | 4 +--- sbysrc/sby_mode_live.py | 3 +-- sbysrc/sby_mode_prove.py | 3 +-- 5 files changed, 8 insertions(+), 12 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 8c1ebac3..e4d58a53 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -246,8 +246,7 @@ class SbyAbort(BaseException): class SbyConfig: def __init__(self): self.options = dict() - # Define a default case for the engine block - self.engines = { None: list() } + self.engines = dict() self.setup = dict() self.stage = dict() self.script = list() @@ -263,7 +262,7 @@ def parse_config(self, f): for line in f: raw_line = line - if mode in ["options", "engines", "files", "autotune"]: + if mode in ["options", "engines", "files", "autotune", "setup", "stage"]: line = re.sub(r"\s*(\s#.*)?$", "", line) if line == "" or line[0] == "#": continue @@ -404,6 +403,8 @@ def parse_config(self, f): if mode == "engines": args = line.strip().split() + if engine_mode not in self.engines: + self.engines[engine_mode] = list() self.engines[engine_mode].append(args) continue diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index e9afbc5d..2613efab 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -29,14 +29,13 @@ def run(task): engine = engine_section engine_name = None else: + assert len(engine_section[1]) > 0 engine = engine_section[1][0] engine_name = engine_section[0] if engine_name is None: engine_name = engine_idx - assert len(engine) > 0 - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index f1e6ff5f..61d0b079 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -28,15 +28,13 @@ def run(task): engine = engine_section engine_name = None else: + assert len(engine_section[1]) > 0 engine = engine_section[1][0] engine_name = engine_section[0] if engine_name is None: engine_name = engine_idx - - assert len(engine) > 0 - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") 
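To make the data flow of the engine-layout patches above easier to follow, here is a minimal self-contained sketch, with an illustrative name (demo_engines) and made-up engine entries, of how the per-mode engines dictionary built by parse_config is consumed by the mode runners:

    # Sketch: SbyConfig.engines maps a mode name (or None for a plain [engines]
    # section) to a list of engine command lines; engine_list() enumerates the
    # (mode, commands) pairs for the runners.
    demo_engines = {}
    demo_engines.setdefault("bmc", []).append(["smtbmc", "boolector"])
    demo_engines.setdefault(None, []).append(["abc", "pdr"])

    for engine_idx, (engine_name, engine_cmds) in enumerate(demo_engines.items()):
        engine = engine_cmds[0]        # the runners currently use the first entry
        if engine_name is None:
            engine_name = engine_idx   # fall back to the numeric index as a label
        print(f"engine_{engine_name}: {' '.join(engine)}")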
diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index bdc772c0..c624ec5a 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -29,14 +29,13 @@ def run(task): engine = engine_section engine_name = None else: + assert len(engine_section[1]) > 0 engine = engine_section[1][0] engine_name = engine_section[0] if engine_name is None: engine_name = engine_idx - assert len(engine) > 0 - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index 36f9929f..f3dc1b7d 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -36,14 +36,13 @@ def run(task): engine = engine_section engine_name = None else: + assert len(engine_section[1]) > 0 engine = engine_section[1][0] engine_name = engine_section[0] if engine_name is None: engine_name = engine_idx - assert len(engine) > 0 - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") From de40cc499f08e517ece48be1ab4f0d3350d0a72a Mon Sep 17 00:00:00 2001 From: Aki Van Ness Date: Thu, 18 Aug 2022 05:52:38 -0400 Subject: [PATCH 159/220] sby: core: removed invalid None check in setup section --- sbysrc/sby_core.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index e4d58a53..ad9552cc 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -415,10 +415,6 @@ def parse_config(self, f): args = line.strip().split(maxsplit = 1) - - if args is None: - self.error(f"sby file syntax error: unknown key in '[setup]' section") - if len(args) < 2: self.error(f"sby file syntax error: entry in '[setup]' must have an argument, got '{' '.join(args)}'") From ea84c67f958feef14334c61230eeeb492dbef3b0 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 18 Aug 2022 12:09:41 +0200 Subject: [PATCH 160/220] tests: Ignore .sby files starting with skip_ --- tests/make/collect_tests.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py index b5f76996..2aecceee 100644 --- a/tests/make/collect_tests.py +++ b/tests/make/collect_tests.py @@ -18,6 +18,8 @@ def collect(path): if not SAFE_PATH.match(filename): print(f"skipping {filename!r}, use only [a-zA-Z0-9_./] in filenames") continue + if entry.name.startswith("skip_"): + continue tests.append(entry) for entry in path.glob("*"): if entry.is_dir(): From de939e279a66236f91ae98d6e03e94483b0fb70a Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 6 Jul 2022 14:28:20 +0200 Subject: [PATCH 161/220] Run tasks in parallel --- sbysrc/sby.py | 99 +++++++++++++++++++++++++++++------------- sbysrc/sby_autotune.py | 9 +++- sbysrc/sby_core.py | 12 ++--- 3 files changed, 78 insertions(+), 42 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 0628c6d2..7e55da8c 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -19,7 +19,7 @@ import argparse, json, os, sys, shutil, tempfile, re ##yosys-sys-path## -from sby_core import SbyConfig, SbyTask, SbyAbort, process_filename +from sby_core import SbyConfig, SbyTask, SbyAbort, SbyTaskloop, process_filename import time, platform class DictAction(argparse.Action): @@ -401,7 +401,7 @@ def find_files(taskname): print("ERROR: Exactly one task is required when workdir is specified. 
Specify the task or use --prefix instead of -d.", file=sys.stderr) sys.exit(1) -def run_task(taskname): +def start_task(taskloop, taskname): sbyconfig, _, _, _ = read_sbyconfig(sbydata, taskname) my_opt_tmpdir = opt_tmpdir @@ -463,48 +463,85 @@ def run_task(taskname): else: junit_filename = "junit" - task = SbyTask(sbyconfig, my_workdir, early_logmsgs, reusedir) + task = SbyTask(sbyconfig, my_workdir, early_logmsgs, reusedir, taskloop) for k, v in exe_paths.items(): task.exe_paths[k] = v - try: - if autotune: - import sby_autotune - sby_autotune.SbyAutotune(task, autotune_config).run() - else: - task.run(setupmode) - except SbyAbort: - if throw_err: - raise + def exit_callback(): + if not autotune and not setupmode: + task.summarize() + task.write_summary_file() - if my_opt_tmpdir: - task.log(f"Removing directory '{my_workdir}'.") - shutil.rmtree(my_workdir, ignore_errors=True) + if my_opt_tmpdir: + task.log(f"Removing directory '{my_workdir}'.") + shutil.rmtree(my_workdir, ignore_errors=True) - if setupmode: - task.log(f"SETUP COMPLETE (rc={task.retcode})") - else: - task.log(f"DONE ({task.status}, rc={task.retcode})") - task.logfile.close() + if setupmode: + task.log(f"SETUP COMPLETE (rc={task.retcode})") + else: + task.log(f"DONE ({task.status}, rc={task.retcode})") + task.logfile.close() + + if not my_opt_tmpdir and not setupmode and not autotune: + with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: + task.print_junit_result(f, junit_ts_name, junit_tc_name, junit_format_strict=False) - if not my_opt_tmpdir and not setupmode and not autotune: - with open("{}/{}.xml".format(task.workdir, junit_filename), "w") as f: - task.print_junit_result(f, junit_ts_name, junit_tc_name, junit_format_strict=False) + with open(f"{task.workdir}/status", "w") as f: + print(f"{task.status} {task.retcode} {task.total_time}", file=f) - with open(f"{task.workdir}/status", "w") as f: - print(f"{task.status} {task.retcode} {task.total_time}", file=f) + task.exit_callback = exit_callback - return task.retcode + if not autotune: + task.setup_procs(setupmode) + task.task_local_abort = not throw_err + return task failed = [] retcode = 0 -for task in tasknames: - task_retcode = run_task(task) - retcode |= task_retcode - if task_retcode: - failed.append(task) + +# Autotune is already parallel, parallelizing it across tasks needs some more work +sequential = autotune # TODO selection between parallel/sequential + +if sequential: + for taskname in tasknames: + taskloop = SbyTaskloop() + try: + task = start_task(taskloop, taskname) + except SbyAbort: + if throw_err: + raise + sys.exit(1) + + if autotune: + from sby_autotune import SbyAutotune + SbyAutotune(task, autotune_config).run() + elif setupmode: + task.exit_callback() + else: + taskloop.run() + retcode |= task.retcode + if task.retcode: + failed.append(taskname) +else: + taskloop = SbyTaskloop() + + tasks = {} + for taskname in tasknames: + try: + tasks[taskname] = start_task(taskloop, taskname) + except SbyAbort: + if throw_err: + raise + sys.exit(1) + + taskloop.run() + + for taskname, task in tasks.items(): + retcode |= task.retcode + if task.retcode: + failed.append(taskname) if failed and (len(tasknames) > 1 or tasknames[0] is not None): tm = time.localtime() diff --git a/sbysrc/sby_autotune.py b/sbysrc/sby_autotune.py index c7d741c7..771a9a01 100644 --- a/sbysrc/sby_autotune.py +++ b/sbysrc/sby_autotune.py @@ -168,6 +168,7 @@ class SbyAutotune: """Performs automatic engine selection for a given task. 
""" def __init__(self, task, config_file=None): + self.task_exit_callback = task.exit_callback task.exit_callback = lambda: None task.check_timeout = lambda: None task.status = "TIMEOUT" @@ -432,6 +433,8 @@ def run(self): self.task.status = "FAIL" self.task.retcode = 2 + self.task_exit_callback() + def next_candidate(self, peek=False): # peek=True is used to check whether we need to timeout running candidates to # give other candidates a chance. @@ -635,6 +638,8 @@ def __init__(self, autotune, candidate): self.model_time = 0 self.model_requests = [] + self.exit_callback = self.autotune_exit_callback + def parse_config(self, f): super().parse_config(f) @@ -650,8 +655,8 @@ def model(self, model_name): self.log(f"using model '{model_name}'") return self.autotune.model(self, model_name) - def exit_callback(self): - super().exit_callback() + def autotune_exit_callback(self): + self.summarize() self.candidate.total_adjusted_time = int(monotonic() - self.start_clock_time + self.model_time) self.candidate.engine_retcode = self.retcode diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 366817f7..f49aad00 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -437,6 +437,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logf self.precise_prop_status = False self.timeout_reached = False self.task_local_abort = False + self.exit_callback = self.summarize yosys_program_prefix = "" ##yosys-program-prefix## self.exe_paths = { @@ -795,12 +796,6 @@ def update_status(self, new_status): else: assert 0 - def run(self, setupmode): - self.setup_procs(setupmode) - if not setupmode: - self.taskloop.run() - self.write_summary_file() - def handle_non_engine_options(self): with open(f"{self.workdir}/config.sby", "r") as f: self.parse_config(f) @@ -897,6 +892,8 @@ def summarize(self): total_process_time = int((ru.ru_utime + ru.ru_stime) - self.start_process_time) self.total_time = total_process_time + # TODO process time is incorrect when running in parallel + self.summary = [ "Elapsed clock time [H:MM:SS (secs)]: {}:{:02d}:{:02d} ({})".format (total_clock_time // (60*60), (total_clock_time // 60) % 60, total_clock_time % 60, total_clock_time), @@ -929,9 +926,6 @@ def write_summary_file(self): for line in self.summary: print(line, file=f) - def exit_callback(self): - self.summarize() - def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): junit_time = strftime('%Y-%m-%dT%H:%M:%S') if not self.design: From b0786aea434a7a5239c1f74bb898b6e2eb9064a4 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 13 Jul 2022 15:51:26 +0200 Subject: [PATCH 162/220] Make jobserver integration Only implements the POSIX jobserver and will break on windows. Unbreaking it on windows will be done as a follow up. Not used for autotune, that needs some more changes. 
--- sbysrc/sby.py | 27 +++- sbysrc/sby_core.py | 32 +++- sbysrc/sby_jobserver.py | 303 +++++++++++++++++++++++++++++++++++ tests/make/required_tools.py | 2 +- tests/make/test_rules.py | 2 +- 5 files changed, 359 insertions(+), 7 deletions(-) create mode 100644 sbysrc/sby_jobserver.py diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 7e55da8c..63c9f339 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -20,8 +20,11 @@ import argparse, json, os, sys, shutil, tempfile, re ##yosys-sys-path## from sby_core import SbyConfig, SbyTask, SbyAbort, SbyTaskloop, process_filename +from sby_jobserver import SbyJobClient, process_jobserver_environment import time, platform +process_jobserver_environment() # needs to be called early + class DictAction(argparse.Action): def __call__(self, parser, namespace, values, option_string=None): assert isinstance(getattr(namespace, self.dest), dict), f"Use ArgumentParser.set_defaults() to initialize {self.dest} to dict()" @@ -46,6 +49,11 @@ def __call__(self, parser, namespace, values, option_string=None): help="add taskname (useful when sby file is read from stdin)") parser.add_argument("-E", action="store_true", dest="throw_err", help="throw an exception (incl stack trace) for most errors") +parser.add_argument("-j", metavar="", type=int, dest="jobcount", + help="maximum number of processes to run in parallel") +parser.add_argument("--sequential", action="store_true", dest="sequential", + help="run tasks in sequence, not in parallel") + parser.add_argument("--autotune", action="store_true", dest="autotune", help="automatically find a well performing engine and engine configuration for each task") parser.add_argument("--autotune-config", dest="autotune_config", @@ -114,6 +122,8 @@ def __call__(self, parser, namespace, values, option_string=None): setupmode = args.setupmode autotune = args.autotune autotune_config = args.autotune_config +sequential = args.sequential +jobcount = args.jobcount init_config_file = args.init_config_file if sbyfile is not None: @@ -501,12 +511,22 @@ def exit_callback(): failed = [] retcode = 0 +if jobcount is not None and jobcount < 1: + print("ERROR: The -j option requires a positive number as argument") + sys.exit(1) + # Autotune is already parallel, parallelizing it across tasks needs some more work -sequential = autotune # TODO selection between parallel/sequential +if autotune: + sequential = True if sequential: + if autotune: + jobclient = None # TODO make autotune use a jobclient + else: + jobclient = SbyJobClient(jobcount) + for taskname in tasknames: - taskloop = SbyTaskloop() + taskloop = SbyTaskloop(jobclient) try: task = start_task(taskloop, taskname) except SbyAbort: @@ -525,7 +545,8 @@ def exit_callback(): if task.retcode: failed.append(taskname) else: - taskloop = SbyTaskloop() + jobclient = SbyJobClient(jobcount) + taskloop = SbyTaskloop(jobclient) tasks = {} for taskname in tasknames: diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index f49aad00..1f72f56b 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -82,6 +82,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile self.logstderr = logstderr self.silent = silent self.wait = False + self.job_lease = None self.task.update_proc_pending(self) @@ -162,6 +163,13 @@ def poll(self, force_unchecked=False): if not dep.finished: return + if self.task.taskloop.jobclient: + if self.job_lease is None: + self.job_lease = self.task.taskloop.jobclient.request_lease() + + if not self.job_lease.is_ready: + return + if not self.silent: 
self.task.log(f"{self.info}: starting process \"{self.cmdline}\"") @@ -190,8 +198,12 @@ def preexec_fn(): # The process might have written something since the last time we checked self.read_output() + if self.job_lease: + self.job_lease.done() + if not self.silent: self.task.log(f"{self.info}: finished (returncode={self.p.returncode})") + self.task.update_proc_stopped(self) self.running = False self.exited = True @@ -378,18 +390,26 @@ def error(self, logmessage): class SbyTaskloop: - def __init__(self): + def __init__(self, jobclient=None): self.procs_pending = [] self.procs_running = [] self.tasks = [] self.poll_now = False + self.jobclient = jobclient def run(self): for proc in self.procs_pending: proc.poll() - while len(self.procs_running) or self.poll_now: + + waiting_for_jobslots = False + if self.jobclient: + waiting_for_jobslots = self.jobclient.has_pending_leases() + + while self.procs_running or waiting_for_jobslots or self.poll_now: fds = [] + if self.jobclient: + fds.extend(self.jobclient.poll_fds()) for proc in self.procs_running: if proc.running: fds.append(proc.p.stdout) @@ -404,12 +424,20 @@ def run(self): sleep(0.1) self.poll_now = False + if self.jobclient: + self.jobclient.poll() + + self.procs_waiting = [] + for proc in self.procs_running: proc.poll() for proc in self.procs_pending: proc.poll() + if self.jobclient: + waiting_for_jobslots = self.jobclient.has_pending_leases() + tasks = self.tasks self.tasks = [] for task in tasks: diff --git a/sbysrc/sby_jobserver.py b/sbysrc/sby_jobserver.py new file mode 100644 index 00000000..0f359b8b --- /dev/null +++ b/sbysrc/sby_jobserver.py @@ -0,0 +1,303 @@ +# +# SymbiYosys (sby) -- Front-end for Yosys-based formal verification flows +# +# Copyright (C) 2022 Jannis Harder +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. +# + +import atexit +import fcntl +import os +import select +import shlex +import subprocess +import sys +import weakref +import signal + + +inherited_jobcount = None +inherited_jobserver_auth = None +inherited_jobserver_auth_present = None + +def process_jobserver_environment(): + """Process the environment looking for a make jobserver. 
This should be called + early (when only inherited fds are present) to reliably detect whether the jobserver + specified in the environment is accessible.""" + global inherited_jobcount + global inherited_jobserver_auth + global inherited_jobserver_auth_present + + if len(sys.argv) >= 2 and sys.argv[1] == '--jobserver-helper': + jobserver_helper(*map(int, sys.argv[2:])) + exit(0) + + inherited_jobserver_auth_present = False + + for flag in shlex.split(os.environ.get("MAKEFLAGS", "")): + if flag.startswith("-j"): + if flag == "-j": + inherited_jobcount = 0 + else: + try: + inherited_jobcount = int(flag[2:]) + except ValueError: + pass + elif flag.startswith("--jobserver-auth=") or flag.startswith("--jobserver-fds="): + inherited_jobserver_auth_present = True + arg = flag.split("=", 1)[1].split(",") + try: + jobserver_fds = int(arg[0]), int(arg[1]) + for fd in jobserver_fds: + fcntl.fcntl(fd, fcntl.F_GETFD) + except (ValueError, OSError): + pass + else: + inherited_jobserver_auth = jobserver_fds + + +def jobserver_helper(jobserver_read_fd, jobserver_write_fd, request_fd, response_fd): + """Helper process to handle blocking jobserver pipes.""" + signal.signal(signal.SIGINT, signal.SIG_IGN) + pending = 0 + while True: + try: + new_pending = len(os.read(request_fd, 1024)) + if new_pending == 0: + pending = 0 + break + else: + pending += new_pending + continue + except BlockingIOError: + if pending == 0: + select.select([request_fd], [], []) + continue + + if pending > 0: + try: + # Depending on the make version (4.3 vs 4.2) this is blocking or + # non-blocking. As this is an attribute of the pipe not the fd, we + # cannot change it without affecting other processes. Older versions of + # gnu make require this to be blocking, and produce errors if it is + # non-blocking. Newer versions of gnu make set this non-blocking, both, + # as client and as server. The documentation still says it is blocking. + # This leaves us no choice but to handle both cases, which is the reason + # we have this helper process in the first place. + token = os.read(jobserver_read_fd, 1) + except BlockingIOError: + select.select([jobserver_read_fd], [], []) + continue + + pending -= 1 + + try: + os.write(response_fd, token) + except: + os.write(jobserver_write_fd, token) + raise + os.close(jobserver_write_fd) + + +class SbyJobLease: + def __init__(self, client): + self.client = client + self.is_ready = False + self.is_done = False + + def done(self): + if self.is_ready and not self.is_done: + self.client.return_lease() + + self.is_done = True + + def __repr__(self): + return f"{self.is_ready=} {self.is_done=}" + + def __del__(self): + self.done() + + +class SbyJobServer: + def __init__(self, jobcount): + assert jobcount >= 1 + # TODO support unlimited parallelism? 
+ self.jobcount = jobcount + if jobcount == 1: + self.read_fd, self.write_fd = None, None + self.makeflags = None + elif jobcount > 1: + self.read_fd, self.write_fd = os.pipe() + if os.getenv('SBY_BLOCKING_JOBSERVER') != '1': + os.set_blocking(self.read_fd, False) + os.write(self.write_fd, b"*" * (jobcount - 1)) + self.makeflags = f"-j{jobcount} --jobserver-auth={self.read_fd},{self.write_fd} --jobserver-fds={self.read_fd},{self.write_fd}" + + +class SbyJobClient: + def __init__(self, fallback_jobcount=None): + self.jobcount = None + self.read_fd = self.write_fd = None + self.helper_process = None + + self.local_slots = 1 + self.acquired_slots = [] + self.pending_leases = [] + + assert inherited_jobserver_auth_present is not None, "process_jobserver_environment was not called" + + have_jobserver = inherited_jobserver_auth_present + + if have_jobserver and inherited_jobserver_auth is None: + print("WARNING: Could not connect to jobserver specified in MAKEFLAGS, disabling parallel execution.") + have_jobserver = False + fallback_jobcount = 1 + + if have_jobserver: + jobcount = inherited_jobcount + elif fallback_jobcount is not None: + jobcount = fallback_jobcount + elif inherited_jobcount is not None and inherited_jobcount > 0: + jobcount = inherited_jobcount + else: + try: + jobcount = len(os.sched_getaffinity(0)) + except AttributeError: + jobcount = os.cpu_count() + + if have_jobserver: + self.read_fd, self.write_fd = inherited_jobserver_auth + else: + self.sby_jobserver = SbyJobServer(jobcount) + self.read_fd = self.sby_jobserver.read_fd + self.write_fd = self.sby_jobserver.write_fd + + self.jobcount = jobcount + + if self.read_fd is not None: + if os.get_blocking(self.read_fd): + request_read_fd, self.request_write_fd = os.pipe() + self.response_read_fd, response_write_fd = os.pipe() + os.set_blocking(self.response_read_fd, False) + os.set_blocking(request_read_fd, False) + + pass_fds = [self.read_fd, self.write_fd, request_read_fd, response_write_fd] + + self.helper_process = subprocess.Popen( + [sys.executable, sys.modules['__main__'].__file__, '--jobserver-helper', *map(str, pass_fds)], + stdin=subprocess.DEVNULL, + pass_fds=pass_fds, + ) + + os.close(request_read_fd) + os.close(response_write_fd) + + atexit.register(self.atexit_blocking) + else: + atexit.register(self.atexit_nonblocking) + + def atexit_nonblocking(self): + while self.acquired_slots: + os.write(self.write_fd, self.acquired_slots.pop()) + + def atexit_blocking(self): + # Return all slot tokens we are currently holding + while self.acquired_slots: + os.write(self.write_fd, self.acquired_slots.pop()) + + if self.helper_process: + # Closing the request pipe signals the helper that we want to exit + os.close(self.request_write_fd) + + # The helper might have been in the process of sending us some tokens, which + # we still need to return + while True: + try: + token = os.read(self.response_read_fd, 1) + except BlockingIOError: + select.select([self.response_read_fd], [], []) + continue + if not token: + break + os.write(self.write_fd, token) + os.close(self.response_read_fd) + + # Wait for the helper to exit, should be immediate at this point + self.helper_process.wait() + + def request_lease(self): + pending = SbyJobLease(self) + + if self.local_slots > 0: + self.local_slots -= 1 + pending.is_ready = True + else: + self.pending_leases.append(weakref.ref(pending)) + if self.helper_process: + os.write(self.request_write_fd, b"!") + + return pending + + def return_lease(self): + if self.acquired_slots: +
os.write(self.write_fd, self.acquired_slots.pop()) + return + + if self.activate_pending_lease(): + return + + self.local_slots += 1 + + def activate_pending_lease(self): + while self.pending_leases: + pending = self.pending_leases.pop(0)() + if pending is None: + continue + pending.is_ready = True + return True + return False + + def has_pending_leases(self): + while self.pending_leases and not self.pending_leases[-1](): + self.pending_leases.pop() + return bool(self.pending_leases) + + def poll_fds(self): + if self.helper_process: + return [self.response_read_fd] + elif self.read_fd is not None: + return [self.read_fd] + else: + return [] + + def poll(self): + read_fd = self.response_read_fd if self.helper_process else self.read_fd + if read_fd is None: + return + + while self.helper_process or self.has_pending_leases(): + try: + token = os.read(read_fd, 1) + except BlockingIOError: + break + + self.got_token(token) + + def got_token(self, token): + self.acquired_slots.append(token) + + if self.activate_pending_lease(): + return + + self.return_lease() diff --git a/tests/make/required_tools.py b/tests/make/required_tools.py index ce333564..82b5f499 100644 --- a/tests/make/required_tools.py +++ b/tests/make/required_tools.py @@ -59,7 +59,7 @@ def found_tools(): exit(noskip) print(command, flush=True) - exit(subprocess.call(command, shell=True)) + exit(subprocess.call(command, shell=True, close_fds=False)) found_tools = [] check_tools = set() diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 9607d814..64f80e18 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -82,7 +82,7 @@ def parse_engine(engine): command = f"cd {sby_dir_unix} && python3 $(SBY_MAIN) -f {sby_file.name} {task}" print( - f"\t@python3 make/required_tools.py run {target} {shlex.quote(command)} {shlex.join(required_tools)}", + f"\t+@python3 make/required_tools.py run {target} {shlex.quote(command)} {shlex.join(required_tools)}", file=rules, ) From e91977e01ef00454438b5b6739c2035f5438f93b Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 13 Jul 2022 18:14:19 +0200 Subject: [PATCH 163/220] Use local jobslots as fallback on Windows. As we have no make jobserver support on windows, fallback to using process local slots to limit parallelism.
--- sbysrc/sby_jobserver.py | 31 +++++++++++++++++++++---------- 1 file changed, 21 insertions(+), 10 deletions(-) diff --git a/sbysrc/sby_jobserver.py b/sbysrc/sby_jobserver.py index 0f359b8b..104bcc35 100644 --- a/sbysrc/sby_jobserver.py +++ b/sbysrc/sby_jobserver.py @@ -17,7 +17,6 @@ # import atexit -import fcntl import os import select import shlex @@ -26,6 +25,8 @@ import weakref import signal +if os.name == "posix": + import fcntl inherited_jobcount = None inherited_jobserver_auth = None @@ -56,15 +57,16 @@ def process_jobserver_environment(): pass elif flag.startswith("--jobserver-auth=") or flag.startswith("--jobserver-fds="): inherited_jobserver_auth_present = True - arg = flag.split("=", 1)[1].split(",") - try: - jobserver_fds = int(arg[0]), int(arg[1]) - for fd in jobserver_fds: - fcntl.fcntl(fd, fcntl.F_GETFD) - except (ValueError, OSError): - pass - else: - inherited_jobserver_auth = jobserver_fds + if os.name == "posix": + arg = flag.split("=", 1)[1].split(",") + try: + jobserver_fds = int(arg[0]), int(arg[1]) + for fd in jobserver_fds: + fcntl.fcntl(fd, fcntl.F_GETFD) + except (ValueError, OSError): + pass + else: + inherited_jobserver_auth = jobserver_fds def jobserver_helper(jobserver_read_fd, jobserver_write_fd, request_fd, response_fd): @@ -159,6 +161,12 @@ def __init__(self, fallback_jobcount=None): have_jobserver = inherited_jobserver_auth_present + if os.name == "nt" and inherited_jobserver_auth_present: + # There are even more incompatible variants of the make jobserver on + # windows, none of them are supported for now. + print("WARNING: Found jobserver in MAKEFLAGS, this is not supported on windows.") + have_jobserver = False + if have_jobserver and inherited_jobserver_auth is None: print("WARNING: Could not connect to jobserver specified in MAKEFLAGS, disabling parallel execution.") have_jobserver = False @@ -178,6 +186,9 @@ def __init__(self, fallback_jobcount=None): if have_jobserver: self.read_fd, self.write_fd = inherited_jobserver_auth + elif os.name == "nt": + # On Windows, without a jobserver, use only local slots + self.local_slots = jobcount else: self.sby_jobserver = SbyJobServer(jobcount) self.read_fd = self.sby_jobserver.read_fd From df2610d598cee6025c86ab5be60696fbd7c5970f Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 22 Aug 2022 21:18:40 +1200 Subject: [PATCH 164/220] Fixes before merge --- docs/source/index.rst | 5 ----- docs/source/install.rst | 1 + docs/source/newstart.rst | 2 +- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/docs/source/index.rst b/docs/source/index.rst index 7e8948b7..43c8185d 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -10,11 +10,6 @@ formal tasks: * Unbounded verification of safety properties * Generation of test benches from cover statements * Verification of liveness properties - * Formal equivalence checking [TBD] - * Reactive Synthesis [TBD] - -(Items marked [TBD] are features under construction and not available -at the moment.) .. toctree:: :maxdepth: 3 diff --git a/docs/source/install.rst b/docs/source/install.rst index a232c866..604737b6 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -88,6 +88,7 @@ Boolector https://boolector.github.io .. 
code-block:: text + git clone https://github.com/boolector/boolector cd boolector ./contrib/setup-btor2tools.sh diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst index 6b4c1ef6..604d04c7 100644 --- a/docs/source/newstart.rst +++ b/docs/source/newstart.rst @@ -233,7 +233,7 @@ while still passing all of the tests? .. note:: If you need a **hint**, try increasing the width of the address wires. 4 bits - supports up to :math:`2^4=16` addresses. Are there other signals that + supports up to 2\ :sup:`4`\ =16 addresses. Are there other signals that need to be wider? Can you make the width parameterisable to support arbitrarily large buffers? From 82a6edf295d794d337fc285b58dfe426c5c53982 Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Mon, 22 Aug 2022 21:20:59 +1200 Subject: [PATCH 165/220] Moving newstart to replace quickstart --- docs/source/index.rst | 2 +- docs/source/newstart.rst | 307 ------------------------------- docs/source/quickstart.rst | 368 ++++++++++++++++++++++++++++--------- 3 files changed, 279 insertions(+), 398 deletions(-) delete mode 100644 docs/source/newstart.rst diff --git a/docs/source/index.rst b/docs/source/index.rst index 43c8185d..fbe43c5f 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -15,7 +15,7 @@ formal tasks: :maxdepth: 3 install.rst - newstart.rst + quickstart.rst reference.rst autotune.rst verilog.rst diff --git a/docs/source/newstart.rst b/docs/source/newstart.rst deleted file mode 100644 index 604d04c7..00000000 --- a/docs/source/newstart.rst +++ /dev/null @@ -1,307 +0,0 @@ - -Getting started -=============== - -.. note:: - - This tutorial assumes sby and boolector installation as per the - :ref:`install-doc`. For this tutorial, it is also recommended to install - `GTKWave `_, an open source VCD viewer. - `Source files used in this tutorial - `_ can be - found on the sby git, under ``docs/examples/fifo``. - -First In, First Out (FIFO) buffer -********************************* - -From `Wikipedia `_, -a FIFO is - - a method for organizing the manipulation of a data structure (often, - specifically a data buffer) where the oldest (first) entry, or "head" of the - queue, is processed first. - - Such processing is analogous to servicing people in a queue area on a - first-come, first-served (FCFS) basis, i.e. in the same sequence in which - they arrive at the queue's tail. - -In hardware we can create such a construct by providing two addresses into a -register file. This tutorial will use an example implementation provided in -`fifo.sv`. - -First, the address generator module: - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: address generator - :end-at: endmodule - -This module is instantiated twice; once for the write address and once for the -read address. In both cases, the address will start at and reset to 0, and will -increment by 1 when an enable signal is received. When the address pointers -increment from the maximum storage value they reset back to 0, providing a -circular queue. - -Next, the register file: - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: fifo storage - :end-before: end storage - :dedent: - -Notice that this register design includes a synchronous write and asynchronous -read. Each word is 8 bits, and up to 16 words can be stored in the buffer. - -Verification properties -*********************** - -In order to verify our design we must first define properties that it must -satisfy. 
For example, there must never be more than there is memory available. -By assigning a signal to count the number of values in the buffer, we can make -the following assertion in the code: - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: a_oflow - :end-at: ; - :dedent: - -It is also possible to use the prior value of a signal for comparison. This can -be used, for example, to ensure that the count is only able to increase or -decrease by 1. A case must be added to handle resetting the count directly to -0, as well as if the count does not change. This can be seen in the following -code; at least one of these conditions must be true at all times if our design -is to be correct. - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: a_counts - :end-at: ; - :dedent: - -As our count signal is used independently of the read and write pointers, we -must verify that the count is always correct. While the write pointer will -always be at the same point or *after* the read pointer, the circular buffer -means that the write *address* could wrap around and appear *less than* the read -address. So we must first perform some simple arithmetic to find the absolute -difference in addresses, and then compare with the count signal. - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: assign addr_diff - :end-at: ; - :dedent: - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: a_count_diff - :end-at: ; - :dedent: - -SymbiYosys -********** - -SymbiYosys (sby) uses a .sby file to define a set of tasks used for -verification. - -**basic** - Bounded model check of design. - -**nofullskip** - Demonstration of failing model using an unbounded model check. - -**cover** - Cover mode (testing cover statements). - -**noverific** - Test fallback to default Verilog frontend. - -The use of the ``:default`` tag indicates that by default, basic and cover -should be run if no tasks are specified, such as when running the command below. - - sby fifo.sby - -.. note:: - - The default set of tests should all pass. If this is not the case there may - be a problem with the installation of sby or one of its solvers. - -To see what happens when a test fails, the below command can be used. Note the -use of the ``-f`` flag to automatically overwrite existing task output. While -this may not be necessary on the first run, it is quite useful when making -adjustments to code and rerunning tests to validate. - - sby -f fifo.sby nofullskip - -The nofullskip task disables the code shown below. Because the count signal has -been written such that it cannot exceed MAX_DATA, removing this code will lead -to the ``a_count_diff`` assertion failing. Without this assertion, there is no -guarantee that data will be read in the same order it was written should an -overflow occur and the oldest data be written. - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: NO_FULL_SKIP - :end-at: endif - :lines: 1-5,9 - -The last few lines of output for the nofullskip task should be similar to the -following: - -.. 
code-block:: text - - SBY [fifo_nofullskip] engine_0.basecase: ## Assert failed in fifo: a_count_diff - SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to VCD file: engine_0/trace.vcd - SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to Verilog testbench: engine_0/trace_tb.v - SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to constraints file: engine_0/trace.smtc - SBY [fifo_nofullskip] engine_0.basecase: ## Status: failed - SBY [fifo_nofullskip] engine_0.basecase: finished (returncode=1) - SBY [fifo_nofullskip] engine_0: Status returned by engine for basecase: FAIL - SBY [fifo_nofullskip] engine_0.induction: terminating process - SBY [fifo_nofullskip] summary: Elapsed clock time [H:MM:SS (secs)]: 0:00:02 (2) - SBY [fifo_nofullskip] summary: Elapsed process time unvailable on Windows - SBY [fifo_nofullskip] summary: engine_0 (smtbmc boolector) returned FAIL for basecase - SBY [fifo_nofullskip] summary: counterexample trace: fifo_nofullskip/engine_0/trace.vcd - SBY [fifo_nofullskip] DONE (FAIL, rc=2) - SBY The following tasks failed: ['nofullskip'] - -Using the ``noskip.gtkw`` file provided, use the below command to examine the -error trace. - - gtkwave fifo_nofullskip/engine_0/trace.vcd noskip.gtkw - -This should result in something similar to the below image. We can immediately -see that ``data_count`` and ``addr_diff`` are different. Looking a bit deeper -we can see that in order to reach this state the read enable signal was high in -the first clock cycle while write enable is low. This leads to an underfill -where a value is read while the buffer is empty and the read address increments -to a higher value than the write address. - -.. image:: media/gtkwave_noskip.png - -During correct operation, the ``w_underfill`` statement will cover the underflow -case. Examining ``fifo_cover/logfile.txt`` will reveal which trace file -includes the cover statment we are looking for. If this file doesn't exist, run -the code below. - - sby fifo.sby cover - -Searching the file for ``w_underfill`` will reveal the below. - -.. code-block:: text - - $ grep "w_underfill" fifo_cover/logfile.txt -A 1 - SBY [fifo_cover] engine_0: ## Reached cover statement at w_underfill in step 2. - SBY [fifo_cover] engine_0: ## Writing trace to VCD file: engine_0/trace4.vcd - -We can then run gtkwave with the trace file indicated to see the correct -operation as in the image below. When the buffer is empty, a read with no write -will result in the ``wksip`` signal going high, incrementing *both* read and -write addresses and avoiding underflow. - - gtkwave fifo_cover/engine_0/trace4.vcd noskip.gtkw - -.. image:: media/gtkwave_coverskip.png - -.. note:: - - Implementation of the ``w_underfill`` cover statement depends on whether - Verific is used or not. See the `Concurrent assertions`_ section for more - detail. - -Exercise -******** - -Adjust the ``[script]`` section of ``fifo.sby`` so that it looks like the below. - -.. code-block:: text - - [script] - nofullskip: read -define NO_FULL_SKIP=1 - noverific: read -noverific - read -formal fifo.sv - hierarchy -check -top fifo -chparam MAX_DATA 17 - prep -top fifo - -The ``hierarchy`` command we added changes the ``MAX_DATA`` parameter of the top -module to be 17. Now run the ``basic`` task and see what happens. It should -fail and give an error like ``Assert failed in fifo: a_count_diff``. Can you -modify the verilog code so that it works with larger values of ``MAX_DATA`` -while still passing all of the tests? - -.. 
note:: - - If you need a **hint**, try increasing the width of the address wires. 4 bits - supports up to 2\ :sup:`4`\ =16 addresses. Are there other signals that - need to be wider? Can you make the width parameterisable to support - arbitrarily large buffers? - -Once the tests are passing with ``MAX_DATA=17``, try something bigger, like 64, -or 100. Does the ``basic`` task still pass? What about ``cover``? By default, -``bmc`` & ``cover`` modes will run to a depth of 20 cycles. If a maximum of one -value can be loaded in each cycle, how many cycles will it take to load 100 -values? Using the :ref:`.sby reference page `, -try to increase the cover mode depth to be at least a few cycles larger than the -``MAX_DATA``. - -.. note:: - - Reference files are provided in the ``fifo/golden`` directory, showing how - the verilog could have been modified and how a ``bigtest`` task could be - added. - -Concurrent assertions -********************* - -Until this point, all of the properties described have been *immediate* -assertions. As the name suggests, immediate assertions are evaluated -immediately whereas concurrent assertions allow for the capture of sequences of -events which occur across time. The use of concurrent assertions requires a -more advanced series of checks. - -Compare the difference in implementation of ``w_underfill`` depending on the -presence of Verific. ``w_underfill`` looks for a sequence of events where the -write enable is low but the write address changes in the following cycle. This -is the expected behaviour for reading while empty and implies that the -``w_skip`` signal went high. Verific enables elaboration of SystemVerilog -Assertions (SVA) properties. Here we use such a property, ``write_skip``. - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: property write_skip - :end-at: w_underfill - :dedent: - -This property describes a *sequence* of events which occurs on the ``clk`` -signal and are disabled/restarted when the ``rst`` signal is high. The property -first waits for a low ``wen`` signal, and then a change in ``waddr`` in the -following cycle. ``w_underfill`` is then a cover of this property to verify -that it is possible. Now look at the implementation without Verific. - -.. literalinclude:: ../examples/fifo/fifo.sv - :language: systemverilog - :start-at: reg past_nwen; - :end-before: end w_underfill - :dedent: - -In this case we do not have access to SVA properties and are more limited in the -tools available to us. Ideally we would use ``$past`` to read the value of -``wen`` in the previous cycle and then check for a change in ``waddr``. However, -in the first cycle of simulation, reading ``$past`` will return a value of -``X``. This results in false triggers of the property so we instead implement -the ``past_nwen`` register which we can initialise to ``0`` and ensure it does -not trigger in the first cycle. - -As verification properties become more complex and check longer sequences, the -additional effort of hand-coding without SVA properties becomes much more -difficult. Using a parser such as Verific supports these checks *without* -having to write out potentially complicated state machines. Verific is included -for use in the *Tabby CAD Suite*. - -Further information -******************* -For more information on the uses of assertions and the difference between -immediate and concurrent assertions, refer to appnote 109: `Property Checking -with SystemVerilog Assertions -`_. 
diff --git a/docs/source/quickstart.rst b/docs/source/quickstart.rst index 1e660393..604d04c7 100644 --- a/docs/source/quickstart.rst +++ b/docs/source/quickstart.rst @@ -1,119 +1,307 @@ -Getting Started +Getting started =============== -The example files used in this chapter can be downloaded from `here -`_. +.. note:: -First step: A simple BMC example --------------------------------- + This tutorial assumes sby and boolector installation as per the + :ref:`install-doc`. For this tutorial, it is also recommended to install + `GTKWave `_, an open source VCD viewer. + `Source files used in this tutorial + `_ can be + found on the sby git, under ``docs/examples/fifo``. -Here is a simple example design with a safety property (assertion). +First In, First Out (FIFO) buffer +********************************* -.. literalinclude:: ../examples/quickstart/demo.sv +From `Wikipedia `_, +a FIFO is + + a method for organizing the manipulation of a data structure (often, + specifically a data buffer) where the oldest (first) entry, or "head" of the + queue, is processed first. + + Such processing is analogous to servicing people in a queue area on a + first-come, first-served (FCFS) basis, i.e. in the same sequence in which + they arrive at the queue's tail. + +In hardware we can create such a construct by providing two addresses into a +register file. This tutorial will use an example implementation provided in +`fifo.sv`. + +First, the address generator module: + +.. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog + :start-at: address generator + :end-at: endmodule -The property in this example is true. We'd like to verify this using a bounded -model check (BMC) that is 100 cycles deep. +This module is instantiated twice; once for the write address and once for the +read address. In both cases, the address will start at and reset to 0, and will +increment by 1 when an enable signal is received. When the address pointers +increment from the maximum storage value they reset back to 0, providing a +circular queue. -SymbiYosys is controlled by ``.sby`` files. The following file can be used to -configure SymbiYosys to run a BMC for 100 cycles on the design: +Next, the register file: -.. literalinclude:: ../examples/quickstart/demo.sby - :language: text +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: fifo storage + :end-before: end storage + :dedent: -Simply create a text file ``demo.sv`` with the example design and another text -file ``demo.sby`` with the SymbiYosys configuration. Then run:: +Notice that this register design includes a synchronous write and asynchronous +read. Each word is 8 bits, and up to 16 words can be stored in the buffer. - sby demo.sby +Verification properties +*********************** -This will run a bounded model check for 100 cycles. The last few lines of the -output should look something like this: +In order to verify our design we must first define properties that it must +satisfy. For example, there must never be more than there is memory available. +By assigning a signal to count the number of values in the buffer, we can make +the following assertion in the code: -.. code-block:: text +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: a_oflow + :end-at: ; + :dedent: + +It is also possible to use the prior value of a signal for comparison. This can +be used, for example, to ensure that the count is only able to increase or +decrease by 1. 
A case must be added to handle resetting the count directly to +0, as well as if the count does not change. This can be seen in the following +code; at least one of these conditions must be true at all times if our design +is to be correct. + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: a_counts + :end-at: ; + :dedent: + +As our count signal is used independently of the read and write pointers, we +must verify that the count is always correct. While the write pointer will +always be at the same point or *after* the read pointer, the circular buffer +means that the write *address* could wrap around and appear *less than* the read +address. So we must first perform some simple arithmetic to find the absolute +difference in addresses, and then compare with the count signal. + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: assign addr_diff + :end-at: ; + :dedent: - SBY [demo] engine_0: ## 0 0:00:00 Checking asserts in step 96.. - SBY [demo] engine_0: ## 0 0:00:00 Checking asserts in step 97.. - SBY [demo] engine_0: ## 0 0:00:00 Checking asserts in step 98.. - SBY [demo] engine_0: ## 0 0:00:00 Checking asserts in step 99.. - SBY [demo] engine_0: ## 0 0:00:00 Status: PASSED - SBY [demo] engine_0: Status returned by engine: PASS - SBY [demo] engine_0: finished (returncode=0) - SBY [demo] summary: Elapsed clock time [H:MM:SS (secs)]: 0:00:00 (0) - SBY [demo] summary: Elapsed process time [H:MM:SS (secs)]: 0:00:00 (0) - SBY [demo] summary: engine_0 (smtbmc) returned PASS - SBY [demo] DONE (PASS) - -This will also create a ``demo/`` directory tree with all relevant information, -such as a copy of the design source, various log files, and trace data in case -the proof fails. - -(Use ``sby -f demo.sby`` to re-run the proof. Without ``-f`` the command will -fail because the output directory ``demo/`` already exists.) - -Time for a simple exercise: Modify the design so that the property is false -and the offending state is reachable within 100 cycles. Re-run ``sby`` with -the modified design and see if the proof now fails. Inspect the counterexample -trace (``.vcd`` file) produced by ``sby``. (`GTKWave `_ -is an open source VCD viewer that you can use.) - -Selecting the right engine --------------------------- - -The ``.sby`` file for a project selects one or more engines. (When multiple -engines are selected, all engines are executed in parallel and the result -returned by the first engine to finish is the result returned by SymbiYosys.) - -Each engine has its strengths and weaknesses. Therefore it is important to -select the right engine for each project. The documentation for the individual -engines can provide some guidance for engine selection. (Trial and error can -also be a useful method for evaluating engines.) - -Let's consider the following example: - -.. literalinclude:: ../examples/quickstart/memory.sv +.. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog + :start-at: a_count_diff + :end-at: ; + :dedent: + +SymbiYosys +********** -This example is expected to fail verification (see the BUG comment). -The following ``.sby`` file can be used to show this: +SymbiYosys (sby) uses a .sby file to define a set of tasks used for +verification. -.. literalinclude:: ../examples/quickstart/memory.sby - :language: text +**basic** + Bounded model check of design. -This project uses the ``smtbmc`` engine, which uses SMT solvers to perform the -proof. 
This engine uses the array-theories provided by those solvers to -efficiently model memories. Since this example uses large memories, the -``smtbmc`` engine is a good match. +**nofullskip** + Demonstration of failing model using an unbounded model check. -(``smtbmc boolector`` selects Boolector as SMT solver, ``smtbmc z3`` selects -Z3, and ``smtbmc yices`` selects Yices 2. Yices 2 is the default solver when -no argument is used with ``smtbmc``.) +**cover** + Cover mode (testing cover statements). -Exercise: The engine ``abc bmc3`` does not provide abstract memory models. -Therefore SymbiYosys has to synthesize the memories in the example to FFs -and address logic. How does the performance of this project change if -``abc bmc3`` is used as engine instead of ``smtbmc boolector``? How fast -can either engine verify the design when the bug has been fixed? +**noverific** + Test fallback to default Verilog frontend. -Beyond bounded model checks ---------------------------- +The use of the ``:default`` tag indicates that by default, basic and cover +should be run if no tasks are specified, such as when running the command below. -Bounded model checks only prove that the safety properties hold for the first -*N* cycles (where *N* is the depth of the BMC). Sometimes this is insufficient -and we need to prove that the safety properties hold forever, not just the first -*N* cycles. Let us consider the following example: + sby fifo.sby -.. literalinclude:: ../examples/quickstart/prove.sv +.. note:: + + The default set of tests should all pass. If this is not the case there may + be a problem with the installation of sby or one of its solvers. + +To see what happens when a test fails, the below command can be used. Note the +use of the ``-f`` flag to automatically overwrite existing task output. While +this may not be necessary on the first run, it is quite useful when making +adjustments to code and rerunning tests to validate. + + sby -f fifo.sby nofullskip + +The nofullskip task disables the code shown below. Because the count signal has +been written such that it cannot exceed MAX_DATA, removing this code will lead +to the ``a_count_diff`` assertion failing. Without this assertion, there is no +guarantee that data will be read in the same order it was written should an +overflow occur and the oldest data be written. + +.. literalinclude:: ../examples/fifo/fifo.sv :language: systemverilog + :start-at: NO_FULL_SKIP + :end-at: endif + :lines: 1-5,9 + +The last few lines of output for the nofullskip task should be similar to the +following: + +.. 
code-block:: text + + SBY [fifo_nofullskip] engine_0.basecase: ## Assert failed in fifo: a_count_diff + SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to VCD file: engine_0/trace.vcd + SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to Verilog testbench: engine_0/trace_tb.v + SBY [fifo_nofullskip] engine_0.basecase: ## Writing trace to constraints file: engine_0/trace.smtc + SBY [fifo_nofullskip] engine_0.basecase: ## Status: failed + SBY [fifo_nofullskip] engine_0.basecase: finished (returncode=1) + SBY [fifo_nofullskip] engine_0: Status returned by engine for basecase: FAIL + SBY [fifo_nofullskip] engine_0.induction: terminating process + SBY [fifo_nofullskip] summary: Elapsed clock time [H:MM:SS (secs)]: 0:00:02 (2) + SBY [fifo_nofullskip] summary: Elapsed process time unvailable on Windows + SBY [fifo_nofullskip] summary: engine_0 (smtbmc boolector) returned FAIL for basecase + SBY [fifo_nofullskip] summary: counterexample trace: fifo_nofullskip/engine_0/trace.vcd + SBY [fifo_nofullskip] DONE (FAIL, rc=2) + SBY The following tasks failed: ['nofullskip'] + +Using the ``noskip.gtkw`` file provided, use the below command to examine the +error trace. -Proving this design in an unbounded manner can be achieved using the following -SymbiYosys configuration file: + gtkwave fifo_nofullskip/engine_0/trace.vcd noskip.gtkw -.. literalinclude:: ../examples/quickstart/prove.sby - :language: text +This should result in something similar to the below image. We can immediately +see that ``data_count`` and ``addr_diff`` are different. Looking a bit deeper +we can see that in order to reach this state the read enable signal was high in +the first clock cycle while write enable is low. This leads to an underfill +where a value is read while the buffer is empty and the read address increments +to a higher value than the write address. -Note that ``mode`` is now set to ``prove`` instead of ``bmc``. The ``smtbmc`` -engine in ``prove`` mode will perform a k-induction proof. Other engines can -use other methods, e.g. using ``abc pdr`` will prove the design using the IC3 -algorithm. +.. image:: media/gtkwave_noskip.png +During correct operation, the ``w_underfill`` statement will cover the underflow +case. Examining ``fifo_cover/logfile.txt`` will reveal which trace file +includes the cover statment we are looking for. If this file doesn't exist, run +the code below. + + sby fifo.sby cover + +Searching the file for ``w_underfill`` will reveal the below. + +.. code-block:: text + + $ grep "w_underfill" fifo_cover/logfile.txt -A 1 + SBY [fifo_cover] engine_0: ## Reached cover statement at w_underfill in step 2. + SBY [fifo_cover] engine_0: ## Writing trace to VCD file: engine_0/trace4.vcd + +We can then run gtkwave with the trace file indicated to see the correct +operation as in the image below. When the buffer is empty, a read with no write +will result in the ``wksip`` signal going high, incrementing *both* read and +write addresses and avoiding underflow. + + gtkwave fifo_cover/engine_0/trace4.vcd noskip.gtkw + +.. image:: media/gtkwave_coverskip.png + +.. note:: + + Implementation of the ``w_underfill`` cover statement depends on whether + Verific is used or not. See the `Concurrent assertions`_ section for more + detail. + +Exercise +******** + +Adjust the ``[script]`` section of ``fifo.sby`` so that it looks like the below. + +.. 
code-block:: text + + [script] + nofullskip: read -define NO_FULL_SKIP=1 + noverific: read -noverific + read -formal fifo.sv + hierarchy -check -top fifo -chparam MAX_DATA 17 + prep -top fifo + +The ``hierarchy`` command we added changes the ``MAX_DATA`` parameter of the top +module to be 17. Now run the ``basic`` task and see what happens. It should +fail and give an error like ``Assert failed in fifo: a_count_diff``. Can you +modify the verilog code so that it works with larger values of ``MAX_DATA`` +while still passing all of the tests? + +.. note:: + + If you need a **hint**, try increasing the width of the address wires. 4 bits + supports up to 2\ :sup:`4`\ =16 addresses. Are there other signals that + need to be wider? Can you make the width parameterisable to support + arbitrarily large buffers? + +Once the tests are passing with ``MAX_DATA=17``, try something bigger, like 64, +or 100. Does the ``basic`` task still pass? What about ``cover``? By default, +``bmc`` & ``cover`` modes will run to a depth of 20 cycles. If a maximum of one +value can be loaded in each cycle, how many cycles will it take to load 100 +values? Using the :ref:`.sby reference page `, +try to increase the cover mode depth to be at least a few cycles larger than the +``MAX_DATA``. + +.. note:: + + Reference files are provided in the ``fifo/golden`` directory, showing how + the verilog could have been modified and how a ``bigtest`` task could be + added. + +Concurrent assertions +********************* + +Until this point, all of the properties described have been *immediate* +assertions. As the name suggests, immediate assertions are evaluated +immediately whereas concurrent assertions allow for the capture of sequences of +events which occur across time. The use of concurrent assertions requires a +more advanced series of checks. + +Compare the difference in implementation of ``w_underfill`` depending on the +presence of Verific. ``w_underfill`` looks for a sequence of events where the +write enable is low but the write address changes in the following cycle. This +is the expected behaviour for reading while empty and implies that the +``w_skip`` signal went high. Verific enables elaboration of SystemVerilog +Assertions (SVA) properties. Here we use such a property, ``write_skip``. + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: property write_skip + :end-at: w_underfill + :dedent: + +This property describes a *sequence* of events which occurs on the ``clk`` +signal and are disabled/restarted when the ``rst`` signal is high. The property +first waits for a low ``wen`` signal, and then a change in ``waddr`` in the +following cycle. ``w_underfill`` is then a cover of this property to verify +that it is possible. Now look at the implementation without Verific. + +.. literalinclude:: ../examples/fifo/fifo.sv + :language: systemverilog + :start-at: reg past_nwen; + :end-before: end w_underfill + :dedent: + +In this case we do not have access to SVA properties and are more limited in the +tools available to us. Ideally we would use ``$past`` to read the value of +``wen`` in the previous cycle and then check for a change in ``waddr``. However, +in the first cycle of simulation, reading ``$past`` will return a value of +``X``. This results in false triggers of the property so we instead implement +the ``past_nwen`` register which we can initialise to ``0`` and ensure it does +not trigger in the first cycle. 
+ +As verification properties become more complex and check longer sequences, the +additional effort of hand-coding without SVA properties becomes much more +difficult. Using a parser such as Verific supports these checks *without* +having to write out potentially complicated state machines. Verific is included +for use in the *Tabby CAD Suite*. + +Further information +******************* +For more information on the uses of assertions and the difference between +immediate and concurrent assertions, refer to appnote 109: `Property Checking +with SystemVerilog Assertions +`_. From 586be8ba96a547c38dbdba6fb80309de4a2237d0 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Sat, 3 Sep 2022 00:03:28 +0200 Subject: [PATCH 166/220] tests: Fix test_rules.py after sby config parser changes --- tests/make/test_rules.py | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index 64f80e18..c1b8e840 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -51,15 +51,16 @@ def parse_engine(engine): required_tools = set() - for engine in info["engines"]: - engine, solver = parse_engine(engine) - engines.add(engine) - required_tools.update( - REQUIRED_TOOLS.get((engine, solver), REQUIRED_TOOLS.get(engine, ())) - ) - if solver: - solvers.add(solver) - engine_solvers.add((engine, solver)) + for mode_engines in info["engines"].values(): + for engine in mode_engines: + engine, solver = parse_engine(engine) + engines.add(engine) + required_tools.update( + REQUIRED_TOOLS.get((engine, solver), REQUIRED_TOOLS.get(engine, ())) + ) + if solver: + solvers.add(solver) + engine_solvers.add((engine, solver)) if any( line.startswith("read -verific") or line.startswith("verific") From 326247fdef09eb9096e5d114a2f467b02f84fc24 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Sat, 3 Sep 2022 00:04:40 +0200 Subject: [PATCH 167/220] tests: Skip broken tests --- docs/examples/fifo/golden/{fifo.sby => skip_fifo.sby} | 0 docs/examples/fifo/{fifo.sby => skip_fifo.sby} | 0 2 files changed, 0 insertions(+), 0 deletions(-) rename docs/examples/fifo/golden/{fifo.sby => skip_fifo.sby} (100%) rename docs/examples/fifo/{fifo.sby => skip_fifo.sby} (100%) diff --git a/docs/examples/fifo/golden/fifo.sby b/docs/examples/fifo/golden/skip_fifo.sby similarity index 100% rename from docs/examples/fifo/golden/fifo.sby rename to docs/examples/fifo/golden/skip_fifo.sby diff --git a/docs/examples/fifo/fifo.sby b/docs/examples/fifo/skip_fifo.sby similarity index 100% rename from docs/examples/fifo/fifo.sby rename to docs/examples/fifo/skip_fifo.sby From 168d667b6dbda304aa69201e63377f9c1018a86c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 5 Sep 2022 15:42:24 +0200 Subject: [PATCH 168/220] Add vcd option to make VCD writing optional --- docs/source/reference.rst | 3 +++ sbysrc/sby_core.py | 2 ++ sbysrc/sby_engine_abc.py | 11 ++++++++--- sbysrc/sby_engine_aiger.py | 15 ++++++++++----- sbysrc/sby_engine_btor.py | 25 ++++++++++++++----------- sbysrc/sby_engine_smtbmc.py | 4 +++- tests/unsorted/no_vcd.sby | 37 +++++++++++++++++++++++++++++++++++++ 7 files changed, 77 insertions(+), 20 deletions(-) create mode 100644 tests/unsorted/no_vcd.sby diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 589f9978..29056f3f 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -146,6 +146,9 @@ options are: | | | wait for all engines to return and check for | | | | consistency. Values: ``on``, ``off``. 
Default: ``off`` | +------------------+------------+---------------------------------------------------------+ +| ``vcd`` | All | Write VCD traces for counter-example or cover traces. | +| | | Values: ``on``, ``off``. Default: ``on`` | ++------------------+------------+---------------------------------------------------------+ | ``aigsmt`` | All | Which SMT2 solver to use for converting AIGER witnesses | | | | to counter example traces. Use ``none`` to disable | | | | conversion of AIGER witnesses. Default: ``yices`` | diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 5b841327..3be1fc8c 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -986,6 +986,8 @@ def handle_non_engine_options(self): self.handle_bool_option("wait", False) self.handle_int_option("timeout", None) + self.handle_bool_option("vcd", True) + self.handle_str_option("smtc", None) self.handle_int_option("skip", None) self.handle_str_option("tbtop", None) diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index e96ddb89..7981b776 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -89,15 +89,20 @@ def exit_callback(retcode): task.terminate() if proc_status == "FAIL" and task.opt_aigsmt != "none": + trace_prefix = f"engine_{engine_idx}/trace" + dump_flags = f"--dump-vcd {trace_prefix}.vcd " if task.opt_vcd else "" + dump_flags += f"--dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc" + proc2 = SbyProc( task, f"engine_{engine_idx}", task.model("smt2"), - ("cd {}; {} -s {}{} --noprogress --append {} --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " + - "--dump-smtc engine_{i}/trace.smtc --aig model/design_aiger.aim:engine_{i}/trace.aiw --aig-noheader model/design_smt2.smt2").format + ("cd {}; {} -s {}{} --noprogress --append {} {dump_flags} --aig model/design_aiger.aim:engine_{i}/trace.aiw --aig-noheader model/design_smt2.smt2").format (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", - task.opt_append, i=engine_idx), + task.opt_append, + dump_flags=dump_flags, + i=engine_idx), logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index d59105a0..e7f80292 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -105,15 +105,19 @@ def exit_callback(retcode): if proc_status == "FAIL" and task.opt_aigsmt != "none": if produced_cex: + trace_prefix = f"engine_{engine_idx}/trace" + dump_flags = f"--dump-vcd {trace_prefix}.vcd " if task.opt_vcd else "" + dump_flags += f"--dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc" + if mode == "live": proc2 = SbyProc( task, f"engine_{engine_idx}", task.model("smt2"), - ("cd {}; {} -g -s {}{} --noprogress --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " + - "--dump-smtc engine_{i}/trace.smtc --dump-yw engine_{i}/trace.yw --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format + ("cd {}; {} -g -s {}{} --noprogress {dump_flags} --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", + dump_flags=dump_flags, i=engine_idx), logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) @@ -122,11 +126,12 @@ def exit_callback(retcode): task, f"engine_{engine_idx}", 
task.model("smt2"), - ("cd {}; {} -s {}{} --noprogress --append {} --dump-vcd engine_{i}/trace.vcd --dump-vlogtb engine_{i}/trace_tb.v " + - "--dump-smtc engine_{i}/trace.smtc --dump-yw engine_{i}/trace.yw --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format + ("cd {}; {} -s {}{} --noprogress --append {} {dump_flags} --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", - task.opt_append, i=engine_idx), + task.opt_append, + dump_flags=dump_flags, + i=engine_idx), logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index 9670a1bd..0bb4c05d 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -152,17 +152,20 @@ def output_callback(line): suffix = "" else: suffix = common_state.produced_cex - proc2 = SbyProc( - task, - f"engine_{engine_idx}_{common_state.produced_cex}", - task.model("btor"), - "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor{s}.info model/design_btor{s}.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix, s='_single' if solver_args[0] == 'pono' else ''), - logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") - ) - proc2.output_callback = output_callback2 - proc2.exit_callback = make_exit_callback(suffix) - proc2.checkretcode = True - common_state.running_procs += 1 + + if mode == "cover" or task.opt_vcd: + # TODO cover runs btorsim not only for trace generation, can we run it without VCD generation in that case? + proc2 = SbyProc( + task, + f"engine_{engine_idx}_{common_state.produced_cex}", + task.model("btor"), + "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor{s}.info model/design_btor{s}.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix, s='_single' if solver_args[0] == 'pono' else ''), + logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") + ) + proc2.output_callback = output_callback2 + proc2.exit_callback = make_exit_callback(suffix) + proc2.checkretcode = True + common_state.running_procs += 1 common_state.produced_cex += 1 common_state.wit_file.close() diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 9dd92c3c..614c8b46 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -148,11 +148,13 @@ def run(mode, task, engine_idx, engine): t_opt = "{}".format(task.opt_depth) random_seed = f"--info \"(set-option :random-seed {random_seed})\"" if random_seed else "" + dump_flags = f"--dump-vcd {trace_prefix}.vcd " if task.opt_vcd else "" + dump_flags += f"--dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc" proc = SbyProc( task, procname, task.model(model_name), - f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} --dump-vcd {trace_prefix}.vcd --dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc model/design_{model_name}.smt2""", + f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} {dump_flags} model/design_{model_name}.smt2""", logfile=open(logfile_prefix + ".txt", "w"), logstderr=(not progress) ) diff --git 
a/tests/unsorted/no_vcd.sby b/tests/unsorted/no_vcd.sby new file mode 100644 index 00000000..ea58b12d --- /dev/null +++ b/tests/unsorted/no_vcd.sby @@ -0,0 +1,37 @@ +[tasks] +smtbmc mode_bmc +btor_bmc engine_btor mode_bmc +btor_cover engine_btor mode_cover +abc mode_bmc +aiger engine_aiger mode_prove + +[options] +mode_bmc: mode bmc +mode_prove: mode prove +mode_cover: mode cover +vcd off +~mode_cover: expect fail + +[engines] +smtbmc: smtbmc +engine_btor: btor btormc +abc: abc bmc3 +aiger: aiger suprove + +[script] +read_verilog -formal no_vcd.sv +prep -top top + +[file no_vcd.sv] +module top(input clk, input force); + +reg [4:0] counter = 0; + +always @(posedge clk) begin + if (!counter[4] || force) + counter <= counter + 1; + assert (counter < 10); + cover (counter == 4); +end + +endmodule From 37140e7e818bc2df8c61458df8a9ba351beb7bfa Mon Sep 17 00:00:00 2001 From: KrystalDelusion Date: Wed, 7 Sep 2022 09:52:52 +1200 Subject: [PATCH 169/220] Fixing golden/fifo.sv Had missed a change from (non golden) fifo.sv. --- docs/examples/fifo/{skip_fifo.sby => fifo.sby} | 0 docs/examples/fifo/golden/{skip_fifo.sby => fifo.sby} | 0 docs/examples/fifo/golden/fifo.sv | 1 + 3 files changed, 1 insertion(+) rename docs/examples/fifo/{skip_fifo.sby => fifo.sby} (100%) rename docs/examples/fifo/golden/{skip_fifo.sby => fifo.sby} (100%) diff --git a/docs/examples/fifo/skip_fifo.sby b/docs/examples/fifo/fifo.sby similarity index 100% rename from docs/examples/fifo/skip_fifo.sby rename to docs/examples/fifo/fifo.sby diff --git a/docs/examples/fifo/golden/skip_fifo.sby b/docs/examples/fifo/golden/fifo.sby similarity index 100% rename from docs/examples/fifo/golden/skip_fifo.sby rename to docs/examples/fifo/golden/fifo.sby diff --git a/docs/examples/fifo/golden/fifo.sv b/docs/examples/fifo/golden/fifo.sv index 1d44dae7..d5ceadc1 100644 --- a/docs/examples/fifo/golden/fifo.sv +++ b/docs/examples/fifo/golden/fifo.sv @@ -161,6 +161,7 @@ module fifo // look for an overfill where the value in memory changes // the change in data makes certain that the value is overriden + let d_change = (wdata != rdata); property read_skip; @(posedge clk) disable iff (rst) !ren && d_change |=> $changed(raddr); From a0e3dd3d9a68f2a33087862aa04830c3241a7b5d Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 15 Sep 2022 15:47:27 +0200 Subject: [PATCH 170/220] Fix engine_list's return value This fixes #216 --- sbysrc/sby_core.py | 22 +++++++++++++-------- sbysrc/sby_mode_bmc.py | 16 ++-------------- sbysrc/sby_mode_cover.py | 15 ++------------- sbysrc/sby_mode_live.py | 15 ++------------- sbysrc/sby_mode_prove.py | 15 ++------------- tests/regression/option_skip.sby | 33 ++++++++++++++++++++++++++++++++ 6 files changed, 55 insertions(+), 61 deletions(-) create mode 100644 tests/regression/option_skip.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 5b841327..aed3e04b 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -305,7 +305,9 @@ def parse_config(self, f): if section == "engines": mode = "engines" - if args is not None: + if args is None: + engine_mode = None + else: section_args = args.split() if len(section_args) > 1: @@ -314,11 +316,16 @@ def parse_config(self, f): if section_args[0] not in ("bmc", "prove", "cover", "live"): self.error(f"sby file syntax error: Expected one of 'bmc', 'prove', 'cover', 'live' as '[engines]' argument, got '{section_args[0]}'") - if section_args[0] in self.engines: - self.error(f"Already defined engine block for mode '{section_args[0]}'") + engine_mode = section_args[0] + 
+ if engine_mode in self.engines: + if engine_mode is None: + self.error(f"Already defined engine block") else: - self.engines[section_args[0]] = list() - engine_mode = section_args[0] + self.error(f"Already defined engine block for mode '{engine_mode}'") + else: + self.engines[engine_mode] = list() + continue if section == "setup": @@ -415,8 +422,6 @@ def parse_config(self, f): if mode == "engines": args = line.strip().split() - if engine_mode not in self.engines: - self.engines[engine_mode] = list() self.engines[engine_mode].append(args) continue @@ -642,7 +647,8 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logf print(line, file=f) def engine_list(self): - return list(enumerate(self.engines.items())) + engines = self.engines.get(None, []) + self.engines.get(self.opt_mode, []) + return list(enumerate(engines)) def check_timeout(self): if self.opt_timeout is not None: diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index 2613efab..399f2676 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -24,20 +24,8 @@ def run(task): task.handle_int_option("append", 0) task.handle_str_option("aigsmt", "yices") - for engine_idx, engine_section in task.engine_list(): - if isinstance(engine_section, list): - engine = engine_section - engine_name = None - else: - assert len(engine_section[1]) > 0 - engine = engine_section[1][0] - engine_name = engine_section[0] - - if engine_name is None: - engine_name = engine_idx - - - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") + for engine_idx, engine in task.engine_list(): + task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index 61d0b079..02b586f3 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -23,19 +23,8 @@ def run(task): task.handle_int_option("depth", 20) task.handle_int_option("append", 0) - for engine_idx, engine_section in task.engine_list(): - if isinstance(engine_section, list): - engine = engine_section - engine_name = None - else: - assert len(engine_section[1]) > 0 - engine = engine_section[1][0] - engine_name = engine_section[0] - - if engine_name is None: - engine_name = engine_idx - - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") + for engine_idx, engine in task.engine_list(): + task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index c624ec5a..437fe8db 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -24,19 +24,8 @@ def run(task): task.status = "UNKNOWN" - for engine_idx, engine_section in task.engine_list(): - if isinstance(engine_section, list): - engine = engine_section - engine_name = None - else: - assert len(engine_section[1]) > 0 - engine = engine_section[1][0] - engine_name = engine_section[0] - - if engine_name is None: - engine_name = engine_idx - - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") + for engine_idx, engine in task.engine_list(): + task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "aiger": diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index f3dc1b7d..1118232b 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -31,19 +31,8 @@ def run(task): task.basecase_procs = list() 
task.induction_procs = list() - for engine_idx, engine_section in task.engine_list(): - if isinstance(engine_section, list): - engine = engine_section - engine_name = None - else: - assert len(engine_section[1]) > 0 - engine = engine_section[1][0] - engine_name = engine_section[0] - - if engine_name is None: - engine_name = engine_idx - - task.log(f"""engine_{engine_name}: {" ".join(engine)}""") + for engine_idx, engine in task.engine_list(): + task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/tests/regression/option_skip.sby b/tests/regression/option_skip.sby new file mode 100644 index 00000000..75a2bd54 --- /dev/null +++ b/tests/regression/option_skip.sby @@ -0,0 +1,33 @@ +[tasks] +smtbmc_pass: smtbmc pass +smtbmc_fail: smtbmc fail +btormc_pass: btormc pass +btormc_fail: btormc fail + +[options] +mode bmc +pass: expect pass +fail: expect fail +pass: depth 5 +fail: depth 6 + +skip 2 + +[engines] +smtbmc: smtbmc boolector +[engines bmc] +btormc: btor btormc + +[script] +read -formal top.sv +prep -top top + +[file top.sv] +module top(input clk); + reg [7:0] counter = 0; + + always @(posedge clk) begin + counter <= counter + 1; + assert (counter < 4); + end +endmodule From 90616c280b19f4e20b513e4da6e33ae4d338cad8 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 20 Oct 2022 13:57:37 +0200 Subject: [PATCH 171/220] tests: Do not run the same SBY task multiple times in parallel --- docs/examples/fifo/fifo.sh | 13 ------------- docs/examples/fifo/fifo_extra_tests.sby | 18 ++++++++++++++++++ tests/make/test_rules.py | 3 ++- 3 files changed, 20 insertions(+), 14 deletions(-) delete mode 100644 docs/examples/fifo/fifo.sh create mode 100644 docs/examples/fifo/fifo_extra_tests.sby diff --git a/docs/examples/fifo/fifo.sh b/docs/examples/fifo/fifo.sh deleted file mode 100644 index 10242228..00000000 --- a/docs/examples/fifo/fifo.sh +++ /dev/null @@ -1,13 +0,0 @@ -#!/bin/bash - -python3 $SBY_MAIN -f fifo.sby basic cover noverific - -if [[ $? -ne 0 ]] ; then - exit 1 -fi - -python3 $SBY_MAIN -f fifo.sby nofullskip - -if [[ $? -ne 2 ]] ; then - exit 1 -fi diff --git a/docs/examples/fifo/fifo_extra_tests.sby b/docs/examples/fifo/fifo_extra_tests.sby new file mode 100644 index 00000000..183def50 --- /dev/null +++ b/docs/examples/fifo/fifo_extra_tests.sby @@ -0,0 +1,18 @@ +--pycode-begin-- +# This is for our test infrastructure and not part of the example + +# Read fifo.sby and patch it on the fly: +for line in open("fifo.sby"): + line = line.rstrip() + + # change the tasks to run as tests + if line.endswith(": default"): + line = "nofullskip noverific : default" + + output(line) + + # add expect fail to the failing tasks + if line == "[options]": + output("nofullskip: expect fail") + +--pycode-end-- diff --git a/tests/make/test_rules.py b/tests/make/test_rules.py index c1b8e840..8e91bfd0 100644 --- a/tests/make/test_rules.py +++ b/tests/make/test_rules.py @@ -18,7 +18,8 @@ def unix_path(path): taskinfo = json.loads( subprocess.check_output( - [sys.executable, os.getenv("SBY_MAIN"), "--dumptaskinfo", sby_file] + [sys.executable, os.getenv("SBY_MAIN"), "--dumptaskinfo", sby_file.name], + cwd=sby_dir, ) ) From 966bdae1f6f5964a560fb46a730f1c3d6486b6fb Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 20 Oct 2022 14:31:57 +0200 Subject: [PATCH 172/220] aigbmc: Convert aiw trace to yw trace and load that into smtbmc This handles more edge cases concerning FF initialization, memories and hierarchy. 
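
For illustration only (not part of the patch itself): the flow this change wires into the AIGER/ABC engine wrappers is a two-step conversion, first `yosys-witness aiw2yw` to turn the raw AIGER witness into a Yosys witness, then `yosys-smtbmc --yw` to replay that witness against the SMT2 model. A minimal standalone sketch follows; the file names mirror the diff below, but the helper function itself is hypothetical and only shows the order of the two steps.

import subprocess

def replay_aiger_witness(workdir, engine_dir="engine_0"):
    # 1. Convert the AIGER witness (.aiw) into a Yosys witness (.yw), using the
    #    witness map file (.ywa) that sits next to the AIGER model.
    subprocess.check_call(
        ["yosys-witness", "aiw2yw",
         f"{engine_dir}/trace.aiw",
         "model/design_aiger.ywa",
         f"{engine_dir}/trace.yw"],
        cwd=workdir)
    # 2. Replay the .yw witness against the SMT2 model; smtbmc reconstructs the
    #    full trace (FF initialization, memories, hierarchy) and writes the VCD.
    subprocess.check_call(
        ["yosys-smtbmc", "--noprogress",
         "--yw", f"{engine_dir}/trace.yw",
         "--dump-vcd", f"{engine_dir}/trace.vcd",
         "model/design_smt2.smt2"],
        cwd=workdir)
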
--- sbysrc/sby.py | 2 ++ sbysrc/sby_core.py | 1 + sbysrc/sby_engine_abc.py | 25 ++++++++++++------- sbysrc/sby_engine_aiger.py | 50 ++++++++++++++++++-------------------- 4 files changed, 43 insertions(+), 35 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 63c9f339..14036582 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -65,6 +65,8 @@ def __call__(self, parser, namespace, values, option_string=None): action=DictAction, dest="exe_paths") parser.add_argument("--smtbmc", metavar="", action=DictAction, dest="exe_paths") +parser.add_argument("--witness", metavar="", + action=DictAction, dest="exe_paths") parser.add_argument("--suprove", metavar="", action=DictAction, dest="exe_paths") parser.add_argument("--aigbmc", metavar="", diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 38a31e35..48da97b8 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -614,6 +614,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logf "yosys": os.getenv("YOSYS", yosys_program_prefix + "yosys"), "abc": os.getenv("ABC", yosys_program_prefix + "yosys-abc"), "smtbmc": os.getenv("SMTBMC", yosys_program_prefix + "yosys-smtbmc"), + "witness": os.getenv("WITNESS", yosys_program_prefix + "yosys-witness"), "suprove": os.getenv("SUPROVE", "suprove"), "aigbmc": os.getenv("AIGBMC", "aigbmc"), "avy": os.getenv("AVY", "avy"), diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index 7981b776..ace68a06 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -90,19 +90,26 @@ def exit_callback(retcode): if proc_status == "FAIL" and task.opt_aigsmt != "none": trace_prefix = f"engine_{engine_idx}/trace" - dump_flags = f"--dump-vcd {trace_prefix}.vcd " if task.opt_vcd else "" - dump_flags += f"--dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc" + + smtbmc_opts = [] + smtbmc_opts += ["-s", task.opt_aigsmt] + if task.opt_tbtop is not None: + smtbmc_opts += ["--vlogtb-top", task.opt_tbtop] + smtbmc_opts += ["--noprogress", f"--append {task.opt_append}"] + if task.opt_vcd: + smtbmc_opts += [f"--dump-vcd {trace_prefix}.vcd"] + smtbmc_opts += [f"--dump-yw {trace_prefix}.yw", f"--dump-vlogtb {trace_prefix}_tb.v", f"--dump-smtc {trace_prefix}.smtc"] + + witness_proc = SbyProc( + task, f"engine_{engine_idx}", [], + f"cd {task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/trace.aiw model/design_aiger.ywa engine_{engine_idx}/trace.yw", + ) proc2 = SbyProc( task, f"engine_{engine_idx}", - task.model("smt2"), - ("cd {}; {} -s {}{} --noprogress --append {} {dump_flags} --aig model/design_aiger.aim:engine_{i}/trace.aiw --aig-noheader model/design_smt2.smt2").format - (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, - "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", - task.opt_append, - dump_flags=dump_flags, - i=engine_idx), + [*task.model("smt2"), witness_proc], + f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/trace.yw model/design_smt2.smt2", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index e7f80292..293b0cbb 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -106,34 +106,32 @@ def exit_callback(retcode): if proc_status == "FAIL" and task.opt_aigsmt != "none": if produced_cex: trace_prefix = f"engine_{engine_idx}/trace" - dump_flags = f"--dump-vcd {trace_prefix}.vcd " if task.opt_vcd else "" - 
dump_flags += f"--dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc" + smtbmc_opts = [] if mode == "live": - proc2 = SbyProc( - task, - f"engine_{engine_idx}", - task.model("smt2"), - ("cd {}; {} -g -s {}{} --noprogress {dump_flags} --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format - (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, - "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", - dump_flags=dump_flags, - i=engine_idx), - logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") - ) - else: - proc2 = SbyProc( - task, - f"engine_{engine_idx}", - task.model("smt2"), - ("cd {}; {} -s {}{} --noprogress --append {} {dump_flags} --aig model/design_aiger.aim:engine_{i}/trace.aiw model/design_smt2.smt2").format - (task.workdir, task.exe_paths["smtbmc"], task.opt_aigsmt, - "" if task.opt_tbtop is None else f" --vlogtb-top {task.opt_tbtop}", - task.opt_append, - dump_flags=dump_flags, - i=engine_idx), - logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") - ) + smtbmc_opts += ["-g"] + smtbmc_opts += ["-s", task.opt_aigsmt] + if task.opt_tbtop is not None: + smtbmc_opts += ["--vlogtb-top", task.opt_tbtop] + smtbmc_opts += ["--noprogress"] + if mode != "live": + smtbmc_opts += [f"--append {task.opt_append}"] + if task.opt_vcd: + smtbmc_opts += [f"--dump-vcd {trace_prefix}.vcd"] + smtbmc_opts += [f"--dump-yw {trace_prefix}.yw", f"--dump-vlogtb {trace_prefix}_tb.v", f"--dump-smtc {trace_prefix}.smtc"] + + witness_proc = SbyProc( + task, f"engine_{engine_idx}", [], + f"cd {task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/trace.aiw model/design_aiger.ywa engine_{engine_idx}/trace.yw", + ) + + proc2 = SbyProc( + task, + f"engine_{engine_idx}", + [*task.model("smt2"), witness_proc], + f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/trace.yw model/design_smt2.smt2", + logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") + ) proc2_status = None From 50bdc76fe3c0655c8b2fd2122393b193162020ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miodrag=20Milanovi=C4=87?= Date: Mon, 24 Oct 2022 08:31:45 +0200 Subject: [PATCH 173/220] Update CI script --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index ea48d06b..fcbe8189 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,7 +6,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v2 + - uses: actions/checkout@v3 - uses: YosysHQ/setup-oss-cad-suite@v1 - name: Run checks run: tabbypip install xmlschema && make ci From 003ccf7197a11fdd4d3b107b643af1a4e1810155 Mon Sep 17 00:00:00 2001 From: Claire Xenia Wolf Date: Mon, 31 Oct 2022 20:29:32 +0100 Subject: [PATCH 174/220] Add color handling via click.style and click.echo Signed-off-by: Claire Xenia Wolf --- sbysrc/sby_core.py | 46 +++++++++++++++++++++++-------------- sbysrc/sby_engine_abc.py | 4 ++-- sbysrc/sby_engine_aiger.py | 6 ++--- sbysrc/sby_engine_btor.py | 6 ++--- sbysrc/sby_engine_smtbmc.py | 6 ++--- 5 files changed, 40 insertions(+), 28 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 48da97b8..98ddaf07 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
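# (Editorial aside, not part of the patch: the click calls introduced in this
# hunk use the stock click API. click.style() wraps a string in ANSI colour
# codes and click.echo() prints it, automatically stripping the codes when the
# destination is not a terminal -- which is why the same styled line can be
# sent both to stdout and to the logfile. A tiny usage sketch:
#
#     import click
#     line = click.style("SBY", fg="blue") + " " + click.style("ERROR", fg="red", bold=True)
#     click.echo(line)                                   # coloured on a tty
#     click.echo(line, file=open("logfile.txt", "w"))    # plain text in the file
# )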
# -import os, re, sys, signal, platform +import os, re, sys, signal, platform, click if os.name == "posix": import resource, fcntl import subprocess @@ -28,7 +28,7 @@ all_procs_running = [] def force_shutdown(signum, frame): - print("SBY ---- Keyboard interrupt or external termination signal ----", flush=True) + click.echo("SBY ---- Keyboard interrupt or external termination signal ----") for proc in list(all_procs_running): proc.terminate() sys.exit(1) @@ -105,8 +105,8 @@ def register_dep(self, next_proc): def log(self, line): if line is not None and (self.noprintregex is None or not self.noprintregex.match(line)): if self.logfile is not None: - print(line, file=self.logfile) - self.task.log(f"{self.info}: {line}") + click.echo(line, file=self.logfile) + self.task.log(f"{click.style(self.info, fg='magenta')}: {line}") def handle_output(self, line): if self.terminated or len(line) == 0: @@ -136,7 +136,7 @@ def terminate(self, timeout=False): return if self.running: if not self.silent: - self.task.log(f"{self.info}: terminating process") + self.task.log(f"{click.style(self.info, fg='magenta')}: terminating process") if os.name == "posix": try: os.killpg(self.p.pid, signal.SIGTERM) @@ -171,7 +171,7 @@ def poll(self, force_unchecked=False): return if not self.silent: - self.task.log(f"{self.info}: starting process \"{self.cmdline}\"") + self.task.log(f"{click.style(self.info, fg='magenta')}: starting process \"{self.cmdline}\"") if os.name == "posix": def preexec_fn(): @@ -202,7 +202,7 @@ def preexec_fn(): self.job_lease.done() if not self.silent: - self.task.log(f"{self.info}: finished (returncode={self.p.returncode})") + self.task.log(f"{click.style(self.info, fg='magenta')}: finished (returncode={self.p.returncode})") self.task.update_proc_stopped(self) self.running = False @@ -218,7 +218,7 @@ def preexec_fn(): if returncode == 127: if not self.silent: - self.task.log(f"{self.info}: COMMAND NOT FOUND. ERROR.") + self.task.log(f"{click.style(self.info, fg='magenta')}: COMMAND NOT FOUND. ERROR.") self.handle_error(returncode) self.terminated = True self.task.proc_failed(self) @@ -226,7 +226,7 @@ def preexec_fn(): if self.checkretcode and returncode not in self.retcodes: if not self.silent: - self.task.log(f"{self.info}: task failed. ERROR.") + self.task.log(f"{click.style(self.info, fg='magenta')}: task failed. 
ERROR.") self.handle_error(returncode) self.terminated = True self.task.proc_failed(self) @@ -640,12 +640,12 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logf self.log_targets = [sys.stdout, self.logfile] for line in early_logs: - print(line, file=self.logfile, flush=True) + click.echo(line, file=self.logfile) if not reusedir: with open(f"{workdir}/config.sby", "w") as f: for line in sbyconfig: - print(line, file=f) + click.echo(line, file=f) def engine_list(self): engines = self.engines.get(None, []) + self.engines.get(self.opt_mode, []) @@ -680,15 +680,24 @@ def update_proc_canceled(self, proc): self.procs_pending.remove(proc) self.taskloop.procs_pending.remove(proc) + def dress_message(self, logmessage): + tm = localtime() + return " ".join([ + click.style("SBY", fg="blue"), + click.style("{:2d}:{:02d}:{:02d}".format(tm.tm_hour, tm.tm_min, tm.tm_sec), fg="green"), + "[" + click.style(self.workdir, fg="blue") + "]", + logmessage + ]) + def log(self, logmessage): tm = localtime() - line = "SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, self.workdir, logmessage) + line = self.dress_message(logmessage) for target in self.log_targets: - print(line, file=target, flush=True) + click.echo(line, file=target) def error(self, logmessage): tm = localtime() - self.log(f"ERROR: {logmessage}") + self.log(click.style(f"ERROR: {logmessage}", fg="red", bold=True)) self.status = "ERROR" if "ERROR" not in self.expect: self.retcode = 16 @@ -696,7 +705,7 @@ def error(self, logmessage): self.retcode = 0 self.terminate() with open(f"{self.workdir}/{self.status}", "w") as f: - print(f"ERROR: {logmessage}", file=f) + click.echo(f"ERROR: {logmessage}", file=f) raise SbyAbort(logmessage) def makedirs(self, path): @@ -1082,7 +1091,10 @@ def summarize(self): ] + self.summary for line in self.summary: - self.log(f"summary: {line}") + if line.startswith("Elapsed"): + self.log(f"summary: {line}") + else: + self.log("summary: " + click.style(line, fg="green" if self.status in self.expect else "red", bold=True)) assert self.status in ["PASS", "FAIL", "UNKNOWN", "ERROR", "TIMEOUT"] @@ -1098,7 +1110,7 @@ def summarize(self): def write_summary_file(self): with open(f"{self.workdir}/{self.status}", "w") as f: for line in self.summary: - print(line, file=f) + click.echo(line, file=f) def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): junit_time = strftime('%Y-%m-%dT%H:%M:%S') diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index ace68a06..47224d88 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(mode, task, engine_idx, engine): @@ -83,7 +83,7 @@ def exit_callback(retcode): task.error(f"engine_{engine_idx}: Could not determine engine status.") task.update_status(proc_status) - task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status}") task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""") task.terminate() diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index 293b0cbb..18cb876c 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
# -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(mode, task, engine_idx, engine): @@ -98,7 +98,7 @@ def exit_callback(retcode): aiw_file.close() task.update_status(proc_status) - task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status}") task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""") task.terminate() @@ -159,7 +159,7 @@ def exit_callback2(line): proc2.exit_callback = exit_callback2 else: - task.log(f"engine_{engine_idx}: Engine did not produce a counter example.") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Engine did not produce a counter example.") proc.output_callback = output_callback proc.exit_callback = exit_callback diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index 0bb4c05d..284a49e8 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import re, os, getopt +import re, os, getopt, click from types import SimpleNamespace from sby_core import SbyProc @@ -87,11 +87,11 @@ def print_traces_and_terminate(): task.error(f"engine_{engine_idx}: Engine terminated without status.") task.update_status(proc_status.upper()) - task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status}") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status}") task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""") if len(common_state.produced_traces) == 0: - task.log(f"""engine_{engine_idx}: Engine did not produce a{" counter" if mode != "cover" else "n "}example.""") + task.log(f"""{click.style(f'engine_{engine_idx}', fg='magenta')}: Engine did not produce a{" counter" if mode != "cover" else "n "}example.""") elif len(common_state.produced_traces) <= common_state.print_traces_max: task.summary.extend(common_state.produced_traces) else: diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 614c8b46..6c192680 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
# -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(mode, task, engine_idx, engine): @@ -232,7 +232,7 @@ def exit_callback(retcode): if mode == "bmc" or mode == "cover": task.update_status(proc_status) proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status - task.log(f"engine_{engine_idx}: Status returned by engine: {proc_status_lower}") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status_lower}") task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status_lower}""") if proc_status == "FAIL" and mode != "cover": @@ -260,7 +260,7 @@ def exit_callback(retcode): elif mode in ["prove_basecase", "prove_induction"]: proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status - task.log(f"""engine_{engine_idx}: Status returned by engine for {mode.split("_")[1]}: {proc_status_lower}""") + task.log(f"""{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine for {mode.split("_")[1]}: {proc_status_lower}""") task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status_lower} for {mode.split("_")[1]}""") if mode == "prove_basecase": From e8d713cc27c7f88f2d14a6f3696dbb445da39816 Mon Sep 17 00:00:00 2001 From: Claire Xenia Wolf Date: Wed, 2 Nov 2022 12:35:11 +0100 Subject: [PATCH 175/220] Add colors to early and late log messages Signed-off-by: Claire Xenia Wolf --- sbysrc/sby.py | 12 +++++------- sbysrc/sby_core.py | 21 +++++++++++---------- 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 14036582..5052128e 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -19,9 +19,9 @@ import argparse, json, os, sys, shutil, tempfile, re ##yosys-sys-path## -from sby_core import SbyConfig, SbyTask, SbyAbort, SbyTaskloop, process_filename +from sby_core import SbyConfig, SbyTask, SbyAbort, SbyTaskloop, process_filename, dress_message from sby_jobserver import SbyJobClient, process_jobserver_environment -import time, platform +import time, platform, click process_jobserver_environment() # needs to be called early @@ -177,9 +177,8 @@ def __call__(self, parser, namespace, values, option_string=None): early_logmsgs = list() def early_log(workdir, msg): - tm = time.localtime() - early_logmsgs.append("SBY {:2d}:{:02d}:{:02d} [{}] {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, workdir, msg)) - print(early_logmsgs[-1]) + early_logmsgs.append(dress_message(workdir, msg)) + click.echo(early_logmsgs[-1]) def read_sbyconfig(sbydata, taskname): cfgdata = list() @@ -567,7 +566,6 @@ def exit_callback(): failed.append(taskname) if failed and (len(tasknames) > 1 or tasknames[0] is not None): - tm = time.localtime() - print("SBY {:2d}:{:02d}:{:02d} The following tasks failed: {}".format(tm.tm_hour, tm.tm_min, tm.tm_sec, failed)) + click.echo(dress_message(None, click.style(f"The following tasks failed: {failed}", fg="red", bold=True))) sys.exit(retcode) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 98ddaf07..317c9589 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -46,6 +46,16 @@ def process_filename(filename): return filename +def dress_message(workdir, logmessage): + tm = localtime() + if workdir is not None: + logmessage = "[" + click.style(workdir, fg="blue") + "] " + logmessage + return " ".join([ + click.style("SBY", fg="blue"), + click.style("{:2d}:{:02d}:{:02d}".format(tm.tm_hour, tm.tm_min, tm.tm_sec), fg="green"), + logmessage + ]) + 
class SbyProc: def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, silent=False): self.running = False @@ -680,18 +690,9 @@ def update_proc_canceled(self, proc): self.procs_pending.remove(proc) self.taskloop.procs_pending.remove(proc) - def dress_message(self, logmessage): - tm = localtime() - return " ".join([ - click.style("SBY", fg="blue"), - click.style("{:2d}:{:02d}:{:02d}".format(tm.tm_hour, tm.tm_min, tm.tm_sec), fg="green"), - "[" + click.style(self.workdir, fg="blue") + "]", - logmessage - ]) - def log(self, logmessage): tm = localtime() - line = self.dress_message(logmessage) + line = dress_message(self.workdir, logmessage) for target in self.log_targets: click.echo(line, file=target) From c29a5bbe8a3c0e2ee7ca6fda7d3a9e0b73d2af48 Mon Sep 17 00:00:00 2001 From: Claire Xenia Wolf Date: Thu, 24 Nov 2022 18:12:22 +0100 Subject: [PATCH 176/220] Add colors to engine header message Signed-off-by: Claire Xenia Wolf --- sbysrc/sby_mode_bmc.py | 4 ++-- sbysrc/sby_mode_cover.py | 4 ++-- sbysrc/sby_mode_live.py | 4 ++-- sbysrc/sby_mode_prove.py | 4 ++-- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index 399f2676..9ba624cd 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(task): @@ -25,7 +25,7 @@ def run(task): task.handle_str_option("aigsmt", "yices") for engine_idx, engine in task.engine_list(): - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: {' '.join(engine)}") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index 02b586f3..3a5fbe91 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(task): @@ -24,7 +24,7 @@ def run(task): task.handle_int_option("append", 0) for engine_idx, engine in task.engine_list(): - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: {' '.join(engine)}") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": diff --git a/sbysrc/sby_mode_live.py b/sbysrc/sby_mode_live.py index 437fe8db..89bcc577 100644 --- a/sbysrc/sby_mode_live.py +++ b/sbysrc/sby_mode_live.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(task): @@ -25,7 +25,7 @@ def run(task): task.status = "UNKNOWN" for engine_idx, engine in task.engine_list(): - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: {' '.join(engine)}") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "aiger": diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index 1118232b..b289f314 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -16,7 +16,7 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
# -import re, os, getopt +import re, os, getopt, click from sby_core import SbyProc def run(task): @@ -32,7 +32,7 @@ def run(task): task.induction_procs = list() for engine_idx, engine in task.engine_list(): - task.log(f"""engine_{engine_idx}: {" ".join(engine)}""") + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: {' '.join(engine)}") task.makedirs(f"{task.workdir}/engine_{engine_idx}") if engine[0] == "smtbmc": From 19109fd9f27e98ae7e18e608d229198bb006d0c8 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 9 Dec 2022 16:24:22 +0100 Subject: [PATCH 177/220] jobserver: Only poll non-helper-process jobserver fd when ready to read This avoids SBY going into a busy wait loop in that case. --- sbysrc/sby_jobserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_jobserver.py b/sbysrc/sby_jobserver.py index 104bcc35..7007c511 100644 --- a/sbysrc/sby_jobserver.py +++ b/sbysrc/sby_jobserver.py @@ -287,7 +287,7 @@ def has_pending_leases(self): def poll_fds(self): if self.helper_process: return [self.response_read_fd] - elif self.read_fd is not None: + elif self.read_fd is not None and self.has_pending_leases(): return [self.read_fd] else: return [] From beb8b3c6e38ee716cd9771eb906c37684e83eab4 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 19 Dec 2022 16:49:37 +0100 Subject: [PATCH 178/220] Do not use fstring syntax that requires Python 3.8 While we most likely will require Python 3.8 going forward, this might restore Python 3.6 compatibility until we update the installation guide. --- sbysrc/sby_jobserver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_jobserver.py b/sbysrc/sby_jobserver.py index 7007c511..b7600079 100644 --- a/sbysrc/sby_jobserver.py +++ b/sbysrc/sby_jobserver.py @@ -125,7 +125,7 @@ def done(self): self.is_done = True def __repr__(self): - return f"{self.is_ready=} {self.is_done=}" + return f"is_ready={self.is_ready} is_done={self.is_done}" def __del__(self): self.done() From 6d3b5aa9602ff130f21a9513c3b079346d67b9e0 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 10 Jan 2023 15:33:18 +0100 Subject: [PATCH 179/220] Unified trace generation using yosys's sim across all engines Currently opt-in using the `fst` or `vcd_sim` options. 
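
For context, an editorial sketch rather than part of the patch: the new `vcd_sim` and `fst` switches join the existing `vcd` option as plain boolean options, and the engine wrappers decide from them which trace writer is responsible for the output. Roughly, under the defaults added below:

# Illustrative only -- the option names and defaults follow the patch
# (vcd defaults to on, vcd_sim and fst default to off); the helper itself
# is hypothetical.
def trace_outputs(opt_vcd=True, opt_vcd_sim=False, opt_fst=False):
    outputs = []
    if opt_vcd and not opt_vcd_sim:
        outputs.append("VCD written by smtbmc/btorsim (previous behaviour)")
    if opt_vcd and opt_vcd_sim:
        outputs.append("VCD written by yosys sim (new, opt-in)")
    if opt_fst:
        outputs.append("FST written by yosys sim (new, opt-in)")
    return outputs
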
--- sbysrc/sby_core.py | 240 ++++++++++++++++++++++-- sbysrc/sby_design.py | 88 ++++++++- sbysrc/sby_engine_abc.py | 80 ++------ sbysrc/sby_engine_aiger.py | 78 +++++--- sbysrc/sby_engine_btor.py | 70 ++++--- sbysrc/sby_engine_smtbmc.py | 124 +++++++----- sbysrc/sby_mode_bmc.py | 1 - sbysrc/sby_mode_cover.py | 1 - sbysrc/sby_mode_prove.py | 1 - sbysrc/sby_sim.py | 96 ++++++++++ tests/keepgoing/keepgoing_multi_step.py | 4 +- tests/keepgoing/keepgoing_same_step.py | 2 +- tests/keepgoing/keepgoing_smtc.py | 2 +- tests/unsorted/btor_meminit.sby | 48 +++++ tests/unsorted/cover_unreachable.sby | 34 ++++ 15 files changed, 686 insertions(+), 183 deletions(-) create mode 100644 sbysrc/sby_sim.py create mode 100644 tests/unsorted/btor_meminit.sby create mode 100644 tests/unsorted/cover_unreachable.sby diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 317c9589..c91561f4 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -20,6 +20,9 @@ if os.name == "posix": import resource, fcntl import subprocess +from dataclasses import dataclass, field +from collections import defaultdict +from typing import Optional from shutil import copyfile, copytree, rmtree from select import select from time import monotonic, localtime, sleep, strftime @@ -100,7 +103,7 @@ def __init__(self, task, info, deps, cmdline, logfile=None, logstderr=True, sile dep.register_dep(self) self.output_callback = None - self.exit_callback = None + self.exit_callbacks = [] self.error_callback = None if self.task.timeout_reached: @@ -112,6 +115,9 @@ def register_dep(self, next_proc): else: self.notify.append(next_proc) + def register_exit_callback(self, callback): + self.exit_callbacks.append(callback) + def log(self, line): if line is not None and (self.noprintregex is None or not self.noprintregex.match(line)): if self.logfile is not None: @@ -130,8 +136,8 @@ def handle_exit(self, retcode): return if self.logfile is not None: self.logfile.close() - if self.exit_callback is not None: - self.exit_callback(retcode) + for callback in self.exit_callbacks: + callback(retcode) def handle_error(self, retcode): if self.terminated: @@ -602,6 +608,199 @@ def run(self): for task in self.tasks: task.exit_callback() +@dataclass +class SbySummaryEvent: + engine_idx: int + trace: Optional[str] = field(default=None) + path: Optional[str] = field(default=None) + hdlname: Optional[str] = field(default=None) + type: Optional[str] = field(default=None) + src: Optional[str] = field(default=None) + step: Optional[int] = field(default=None) + prop: Optional[SbyProperty] = field(default=None) + engine_case: Optional[str] = field(default=None) + + @property + def engine(self): + return f"engine_{self.engine_idx}" + +@dataclass +class SbyTraceSummary: + trace: str + path: Optional[str] = field(default=None) + engine_case: Optional[str] = field(default=None) + events: dict = field(default_factory=lambda: defaultdict(lambda: defaultdict(list))) + + @property + def kind(self): + if '$assert' in self.events: + kind = 'counterexample trace' + elif '$cover' in self.events: + kind = 'cover trace' + else: + kind = 'trace' + return kind + +@dataclass +class SbyEngineSummary: + engine_idx: int + traces: dict = field(default_factory=dict) + status: Optional[str] = field(default=None) + unreached_covers: Optional[list] = field(default=None) + + @property + def engine(self): + return f"engine_{self.engine_idx}" + +class SbySummary: + def __init__(self, task): + self.task = task + self.timing = [] + self.lines = [] + + self.engine_summaries = {} + self.traces = 
defaultdict(dict) + self.engine_status = {} + self.unreached_covers = None + + def append(self, line): + self.lines.append(line) + + def extend(self, lines): + self.lines.extend(lines) + + def engine_summary(self, engine_idx): + if engine_idx not in self.engine_summaries: + self.engine_summaries[engine_idx] = SbyEngineSummary(engine_idx) + return self.engine_summaries[engine_idx] + + def add_event(self, *args, **kwargs): + event = SbySummaryEvent(*args, **kwargs) + if event.prop: + if event.type == "$assert": + event.prop.status = "FAIL" + if event.path: + event.prop.tracefiles.append(event.path) + if event.prop: + if event.type == "$cover": + event.prop.status = "PASS" + if event.path: + event.prop.tracefiles.append(event.path) + + engine = self.engine_summary(event.engine_idx) + + if event.trace not in engine.traces: + engine.traces[event.trace] = SbyTraceSummary(event.trace, path=event.path, engine_case=event.engine_case) + + if event.type: + by_type = engine.traces[event.trace].events[event.type] + if event.hdlname: + by_type[event.hdlname].append(event) + + def set_engine_status(self, engine_idx, status, case=None): + engine_summary = self.engine_summary(engine_idx) + if case is None: + self.task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {status}") + self.engine_summary(engine_idx).status = status + else: + self.task.log(f"{click.style(f'engine_{engine_idx}.{case}', fg='magenta')}: Status returned by engine for {case}: {status}") + if engine_summary.status is None: + engine_summary.status = {} + engine_summary.status[case] = status + + def summarize(self, short): + omitted_excess = False + for line in self.timing: + yield line + + for engine_idx, engine_cmd in self.task.engine_list(): + engine_cmd = ' '.join(engine_cmd) + trace_limit = 5 + prop_limit = 5 + step_limit = 5 + engine = self.engine_summary(engine_idx) + if isinstance(engine.status, dict): + for case, status in sorted(engine.status.items()): + yield f"{engine.engine} ({engine_cmd}) returned {status} for {case}" + elif engine.status: + yield f"{engine.engine} ({engine_cmd}) returned {engine.status}" + else: + yield f"{engine.engine} ({engine_cmd}) did not return a status" + + produced_traces = False + + for i, (trace_name, trace) in enumerate(sorted(engine.traces.items())): + if short and i == trace_limit: + excess = len(engine.traces) - trace_limit + omitted_excess = True + yield f"and {excess} further trace{'s' if excess != 1 else ''}" + break + case_suffix = f" [{trace.engine_case}]" if trace.engine_case else "" + if trace.path: + if short: + yield f"{trace.kind}{case_suffix}: {self.task.workdir}/{trace.path}" + else: + yield f"{trace.kind}{case_suffix}: {trace.path}" + else: + yield f"{trace.kind}{case_suffix}: <{trace.trace}>" + produced_traces = True + for event_type, events in sorted(trace.events.items()): + if event_type == '$assert': + desc = "failed assertion" + short_desc = 'assertion' + elif event_type == '$cover': + desc = "reached cover statement" + short_desc = 'cover statement' + elif event_type == '$assume': + desc = "violated assumption" + short_desc = 'assumption' + else: + continue + for j, (hdlname, same_events) in enumerate(sorted(events.items())): + if short and j == prop_limit: + excess = len(events) - prop_limit + yield f" and {excess} further {short_desc}{'s' if excess != 1 else ''}" + break + + event = same_events[0] + steps = sorted(e.step for e in same_events) + if short and len(steps) > step_limit: + steps = [str(step) for step in steps[:step_limit]] + 
excess = len(steps) - step_limit + omitted_excess = True + steps[-1] += f" and {excess} further step{'s' if excess != 1 else ''}" + + steps = f"step{'s' if len(steps) > 1 else ''} {', '.join(map(str, steps))}" + yield f" {desc} {event.hdlname} at {event.src} in {steps}" + + if not produced_traces: + yield f"{engine.engine} did not produce any traces" + + if self.unreached_covers is None and self.task.opt_mode == 'cover' and self.task.status != "PASS" and self.task.design: + self.unreached_covers = [] + for prop in self.task.design.hierarchy: + if prop.type == prop.Type.COVER and prop.status == "UNKNOWN": + self.unreached_covers.append(prop) + + if self.unreached_covers: + yield f"unreached cover statements:" + for j, prop in enumerate(self.unreached_covers): + if short and j == prop_limit: + excess = len(self.unreached_covers) - prop_limit + omitted_excess = True + yield f" and {excess} further propert{'ies' if excess != 1 else 'y'}" + break + yield f" {prop.hdlname} at {prop.location}" + + for line in self.lines: + yield line + + if omitted_excess: + yield f"see {self.task.workdir}/{self.task.status} for a complete summary" + def __iter__(self): + yield from self.summarize(True) + + class SbyTask(SbyConfig): def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logfile=None): @@ -644,7 +843,7 @@ def __init__(self, sbyconfig, workdir, early_logs, reusedir, taskloop=None, logf ru = resource.getrusage(resource.RUSAGE_CHILDREN) self.start_process_time = ru.ru_utime + ru.ru_stime - self.summary = list() + self.summary = SbySummary(self) self.logfile = logfile or open(f"{workdir}/logfile.txt", "a") self.log_targets = [sys.stdout, self.logfile] @@ -696,6 +895,15 @@ def log(self, logmessage): for target in self.log_targets: click.echo(line, file=target) + def log_prefix(self, prefix, message=None): + prefix = f"{click.style(prefix, fg='magenta')}: " + def log(message): + self.log(f"{prefix}{message}") + if message is None: + return log + else: + log(message) + def error(self, logmessage): tm = localtime() self.log(click.style(f"ERROR: {logmessage}", fg="red", bold=True)) @@ -833,7 +1041,7 @@ def instance_hierarchy_callback(retcode): def instance_hierarchy_error_callback(retcode): self.precise_prop_status = False - proc.exit_callback = instance_hierarchy_callback + proc.register_exit_callback(instance_hierarchy_callback) proc.error_callback = instance_hierarchy_error_callback return [proc] @@ -891,8 +1099,8 @@ def instance_hierarchy_error_callback(retcode): print("delete -output", file=f) print("dffunmap", file=f) print("stat", file=f) - print("write_btor {}-i design_{m}.info design_{m}.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f) - print("write_btor -s {}-i design_{m}_single.info design_{m}_single.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f) + print("write_btor {}-i design_{m}.info -ywmap design_btor.ywb design_{m}.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f) + print("write_btor -s {}-i design_{m}_single.info -ywmap design_btor_single.ywb design_{m}_single.btor".format("-c " if self.opt_mode == "cover" else "", m=model_name), file=f) proc = SbyProc( self, @@ -967,6 +1175,8 @@ def update_status(self, new_status): if new_status == "PASS": assert self.status != "FAIL" self.status = "PASS" + if self.opt_mode in ("bmc", "prove") and self.design: + self.design.pass_unknown_asserts() elif new_status == "FAIL": assert self.status != "PASS" @@ -1004,11 +1214,17 @@ def 
handle_non_engine_options(self): self.handle_int_option("timeout", None) self.handle_bool_option("vcd", True) + self.handle_bool_option("vcd_sim", False) + self.handle_bool_option("fst", False) self.handle_str_option("smtc", None) self.handle_int_option("skip", None) self.handle_str_option("tbtop", None) + if self.opt_mode != "live": + self.handle_int_option("append", 0) + self.handle_bool_option("append_assume", False) + self.handle_str_option("make_model", None) def setup_procs(self, setupmode): @@ -1078,18 +1294,18 @@ def summarize(self): # TODO process time is incorrect when running in parallel - self.summary = [ + self.summary.timing = [ "Elapsed clock time [H:MM:SS (secs)]: {}:{:02d}:{:02d} ({})".format (total_clock_time // (60*60), (total_clock_time // 60) % 60, total_clock_time % 60, total_clock_time), "Elapsed process time [H:MM:SS (secs)]: {}:{:02d}:{:02d} ({})".format (total_process_time // (60*60), (total_process_time // 60) % 60, total_process_time % 60, total_process_time), - ] + self.summary + ] else: - self.summary = [ + self.summary.timing = [ "Elapsed clock time [H:MM:SS (secs)]: {}:{:02d}:{:02d} ({})".format (total_clock_time // (60*60), (total_clock_time // 60) % 60, total_clock_time % 60, total_clock_time), "Elapsed process time unvailable on Windows" - ] + self.summary + ] for line in self.summary: if line.startswith("Elapsed"): @@ -1110,7 +1326,7 @@ def summarize(self): def write_summary_file(self): with open(f"{self.workdir}/{self.status}", "w") as f: - for line in self.summary: + for line in self.summary.summarize(short=False): click.echo(line, file=f) def print_junit_result(self, f, junit_ts_name, junit_tc_name, junit_format_strict=False): diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 399ea115..88b63954 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -16,9 +16,37 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import json +import json, re from enum import Enum, auto from dataclasses import dataclass, field +from typing import Optional, Tuple + + +addr_re = re.compile(r'\\\[[0-9]+\]$') +public_name_re = re.compile(r"\\([a-zA-Z_][a-zA-Z0-9_]*(\[[0-9]+\])?|\[[0-9]+\])$") + +def pretty_name(id): + if public_name_re.match(id): + return id.lstrip("\\") + else: + return id + +def pretty_path(path): + out = "" + for name in path: + name = pretty_name(name) + if name.startswith("["): + out += name + continue + if out: + out += "." + if name.startswith("\\") or name.startswith("$"): + out += name + " " + else: + out += name + + return out + @dataclass class SbyProperty: @@ -44,18 +72,36 @@ def from_cell(c, name): raise ValueError("Unknown property type: " + name) name: str + path: Tuple[str, ...] type: Type location: str hierarchy: str status: str = field(default="UNKNOWN") - tracefile: str = field(default="") + tracefiles: str = field(default_factory=list) + + @property + def tracefile(self): + if self.tracefiles: + return self.tracefiles[0] + else: + return "" + + @property + def celltype(self): + return f"${str(self.type).lower()}" + + @property + def hdlname(self): + return pretty_path(self.path).rstrip() + def __repr__(self): - return f"SbyProperty<{self.type} {self.name} at {self.location}: status={self.status}, tracefile=\"{self.tracefile}\">" + return f"SbyProperty<{self.type} {self.name} {self.path} at {self.location}: status={self.status}, tracefile=\"{self.tracefile}\">" @dataclass class SbyModule: name: str + path: Tuple[str, ...] 
type: str submodules: dict = field(default_factory=dict) properties: list = field(default_factory=list) @@ -105,15 +151,32 @@ class SbyDesign: hierarchy: SbyModule = None memory_bits: int = 0 forall: bool = False + properties_by_path: dict = field(default_factory=dict) + + def pass_unknown_asserts(self): + for prop in self.hierarchy: + if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": + prop.status = "PASS" + + +def cell_path(cell): + path = cell["attributes"].get("hdlname") + if path is None: + if cell["name"].startswith('$'): + return (cell["name"],) + else: + return ("\\" + cell["name"],) + else: + return tuple(f"\\{segment}" for segment in path.split()) def design_hierarchy(filename): design = SbyDesign(hierarchy=None) design_json = json.load(filename) - def make_mod_hier(instance_name, module_name, hierarchy=""): + def make_mod_hier(instance_name, module_name, hierarchy="", path=()): # print(instance_name,":", module_name) sub_hierarchy=f"{hierarchy}/{instance_name}" if hierarchy else instance_name - mod = SbyModule(name=instance_name, type=module_name) + mod = SbyModule(name=instance_name, path=path, type=module_name) for m in design_json["modules"]: if m["name"] == module_name: @@ -129,11 +192,17 @@ def make_mod_hier(instance_name, module_name, hierarchy=""): location = cell["attributes"]["src"] except KeyError: location = "" - p = SbyProperty(name=cell["name"], type=SbyProperty.Type.from_cell(sort["type"]), location=location, hierarchy=sub_hierarchy) + p = SbyProperty( + name=cell["name"], + path=(*path, *cell_path(cell)), + type=SbyProperty.Type.from_cell(sort["type"]), + location=location, + hierarchy=sub_hierarchy) mod.properties.append(p) if sort["type"][0] != '$' or sort["type"].startswith("$paramod"): for cell in sort["cells"]: - mod.submodules[cell["name"]] = make_mod_hier(cell["name"], sort["type"], hierarchy=sub_hierarchy) + mod.submodules[cell["name"]] = make_mod_hier( + cell["name"], sort["type"], sub_hierarchy, (*path, *cell_path(cell))) if sort["type"] in ["$mem", "$mem_v2"]: for cell in sort["cells"]: design.memory_bits += int(cell["parameters"]["WIDTH"], 2) * int(cell["parameters"]["SIZE"], 2) @@ -145,7 +214,10 @@ def make_mod_hier(instance_name, module_name, hierarchy=""): for m in design_json["modules"]: attrs = m["attributes"] if "top" in attrs and int(attrs["top"]) == 1: - design.hierarchy = make_mod_hier(m["name"], m["name"]) + design.hierarchy = make_mod_hier(m["name"], m["name"], "", (m["name"],)) + + for prop in design.hierarchy: + design.properties_by_path[prop.path[1:]] = prop return design else: raise ValueError("Cannot find top module") diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index 47224d88..1cb84b50 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -16,8 +16,9 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
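The path helpers added to sby_design.py above are self-contained, so the way they render hierarchical property names can be checked in isolation. A minimal sketch, using a few hypothetical path tuples of the shape produced by cell_path():

    import re

    public_name_re = re.compile(r"\\([a-zA-Z_][a-zA-Z0-9_]*(\[[0-9]+\])?|\[[0-9]+\])$")

    def pretty_name(id):
        # strip the RTLIL "\" escape from simple public identifiers
        if public_name_re.match(id):
            return id.lstrip("\\")
        else:
            return id

    def pretty_path(path):
        # join a tuple of RTLIL names into a readable hierarchical path
        out = ""
        for name in path:
            name = pretty_name(name)
            if name.startswith("["):
                out += name          # array index: appended without a separating dot
                continue
            if out:
                out += "."
            if name.startswith("\\") or name.startswith("$"):
                out += name + " "    # escaped or private names stay visually separated
            else:
                out += name
        return out

    # hypothetical property paths (not from a real design)
    print(pretty_path(("\\top", "\\inner", "\\foo")).rstrip())     # top.inner.foo
    print(pretty_path(("\\top", "\\mem", "\\[3]")).rstrip())       # top.mem[3]
    print(pretty_path(("\\top", "$assert$top.sv:42$1")).rstrip())  # top.$assert$top.sv:42$1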
# -import re, os, getopt, click +import re, getopt from sby_core import SbyProc +from sby_engine_aiger import aigsmt_exit_callback def run(mode, task, engine_idx, engine): abc_opts, abc_command = getopt.getopt(engine[1:], "", []) @@ -46,6 +47,21 @@ def run(mode, task, engine_idx, engine): else: task.error(f"Invalid ABC command {abc_command[0]}.") + smtbmc_vcd = task.opt_vcd and not task.opt_vcd_sim + run_aigsmt = smtbmc_vcd or (task.opt_append and task.opt_append_assume) + smtbmc_append = 0 + sim_append = 0 + log = task.log_prefix(f"engine_{engine_idx}") + + if task.opt_append_assume: + smtbmc_append = task.opt_append + elif smtbmc_vcd: + if not task.opt_append_assume: + log("For VCDs generated by smtbmc the option 'append_assume off' is ignored") + smtbmc_append = task.opt_append + else: + sim_append = task.opt_append + proc = SbyProc( task, f"engine_{engine_idx}", @@ -79,64 +95,8 @@ def output_callback(line): return line def exit_callback(retcode): - if proc_status is None: - task.error(f"engine_{engine_idx}: Could not determine engine status.") - - task.update_status(proc_status) - task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status}") - task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""") - - task.terminate() - - if proc_status == "FAIL" and task.opt_aigsmt != "none": - trace_prefix = f"engine_{engine_idx}/trace" - - smtbmc_opts = [] - smtbmc_opts += ["-s", task.opt_aigsmt] - if task.opt_tbtop is not None: - smtbmc_opts += ["--vlogtb-top", task.opt_tbtop] - smtbmc_opts += ["--noprogress", f"--append {task.opt_append}"] - if task.opt_vcd: - smtbmc_opts += [f"--dump-vcd {trace_prefix}.vcd"] - smtbmc_opts += [f"--dump-yw {trace_prefix}.yw", f"--dump-vlogtb {trace_prefix}_tb.v", f"--dump-smtc {trace_prefix}.smtc"] - - witness_proc = SbyProc( - task, f"engine_{engine_idx}", [], - f"cd {task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/trace.aiw model/design_aiger.ywa engine_{engine_idx}/trace.yw", - ) - - proc2 = SbyProc( - task, - f"engine_{engine_idx}", - [*task.model("smt2"), witness_proc], - f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/trace.yw model/design_smt2.smt2", - logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") - ) - - proc2_status = None - - def output_callback2(line): - nonlocal proc2_status - - match = re.match(r"^## [0-9: ]+ Status: FAILED", line) - if match: proc2_status = "FAIL" - - match = re.match(r"^## [0-9: ]+ Status: PASSED", line) - if match: proc2_status = "PASS" - - return line - - def exit_callback2(retcode): - if proc2_status is None: - task.error(f"engine_{engine_idx}: Could not determine aigsmt status.") - if proc2_status != "FAIL": - task.error(f"engine_{engine_idx}: Unexpected aigsmt status.") - - if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): - task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd") - - proc2.output_callback = output_callback2 - proc2.exit_callback = exit_callback2 + aigsmt_exit_callback(task, engine_idx, proc_status, + run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append, ) proc.output_callback = output_callback - proc.exit_callback = exit_callback + proc.register_exit_callback(exit_callback) diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index 18cb876c..d6e14d88 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ 
-18,6 +18,7 @@ import re, os, getopt, click from sby_core import SbyProc +from sby_sim import sim_witness_trace def run(mode, task, engine_idx, engine): opts, solver_args = getopt.getopt(engine[1:], "", []) @@ -51,6 +52,22 @@ def run(mode, task, engine_idx, engine): else: task.error(f"Invalid solver command {solver_args[0]}.") + smtbmc_vcd = task.opt_vcd and not task.opt_vcd_sim + run_aigsmt = (mode != "live") and (smtbmc_vcd or (task.opt_append and task.opt_append_assume)) + smtbmc_append = 0 + sim_append = 0 + log = task.log_prefix(f"engine_{engine_idx}") + + if mode != "live": + if task.opt_append_assume: + smtbmc_append = task.opt_append + elif smtbmc_vcd: + if not task.opt_append_assume: + log("For VCDs generated by smtbmc the option 'append_assume off' is ignored") + smtbmc_append = task.opt_append + else: + sim_append = task.opt_append + proc = SbyProc( task, f"engine_{engine_idx}", @@ -92,44 +109,48 @@ def output_callback(line): return None def exit_callback(retcode): + aiw_file.close() + aigsmt_exit_callback(task, engine_idx, proc_status, + run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append, ) + + proc.output_callback = output_callback + proc.register_exit_callback(exit_callback) + + +def aigsmt_exit_callback(task, engine_idx, proc_status, *, run_aigsmt, smtbmc_vcd, smtbmc_append, sim_append): if proc_status is None: task.error(f"engine_{engine_idx}: Could not determine engine status.") - aiw_file.close() - task.update_status(proc_status) - task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status}") - task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""") - + task.summary.set_engine_status(engine_idx, proc_status) task.terminate() - if proc_status == "FAIL" and task.opt_aigsmt != "none": - if produced_cex: - trace_prefix = f"engine_{engine_idx}/trace" + if proc_status == "FAIL" and (not run_aigsmt or task.opt_aigsmt != "none"): + trace_prefix = f"engine_{engine_idx}/trace" + aiw2yw_suffix = '_aiw' if run_aigsmt else '' + + witness_proc = SbyProc( + task, f"engine_{engine_idx}", [], + f"cd {task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/trace.aiw model/design_aiger.ywa engine_{engine_idx}/trace{aiw2yw_suffix}.yw", + ) + yw_proc = witness_proc + + if run_aigsmt: smtbmc_opts = [] - if mode == "live": - smtbmc_opts += ["-g"] smtbmc_opts += ["-s", task.opt_aigsmt] if task.opt_tbtop is not None: smtbmc_opts += ["--vlogtb-top", task.opt_tbtop] - smtbmc_opts += ["--noprogress"] - if mode != "live": - smtbmc_opts += [f"--append {task.opt_append}"] - if task.opt_vcd: + smtbmc_opts += ["--noprogress", f"--append {smtbmc_append}"] + if smtbmc_vcd: smtbmc_opts += [f"--dump-vcd {trace_prefix}.vcd"] smtbmc_opts += [f"--dump-yw {trace_prefix}.yw", f"--dump-vlogtb {trace_prefix}_tb.v", f"--dump-smtc {trace_prefix}.smtc"] - witness_proc = SbyProc( - task, f"engine_{engine_idx}", [], - f"cd {task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/trace.aiw model/design_aiger.ywa engine_{engine_idx}/trace.yw", - ) - proc2 = SbyProc( task, f"engine_{engine_idx}", [*task.model("smt2"), witness_proc], - f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/trace.yw model/design_smt2.smt2", + f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/trace{aiw2yw_suffix}.yw model/design_smt2.smt2", 
logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) @@ -146,20 +167,21 @@ def output_callback2(line): return line - def exit_callback2(line): + def exit_callback2(retcode): if proc2_status is None: task.error(f"engine_{engine_idx}: Could not determine aigsmt status.") - if proc2_status != ("PASS" if mode == "live" else "FAIL"): + if proc2_status != "FAIL": task.error(f"engine_{engine_idx}: Unexpected aigsmt status.") - if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): - task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd") + task.summary.add_event(engine_idx, trace="trace", path=f"engine_{engine_idx}/trace.vcd", type="$assert") proc2.output_callback = output_callback2 - proc2.exit_callback = exit_callback2 + proc2.register_exit_callback(exit_callback2) + + yw_proc = proc2 + + if task.opt_fst or (task.opt_vcd and task.opt_vcd_sim): + sim_witness_trace(f"engine_{engine_idx}", task, engine_idx, f"engine_{engine_idx}/trace.yw", append=sim_append, deps=[yw_proc]) else: task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Engine did not produce a counter example.") - - proc.output_callback = output_callback - proc.exit_callback = exit_callback diff --git a/sbysrc/sby_engine_btor.py b/sbysrc/sby_engine_btor.py index 284a49e8..a3e744e3 100644 --- a/sbysrc/sby_engine_btor.py +++ b/sbysrc/sby_engine_btor.py @@ -19,6 +19,7 @@ import re, os, getopt, click from types import SimpleNamespace from sby_core import SbyProc +from sby_sim import sim_witness_trace def run(mode, task, engine_idx, engine): random_seed = None @@ -54,14 +55,27 @@ def run(mode, task, engine_idx, engine): else: task.error(f"Invalid solver command {solver_args[0]}.") + log = task.log_prefix(f"engine_{engine_idx}") + + btorsim_vcd = task.opt_vcd and not task.opt_vcd_sim + run_sim = task.opt_fst or not btorsim_vcd + sim_append = 0 + + if task.opt_append and btorsim_vcd: + log("The BTOR engine does not support the 'append' option when using btorsim.") + else: + sim_append = task.opt_append + + if task.opt_append and task.opt_append_assume: + log("The BTOR engine does not support enforcing assumptions in appended time steps.") + + common_state = SimpleNamespace() common_state.solver_status = None common_state.produced_cex = 0 common_state.expected_cex = 1 common_state.wit_file = None common_state.assert_fail = False - common_state.produced_traces = [] - common_state.print_traces_max = 5 common_state.running_procs = 0 def print_traces_and_terminate(): @@ -87,17 +101,7 @@ def print_traces_and_terminate(): task.error(f"engine_{engine_idx}: Engine terminated without status.") task.update_status(proc_status.upper()) - task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status}") - task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status}""") - - if len(common_state.produced_traces) == 0: - task.log(f"""{click.style(f'engine_{engine_idx}', fg='magenta')}: Engine did not produce a{" counter" if mode != "cover" else "n "}example.""") - elif len(common_state.produced_traces) <= common_state.print_traces_max: - task.summary.extend(common_state.produced_traces) - else: - task.summary.extend(common_state.produced_traces[:common_state.print_traces_max]) - excess_traces = len(common_state.produced_traces) - common_state.print_traces_max - task.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""") + task.summary.set_engine_status(engine_idx, proc_status) 
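The engine changes above all follow the same pattern for deciding whether the appended time steps are produced by smtbmc (which can uphold assumptions) or by the Yosys sim based trace generation. A simplified sketch of that decision, using plain values instead of the task options object:

    def split_append(opt_append, opt_append_assume, opt_vcd, opt_vcd_sim):
        # mirrors the engine-side option handling above (simplified, no logging)
        smtbmc_vcd = opt_vcd and not opt_vcd_sim
        smtbmc_append = 0
        sim_append = 0
        if opt_append_assume:
            smtbmc_append = opt_append   # smtbmc appends and enforces assumptions
        elif smtbmc_vcd:
            smtbmc_append = opt_append   # 'append_assume off' is ignored for smtbmc VCDs
        else:
            sim_append = opt_append      # sim appends; assumptions may be violated there
        return smtbmc_append, sim_append

    # hypothetical settings: append 3, append_assume off, VCD generated by sim
    print(split_append(3, False, True, True))   # (0, 3)
    # append_assume on always routes the appended cycles to smtbmc
    print(split_append(3, True, True, False))   # (3, 0)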
task.terminate() @@ -113,9 +117,9 @@ def output_callback2(line): def make_exit_callback(suffix): def exit_callback2(retcode): - vcdpath = f"{task.workdir}/engine_{engine_idx}/trace{suffix}.vcd" - if os.path.exists(vcdpath): - common_state.produced_traces.append(f"""{"" if mode == "cover" else "counterexample "}trace: {vcdpath}""") + vcdpath = f"engine_{engine_idx}/trace{suffix}.vcd" + if os.path.exists(f"{task.workdir}/{vcdpath}"): + task.summary.add_event(engine_idx=engine_idx, trace=f'trace{suffix}', path=vcdpath, type="$cover" if mode == "cover" else "$assert") common_state.running_procs -= 1 if (common_state.running_procs == 0): @@ -123,6 +127,11 @@ def exit_callback2(retcode): return exit_callback2 + def simple_exit_callback(retcode): + common_state.running_procs -= 1 + if (common_state.running_procs == 0): + print_traces_and_terminate() + def output_callback(line): if mode == "cover": if solver_args[0] == "btormc": @@ -153,20 +162,39 @@ def output_callback(line): else: suffix = common_state.produced_cex - if mode == "cover" or task.opt_vcd: + model = f"design_btor{'_single' if solver_args[0] == 'pono' else ''}" + + yw_proc = SbyProc( + task, f"engine_{engine_idx}.trace{suffix}", [], + f"cd {task.workdir}; {task.exe_paths['witness']} wit2yw engine_{engine_idx}/trace{suffix}.wit model/{model}.ywb engine_{engine_idx}/trace{suffix}.yw", + ) + common_state.running_procs += 1 + yw_proc.register_exit_callback(simple_exit_callback) + + btorsim_vcd = (task.opt_vcd and not task.opt_vcd_sim) + + if btorsim_vcd: # TODO cover runs btorsim not only for trace generation, can we run it without VCD generation in that case? proc2 = SbyProc( task, - f"engine_{engine_idx}_{common_state.produced_cex}", + f"engine_{engine_idx}.trace{suffix}", task.model("btor"), - "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}.vcd --hierarchical-symbols --info model/design_btor{s}.info model/design_btor{s}.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix, s='_single' if solver_args[0] == 'pono' else ''), + "cd {dir} ; btorsim -c --vcd engine_{idx}/trace{i}{i2}.vcd --hierarchical-symbols --info model/design_btor{s}.info model/design_btor{s}.btor engine_{idx}/trace{i}.wit".format(dir=task.workdir, idx=engine_idx, i=suffix, i2='' if btorsim_vcd else '_btorsim', s='_single' if solver_args[0] == 'pono' else ''), logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") ) proc2.output_callback = output_callback2 - proc2.exit_callback = make_exit_callback(suffix) + if run_sim: + proc2.register_exit_callback(simple_exit_callback) + else: + proc2.register_exit_callback(make_exit_callback(suffix)) proc2.checkretcode = True common_state.running_procs += 1 + if run_sim: + sim_proc = sim_witness_trace(f"engine_{engine_idx}", task, engine_idx, f"engine_{engine_idx}/trace{suffix}.yw", append=sim_append, deps=[yw_proc]) + sim_proc.register_exit_callback(simple_exit_callback) + common_state.running_procs += 1 + common_state.produced_cex += 1 common_state.wit_file.close() common_state.wit_file = None @@ -226,5 +254,5 @@ def exit_callback(retcode): if solver_args[0] == "pono": proc.retcodes = [0, 1, 255] # UNKNOWN = -1, FALSE = 0, TRUE = 1, ERROR = 2 proc.output_callback = output_callback - proc.exit_callback = exit_callback + proc.register_exit_callback(exit_callback) common_state.running_procs += 1 diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 6c192680..c5f348e3 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -16,8 +16,9 @@ # OR 
IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import re, os, getopt, click +import re, os, getopt, click, glob from sby_core import SbyProc +from sby_sim import sim_witness_trace def run(mode, task, engine_idx, engine): smtbmc_opts = [] @@ -141,20 +142,37 @@ def run(mode, task, engine_idx, engine): if not progress: smtbmc_opts.append("--noprogress") - if task.opt_skip is not None: t_opt = "{}:{}".format(task.opt_skip, task.opt_depth) else: t_opt = "{}".format(task.opt_depth) + smtbmc_vcd = task.opt_vcd and not task.opt_vcd_sim + + smtbmc_append = 0 + sim_append = 0 + + log = task.log_prefix(f"engine_{engine_idx}") + + if task.opt_append_assume: + smtbmc_append = task.opt_append + elif smtbmc_vcd: + if not task.opt_append_assume: + log("For VCDs generated by smtbmc the option 'append_assume off' is ignored") + smtbmc_append = task.opt_append + else: + sim_append = task.opt_append + + trace_ext = 'fst' if task.opt_fst else 'vcd' + random_seed = f"--info \"(set-option :random-seed {random_seed})\"" if random_seed else "" - dump_flags = f"--dump-vcd {trace_prefix}.vcd " if task.opt_vcd else "" + dump_flags = f"--dump-vcd {trace_prefix}.vcd " if smtbmc_vcd else "" dump_flags += f"--dump-yw {trace_prefix}.yw --dump-vlogtb {trace_prefix}_tb.v --dump-smtc {trace_prefix}.smtc" proc = SbyProc( task, procname, task.model(model_name), - f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {task.opt_append} {dump_flags} model/design_{model_name}.smt2""", + f"""cd {task.workdir}; {task.exe_paths["smtbmc"]} {" ".join(smtbmc_opts)} -t {t_opt} {random_seed} --append {smtbmc_append} {dump_flags} model/design_{model_name}.smt2""", logfile=open(logfile_prefix + ".txt", "w"), logstderr=(not progress) ) @@ -167,12 +185,30 @@ def run(mode, task, engine_idx, engine): proc_status = None last_prop = [] + pending_sim = None + current_step = None + procs_running = 1 def output_callback(line): nonlocal proc_status nonlocal last_prop + nonlocal pending_sim + nonlocal current_step + nonlocal procs_running + + if pending_sim: + sim_proc = sim_witness_trace(procname, task, engine_idx, pending_sim, append=sim_append) + sim_proc.register_exit_callback(simple_exit_callback) + procs_running += 1 + pending_sim = None + smt2_trans = {'\\':'/', '|':'/'} + match = re.match(r"^## [0-9: ]+ .* in step ([0-9]+)\.\.", line) + if match: + current_step = int(match[1]) + return line + match = re.match(r"^## [0-9: ]+ Status: FAILED", line) if match: proc_status = "FAIL" @@ -193,31 +229,45 @@ def output_callback(line): proc_status = "ERROR" return line - match = re.match(r"^## [0-9: ]+ Assert failed in (\S+): (\S+) \((\S+)\)", line) + match = re.match(r"^## [0-9: ]+ Assert failed in (\S+): (\S+)(?: \((\S+)\))?", line) if match: - cell_name = match[3] + cell_name = match[3] or match[2] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" last_prop.append(prop) return line - match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+) \((\S+)\) in step \d+.", line) + match = re.match(r"^## [0-9: ]+ Reached cover statement at (\S+)(?: \((\S+)\))? 
in step \d+\.", line) if match: - cell_name = match[2] + cell_name = match[2] or match[1] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "PASS" last_prop.append(prop) return line - match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) - if match and last_prop: - for p in last_prop: - if not p.tracefile: - p.tracefile = match[1] - last_prop = [] - return line + if smtbmc_vcd and not task.opt_fst: + match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) + if match: + tracefile = match[1] + trace = os.path.basename(tracefile)[:-4] + engine_case = mode.split('_')[1] if '_' in mode else None + task.summary.add_event(engine_idx=engine_idx, trace=trace, path=tracefile, engine_case=engine_case) + + if match and last_prop: + for p in last_prop: + task.summary.add_event( + engine_idx=engine_idx, trace=trace, + type=p.celltype, hdlname=p.hdlname, src=p.location, step=current_step) + p.tracefiles.append(tracefile) + last_prop = [] + return line + else: + match = re.match(r"^## [0-9: ]+ Writing trace to Yosys witness file: (\S+)", line) + if match: + tracefile = match[1] + pending_sim = tracefile - match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\).", line) + match = re.match(r"^## [0-9: ]+ Unreached cover statement at (\S+) \((\S+)\)\.", line) if match: cell_name = match[2] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) @@ -225,43 +275,28 @@ def output_callback(line): return line + def simple_exit_callback(retcode): + nonlocal procs_running + procs_running -= 1 + if not procs_running: + last_exit_callback() + def exit_callback(retcode): if proc_status is None: task.error(f"engine_{engine_idx}: Engine terminated without status.") + simple_exit_callback(retcode) + def last_exit_callback(): if mode == "bmc" or mode == "cover": task.update_status(proc_status) proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status - task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine: {proc_status_lower}") - task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status_lower}""") - - if proc_status == "FAIL" and mode != "cover": - if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): - task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd") - elif proc_status == "PASS" and mode == "cover": - print_traces_max = 5 - for i in range(print_traces_max): - if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace{i}.vcd"): - task.summary.append(f"trace: {task.workdir}/engine_{engine_idx}/trace{i}.vcd") - else: - break - else: - excess_traces = 0 - while os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace{print_traces_max + excess_traces}.vcd"): - excess_traces += 1 - if excess_traces > 0: - task.summary.append(f"""and {excess_traces} further trace{"s" if excess_traces > 1 else ""}""") - elif proc_status == "PASS" and mode == "bmc": - for prop in task.design.hierarchy: - if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": - prop.status = "PASS" + task.summary.set_engine_status(engine_idx, proc_status_lower) task.terminate() elif mode in ["prove_basecase", "prove_induction"]: proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status - task.log(f"""{click.style(f'engine_{engine_idx}', fg='magenta')}: Status returned by engine for {mode.split("_")[1]}: {proc_status_lower}""") - 
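The updated log parsing above makes the parenthesized cell name optional and falls back to the name printed after the colon when it is missing. A standalone check of the assert regex, using made-up smtbmc log lines for illustration:

    import re

    assert_re = re.compile(r"^## [0-9: ]+ Assert failed in (\S+): (\S+)(?: \((\S+)\))?")

    # hypothetical smtbmc log lines, with and without the "(cellname)" suffix
    with_cell    = "## 0:00:01 Assert failed in test: foo.sv:7.3-7.22 ($assert$foo.sv:7$1)"
    without_cell = "## 0:00:01 Assert failed in test: assert_foo"

    for line in (with_cell, without_cell):
        match = assert_re.match(line)
        cell_name = match[3] or match[2]   # same fallback as in output_callback()
        print(cell_name)
    # -> $assert$foo.sv:7$1
    # -> assert_foo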
task.summary.append(f"""engine_{engine_idx} ({" ".join(engine)}) returned {proc_status_lower} for {mode.split("_")[1]}""") + task.summary.set_engine_status(engine_idx, proc_status_lower, mode.split("_")[1]) if mode == "prove_basecase": for proc in task.basecase_procs: @@ -272,8 +307,6 @@ def exit_callback(retcode): else: task.update_status(proc_status) - if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): - task.summary.append(f"counterexample trace: {task.workdir}/engine_{engine_idx}/trace.vcd") task.terminate() elif mode == "prove_induction": @@ -287,9 +320,6 @@ def exit_callback(retcode): assert False if task.basecase_pass and task.induction_pass: - for prop in task.design.hierarchy: - if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": - prop.status = "PASS" task.update_status("PASS") task.summary.append("successful proof by k-induction.") task.terminate() @@ -298,4 +328,4 @@ def exit_callback(retcode): assert False proc.output_callback = output_callback - proc.exit_callback = exit_callback + proc.register_exit_callback(exit_callback) diff --git a/sbysrc/sby_mode_bmc.py b/sbysrc/sby_mode_bmc.py index 9ba624cd..173812fd 100644 --- a/sbysrc/sby_mode_bmc.py +++ b/sbysrc/sby_mode_bmc.py @@ -21,7 +21,6 @@ def run(task): task.handle_int_option("depth", 20) - task.handle_int_option("append", 0) task.handle_str_option("aigsmt", "yices") for engine_idx, engine in task.engine_list(): diff --git a/sbysrc/sby_mode_cover.py b/sbysrc/sby_mode_cover.py index 3a5fbe91..c94c6396 100644 --- a/sbysrc/sby_mode_cover.py +++ b/sbysrc/sby_mode_cover.py @@ -21,7 +21,6 @@ def run(task): task.handle_int_option("depth", 20) - task.handle_int_option("append", 0) for engine_idx, engine in task.engine_list(): task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: {' '.join(engine)}") diff --git a/sbysrc/sby_mode_prove.py b/sbysrc/sby_mode_prove.py index b289f314..e50fbfdb 100644 --- a/sbysrc/sby_mode_prove.py +++ b/sbysrc/sby_mode_prove.py @@ -21,7 +21,6 @@ def run(task): task.handle_int_option("depth", 20) - task.handle_int_option("append", 0) task.handle_str_option("aigsmt", "yices") task.status = "UNKNOWN" diff --git a/sbysrc/sby_sim.py b/sbysrc/sby_sim.py new file mode 100644 index 00000000..dee99de3 --- /dev/null +++ b/sbysrc/sby_sim.py @@ -0,0 +1,96 @@ +# +# SymbiYosys (sby) -- Front-end for Yosys-based formal verification flows +# +# Copyright (C) 2022 Jannis Harder +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
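The sim_witness_trace() helper introduced in the new sby_sim.py below reads the JSON summary written by Yosys' sim pass and treats assumption failures differently depending on whether they fall into the original or the appended part of the trace. A rough sketch of that classification, with invented step numbers and a summary dict reduced to the keys the callback actually reads:

    # Sketch only: the summary dict is hypothetical, not real sim output.
    append = 3
    summary = {
        "steps": 12,
        "assertions": [
            {"type": "$assert", "step": 7,  "path": ["inner", "a0"]},
            {"type": "$assume", "step": 5,  "path": ["inner", "u0"]},
            {"type": "$assume", "step": 11, "path": ["inner", "u1"]},
        ],
    }

    first_appended = summary["steps"] + 1 - append   # == 10
    for assertion in summary["assertions"]:
        if assertion["type"] != "$assume":
            continue
        if assertion["step"] < first_appended:
            print("error: trace violates assumptions during simulation")        # step 5
        else:
            print("warning: assumption only violated in appended time steps")   # step 11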
+# + +import os, re, glob, json +from sby_core import SbyProc +from sby_design import pretty_path + +def sim_witness_trace(prefix, task, engine_idx, witness_file, *, append, deps=()): + trace_name = os.path.basename(witness_file)[:-3] + formats = [] + tracefile = None + if task.opt_vcd and task.opt_vcd_sim: + tracefile = f"engine_{engine_idx}/{trace_name}.vcd" + formats.append(f"-vcd {trace_name}.vcd") + if task.opt_fst: + tracefile = f"engine_{engine_idx}/{trace_name}.fst" + formats.append(f"-fst {trace_name}.fst") + + # for warnings / error messages + error_tracefile = f"{task.workdir}/{tracefile}" or f"{task.workdir}/engine_{engine_idx}/{trace_name}.yw" + + sim_log = task.log_prefix(f"{prefix}.{trace_name}") + + sim_log(f"Generating simulation trace for witness file: {witness_file}") + + with open(f"{task.workdir}/engine_{engine_idx}/{trace_name}.ys", "w") as f: + print(f"# running in {task.workdir}/engine_{engine_idx}/", file=f) + print(f"read_rtlil ../model/design_prep.il", file=f) + print(f"sim -hdlname -summary {trace_name}.json -append {append} -r {trace_name}.yw {' '.join(formats)}", file=f) + + def exit_callback(retval): + + if task.design: + task.precise_prop_status = True + + assertion_types = set() + + with open(f"{task.workdir}/engine_{engine_idx}/{trace_name}.json") as summary: + summary = json.load(summary) + for assertion in summary["assertions"]: + assertion["path"] = tuple(assertion["path"]) + + first_appended = summary["steps"] + 1 - append + + printed_assumption_warning = False + + task.summary.add_event(engine_idx=engine_idx, trace=trace_name, path=tracefile) + + for assertion in summary["assertions"]: + if task.design: + prop = task.design.properties_by_path[tuple(assertion["path"])] + else: + prop = None + + hdlname = pretty_path((summary['top'], *assertion['path'])).rstrip() + task.summary.add_event( + engine_idx=engine_idx, + trace=trace_name, path=tracefile, hdlname=hdlname, + type=assertion["type"], src=assertion.get("src"), step=assertion["step"], + prop=prop) + + assertion_types.add(assertion["type"]) + + if assertion["type"] == '$assume': + if assertion["step"] < first_appended: + task.error(f"produced trace {error_tracefile!r} violates assumptions during simulation") + elif not printed_assumption_warning: + sim_log(f"Warning: trace {error_tracefile!r} violates assumptions during simulation of the appended time steps.") + if not task.opt_append_assume: + sim_log("For supported engines, the option 'append_assume on' can be used to find inputs that uphold assumptions during appended time steps.") + printed_assumption_warning = True + + proc = SbyProc( + task, + f"{prefix}.{trace_name}", + deps, + f"""cd {task.workdir}/engine_{engine_idx}; {task.exe_paths["yosys"]} -ql {trace_name}.log {trace_name}.ys""", + ) + proc.noprintregex = re.compile(r"Warning: Assert .* failed.*") + proc.register_exit_callback(exit_callback) + return proc diff --git a/tests/keepgoing/keepgoing_multi_step.py b/tests/keepgoing/keepgoing_multi_step.py index c724c663..548f9d2b 100644 --- a/tests/keepgoing/keepgoing_multi_step.py +++ b/tests/keepgoing/keepgoing_multi_step.py @@ -11,7 +11,7 @@ step_7 = line_ref(workdir, src, "step 7") log = open(workdir + "/logfile.txt").read() -log_per_trace = log.split("Writing trace to VCD file")[:-1] +log_per_trace = log.split("Writing trace to Yosys witness file")[:-1] assert len(log_per_trace) == 4 @@ -27,5 +27,5 @@ assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) assert re.search(r"Assert 
failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) -pattern = f"Property ASSERT in test at {assert_0} failed. Trace file: engine_0/trace0.vcd" +pattern = f"Property ASSERT in test at {assert_0} failed. Trace file: engine_0/trace0.(vcd|fst)" assert re.search(pattern, open(f"{workdir}/{workdir}.xml").read()) diff --git a/tests/keepgoing/keepgoing_same_step.py b/tests/keepgoing/keepgoing_same_step.py index e2739167..206d1b3e 100644 --- a/tests/keepgoing/keepgoing_same_step.py +++ b/tests/keepgoing/keepgoing_same_step.py @@ -9,7 +9,7 @@ assert_0 = line_ref(workdir, src, "assert(0)") log = open(workdir + "/logfile.txt").read() -log_per_trace = log.split("Writing trace to VCD file")[:-1] +log_per_trace = log.split("Writing trace to Yosys witness file")[:-1] assert len(log_per_trace) == 2 diff --git a/tests/keepgoing/keepgoing_smtc.py b/tests/keepgoing/keepgoing_smtc.py index e0fd27db..b41a7988 100644 --- a/tests/keepgoing/keepgoing_smtc.py +++ b/tests/keepgoing/keepgoing_smtc.py @@ -12,7 +12,7 @@ assert_distinct = line_ref(workdir, "extra.smtc", "assert (distinct") log = open(workdir + "/logfile.txt").read() -log_per_trace = log.split("Writing trace to VCD file")[:-1] +log_per_trace = log.split("Writing trace to Yosys witness file")[:-1] assert len(log_per_trace) == 4 diff --git a/tests/unsorted/btor_meminit.sby b/tests/unsorted/btor_meminit.sby new file mode 100644 index 00000000..ca584a5f --- /dev/null +++ b/tests/unsorted/btor_meminit.sby @@ -0,0 +1,48 @@ +[tasks] +btormc +#pono +smtbmc + +[options] +mode bmc +expect fail + +[engines] +btormc: btor btormc +# pono: btor pono +smtbmc: smtbmc + +[script] +read -formal top.sv +prep -top top -flatten + +[file top.sv] + +module top(input clk); + + inner inner(clk); + +endmodule + +module inner(input clk); + reg [7:0] counter = 0; + + reg [1:0] mem [0:255]; + + initial begin + mem[0] = 0; + mem[1] = 1; + mem[2] = 2; + mem[3] = 2; + mem[4] = 0; + mem[7] = 0; + end + + always @(posedge clk) begin + counter <= counter + 1; + foo: assert (mem[counter] < 3); + bar: assume (counter < 7); + + mem[counter] <= 0; + end +endmodule diff --git a/tests/unsorted/cover_unreachable.sby b/tests/unsorted/cover_unreachable.sby new file mode 100644 index 00000000..63ebcc75 --- /dev/null +++ b/tests/unsorted/cover_unreachable.sby @@ -0,0 +1,34 @@ +[tasks] +btormc +smtbmc + +[options] +mode cover +expect fail + +[engines] +btormc: btor btormc +smtbmc: smtbmc + +[script] +read -formal top.sv +prep -top top -flatten + +[file top.sv] + +module top(input clk); + + inner inner(clk); + +endmodule + +module inner(input clk); + reg [7:0] counter = 0; + + always @(posedge clk) begin + counter <= counter == 4 ? 
0 : counter + 1; + + reachable: cover (counter == 3); + unreachable: cover (counter == 5); + end +endmodule From 06c36d5bb0d8eb35db60ba44798477c3867e4644 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 10 Jan 2023 16:17:49 +0100 Subject: [PATCH 180/220] Support "fifo:" make jobserver auth --- sbysrc/sby_jobserver.py | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/sbysrc/sby_jobserver.py b/sbysrc/sby_jobserver.py index b7600079..a3501335 100644 --- a/sbysrc/sby_jobserver.py +++ b/sbysrc/sby_jobserver.py @@ -58,15 +58,24 @@ def process_jobserver_environment(): elif flag.startswith("--jobserver-auth=") or flag.startswith("--jobserver-fds="): inherited_jobserver_auth_present = True if os.name == "posix": - arg = flag.split("=", 1)[1].split(",") - try: - jobserver_fds = int(arg[0]), int(arg[1]) - for fd in jobserver_fds: - fcntl.fcntl(fd, fcntl.F_GETFD) - except (ValueError, OSError): - pass + arg = flag.split("=", 1)[1] + if arg.startswith("fifo:"): + try: + fd = os.open(arg[5:], os.O_RDWR) + except FileNotFoundError: + pass + else: + inherited_jobserver_auth = fd, fd else: - inherited_jobserver_auth = jobserver_fds + arg = arg.split(",") + try: + jobserver_fds = int(arg[0]), int(arg[1]) + for fd in jobserver_fds: + fcntl.fcntl(fd, fcntl.F_GETFD) + except (ValueError, OSError): + pass + else: + inherited_jobserver_auth = jobserver_fds def jobserver_helper(jobserver_read_fd, jobserver_write_fd, request_fd, response_fd): From 6398938e6ad67080c978b680377a0a9647045317 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 11 Jan 2023 18:02:45 +0100 Subject: [PATCH 181/220] Enable yosys sim support for clock signals in hierarchical designs --- sbysrc/sby_core.py | 5 ++++- sbysrc/sby_sim.py | 5 ++++- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index c91561f4..f0fcf293 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -989,7 +989,7 @@ def make_model(self, model_name): print("async2sync", file=f) print("chformal -assume -early", file=f) print("opt_clean", file=f) - print("formalff -setundef -clk2ff -ff2anyinit", file=f) + print("formalff -setundef -clk2ff -ff2anyinit -hierarchy", file=f) if self.opt_mode in ["bmc", "prove"]: print("chformal -live -fair -cover -remove", file=f) if self.opt_mode == "cover": @@ -1051,6 +1051,7 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print(f"""read_ilang design_prep.il""", file=f) print("hierarchy -smtcheck", file=f) + print("formalff -assume", file=f) if "_nomem" in model_name: print("memory_map -formal", file=f) print("formalff -setundef -clk2ff -ff2anyinit", file=f) @@ -1083,6 +1084,7 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print(f"""read_ilang design_prep.il""", file=f) print("hierarchy -simcheck", file=f) + print("formalff -assume", file=f) if "_nomem" in model_name: print("memory_map -formal", file=f) print("formalff -setundef -clk2ff -ff2anyinit", file=f) @@ -1117,6 +1119,7 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print("read_ilang design_prep.il", file=f) print("hierarchy -simcheck", file=f) + print("formalff -assume", file=f) print("flatten", file=f) print("setundef -undriven -anyseq", file=f) print("setattr -unset keep", file=f) diff --git a/sbysrc/sby_sim.py b/sbysrc/sby_sim.py index dee99de3..0025ed8e 100644 --- a/sbysrc/sby_sim.py +++ 
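The jobserver change above accepts both the classic file descriptor pair and the fifo: form of --jobserver-auth used by newer GNU make. A minimal parsing sketch, without the fd validation done in process_jobserver_environment(), and with a hypothetical fifo path:

    def parse_jobserver_auth(arg):
        # simplified version of the handling in process_jobserver_environment()
        if arg.startswith("fifo:"):
            return ("fifo", arg[5:])          # fifo form used by newer GNU make
        read_fd, write_fd = arg.split(",")
        return ("fds", (int(read_fd), int(write_fd)))

    print(parse_jobserver_auth("fifo:/tmp/jobserver-fifo"))  # ('fifo', '/tmp/jobserver-fifo')
    print(parse_jobserver_auth("3,4"))                       # ('fds', (3, 4))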
b/sbysrc/sby_sim.py @@ -63,7 +63,10 @@ def exit_callback(retval): for assertion in summary["assertions"]: if task.design: - prop = task.design.properties_by_path[tuple(assertion["path"])] + try: + prop = task.design.properties_by_path[tuple(assertion["path"])] + except KeyError: + prop = None else: prop = None From f14aaa57c4bb20c36b8e09252443948c2b3a56c1 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 11 Jan 2023 18:36:06 +0100 Subject: [PATCH 182/220] avy: Fold aiger model using abc to support assumptions --- sbysrc/sby_core.py | 12 ++++++++++++ sbysrc/sby_engine_aiger.py | 7 +++++-- 2 files changed, 17 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index f0fcf293..ae28b6ae 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1146,6 +1146,18 @@ def instance_hierarchy_error_callback(retcode): return [proc] + if model_name == "aig_fold": + proc = SbyProc( + self, + model_name, + self.model("aig"), + f"""cd {self.workdir}/model; {self.exe_paths["abc"]} -c 'read_aiger design_aiger.aig; fold; strash; write_aiger design_aiger_fold.aig'""", + logfile=open(f"{self.workdir}/model/design_aiger_fold.log", "w") + ) + proc.checkretcode = True + + return [proc] + self.error(f"Invalid model name: {model_name}") def model(self, model_name): diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index d6e14d88..d7a5a310 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -31,6 +31,8 @@ def run(mode, task, engine_idx, engine): status_2 = "UNKNOWN" + model_variant = "" + if solver_args[0] == "suprove": if mode not in ["live", "prove"]: task.error("The aiger solver 'suprove' is only supported in live and prove modes.") @@ -39,6 +41,7 @@ def run(mode, task, engine_idx, engine): solver_cmd = " ".join([task.exe_paths["suprove"]] + solver_args[1:]) elif solver_args[0] == "avy": + model_variant = "_fold" if mode != "prove": task.error("The aiger solver 'avy' is only supported in prove mode.") solver_cmd = " ".join([task.exe_paths["avy"], "--cex", "-"] + solver_args[1:]) @@ -71,8 +74,8 @@ def run(mode, task, engine_idx, engine): proc = SbyProc( task, f"engine_{engine_idx}", - task.model("aig"), - f"cd {task.workdir}; {solver_cmd} model/design_aiger.aig", + task.model(f"aig{model_variant}"), + f"cd {task.workdir}; {solver_cmd} model/design_aiger{model_variant}.aig", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w") ) if solver_args[0] not in ["avy"]: From 6d1ef8b5a0bf0ad9678ba152e5e2bf458aabbb55 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 6 Feb 2023 16:54:37 +0100 Subject: [PATCH 183/220] docs: Yices is still recommended --- docs/source/install.rst | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 604737b6..2b28e30b 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -102,6 +102,20 @@ To use the ``btor`` engine you will need to install btor2tools from `commit c35cf1c `_ or newer. +Yices 2 +------- + +http://yices.csl.sri.com/ + +.. 
code-block:: text + + git clone https://github.com/SRI-CSL/yices2.git yices2 + cd yices2 + autoconf + ./configure + make -j$(nproc) + sudo make install + Optional components ------------------- Additional solver engines can be installed as per their instructions, links are @@ -112,12 +126,6 @@ Z3 https://github.com/Z3Prover/z3 -Yices 2 -^^^^^^^ - http://yices.csl.sri.com/ - - https://github.com/SRI-CSL/yices2 - super_prove ^^^^^^^^^^^ https://github.com/sterin/super-prove-build From 7a3b88ca6734e9202ed042914750b57be3e06b92 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 6 Feb 2023 16:55:19 +0100 Subject: [PATCH 184/220] docs: Document new sim related options --- docs/source/reference.rst | 117 +++++++++++++++++++++----------------- 1 file changed, 65 insertions(+), 52 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index 29056f3f..d3dae3fa 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -129,58 +129,71 @@ Mode Description All other options have default values and thus are optional. The available options are: -+------------------+------------+---------------------------------------------------------+ -| Option | Modes | Description | -+==================+============+=========================================================+ -| ``expect`` | All | Expected result as comma-separated list of the tokens | -| | | ``pass``, ``fail``, ``unknown``, ``error``, and | -| | | ``timeout``. Unexpected results yield a nonzero return | -| | | code . Default: ``pass`` | -+------------------+------------+---------------------------------------------------------+ -| ``timeout`` | All | Timeout in seconds. Default: ``none`` (i.e. no timeout) | -+------------------+------------+---------------------------------------------------------+ -| ``multiclock`` | All | Create a model with multiple clocks and/or asynchronous | -| | | logic. Values: ``on``, ``off``. Default: ``off`` | -+------------------+------------+---------------------------------------------------------+ -| ``wait`` | All | Instead of terminating when the first engine returns, | -| | | wait for all engines to return and check for | -| | | consistency. Values: ``on``, ``off``. Default: ``off`` | -+------------------+------------+---------------------------------------------------------+ -| ``vcd`` | All | Write VCD traces for counter-example or cover traces. | -| | | Values: ``on``, ``off``. Default: ``on`` | -+------------------+------------+---------------------------------------------------------+ -| ``aigsmt`` | All | Which SMT2 solver to use for converting AIGER witnesses | -| | | to counter example traces. Use ``none`` to disable | -| | | conversion of AIGER witnesses. Default: ``yices`` | -+------------------+------------+---------------------------------------------------------+ -| ``tbtop`` | All | The top module for generated Verilog test benches, as | -| | | hierarchical path relative to the design top module. | -+------------------+------------+---------------------------------------------------------+ -| ``make_model`` | All | Force generation of the named formal models. Takes a | -| | | comma-separated list of model names. For a model | -| | | ```` this will generate the | -| | | ``model/design_.*`` files within the working | -| | | directory, even when not required to run the task. | -+------------------+------------+---------------------------------------------------------+ -| ``smtc`` | ``bmc``, | Pass this ``.smtc`` file to the smtbmc engine. 
All | -| | ``prove``, | other engines are disabled when this option is used. | -| | ``cover`` | Default: None | -+------------------+------------+---------------------------------------------------------+ -| ``depth`` | ``bmc``, | Depth of the bounded model check. Only the specified | -| | ``cover`` | number of cycles are considered. Default: ``20`` | -| +------------+---------------------------------------------------------+ -| | ``prove`` | Depth for the k-induction performed by the ``smtbmc`` | -| | | engine. Other engines ignore this option in ``prove`` | -| | | mode. Default: ``20`` | -+------------------+------------+---------------------------------------------------------+ -| ``skip`` | ``bmc``, | Skip the specified number of time steps. Only valid | -| | ``cover`` | with smtbmc engine. All other engines are disabled when | -| | | this option is used. Default: None | -+------------------+------------+---------------------------------------------------------+ -| ``append`` | ``bmc``, | When generating a counter-example trace, add the | -| | ``prove``, | specified number of cycles at the end of the trace. | -| | ``cover`` | Default: ``0`` | -+------------------+------------+---------------------------------------------------------+ ++-------------------+------------+---------------------------------------------------------+ +| Option | Modes | Description | ++===================+============+=========================================================+ +| ``expect`` | All | Expected result as comma-separated list of the tokens | +| | | ``pass``, ``fail``, ``unknown``, ``error``, and | +| | | ``timeout``. Unexpected results yield a nonzero return | +| | | code . Default: ``pass`` | ++-------------------+------------+---------------------------------------------------------+ +| ``timeout`` | All | Timeout in seconds. Default: ``none`` (i.e. no timeout) | ++-------------------+------------+---------------------------------------------------------+ +| ``multiclock`` | All | Create a model with multiple clocks and/or asynchronous | +| | | logic. Values: ``on``, ``off``. Default: ``off`` | ++-------------------+------------+---------------------------------------------------------+ +| ``wait`` | All | Instead of terminating when the first engine returns, | +| | | wait for all engines to return and check for | +| | | consistency. Values: ``on``, ``off``. Default: ``off`` | ++-------------------+------------+---------------------------------------------------------+ +| ``vcd`` | All | Write VCD traces for counter-example or cover traces. | +| | | Values: ``on``, ``off``. Default: ``on`` | ++-------------------+------------+---------------------------------------------------------+ +| ``vcd_sim`` | All | When generating VCD traces, use Yosys's ``sim`` | +| | | command. Replaces the engine native VCD output. | +| | | Values: ``on``, ``off``. Default: ``off`` | ++-------------------+------------+---------------------------------------------------------+ +| ``fst`` | All | Generate FST traces using Yosys's sim command. | +| | | Values: ``on``, ``off``. Default: ``off`` | ++-------------------+------------+---------------------------------------------------------+ +| ``aigsmt`` | All | Which SMT2 solver to use for converting AIGER witnesses | +| | | to counter example traces. Use ``none`` to disable | +| | | conversion of AIGER witnesses. 
Default: ``yices`` | ++-------------------+------------+---------------------------------------------------------+ +| ``tbtop`` | All | The top module for generated Verilog test benches, as | +| | | hierarchical path relative to the design top module. | ++-------------------+------------+---------------------------------------------------------+ +| ``make_model`` | All | Force generation of the named formal models. Takes a | +| | | comma-separated list of model names. For a model | +| | | ```` this will generate the | +| | | ``model/design_.*`` files within the working | +| | | directory, even when not required to run the task. | ++-------------------+------------+---------------------------------------------------------+ +| ``smtc`` | ``bmc``, | Pass this ``.smtc`` file to the smtbmc engine. All | +| | ``prove``, | other engines are disabled when this option is used. | +| | ``cover`` | Default: None | ++-------------------+------------+---------------------------------------------------------+ +| ``depth`` | ``bmc``, | Depth of the bounded model check. Only the specified | +| | ``cover`` | number of cycles are considered. Default: ``20`` | +| +------------+---------------------------------------------------------+ +| | ``prove`` | Depth for the k-induction performed by the ``smtbmc`` | +| | | engine. Other engines ignore this option in ``prove`` | +| | | mode. Default: ``20`` | ++-------------------+------------+---------------------------------------------------------+ +| ``skip`` | ``bmc``, | Skip the specified number of time steps. Only valid | +| | ``cover`` | with smtbmc engine. All other engines are disabled when | +| | | this option is used. Default: None | ++-------------------+------------+---------------------------------------------------------+ +| ``append`` | ``bmc``, | When generating a counter-example trace, add the | +| | ``prove``, | specified number of cycles at the end of the trace. | +| | ``cover`` | Default: ``0`` | ++-------------------+------------+---------------------------------------------------------+ +| ``append_assume`` | ``bmc``, | Uphold assumptions when appending cycles at the end of | +| | ``prove``, | the trace. Depending on the engine and options used | +| | ``cover`` | this may be implicitly on or not supported (as | +| | | indicated in SBY's log output). | +| | | Values: ``on``, ``off``. Default: ``off`` | ++-------------------+------------+---------------------------------------------------------+ Engines section --------------- From c5dce570672fb66fbc9b7a427e1492b65e8f5ca8 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 8 Feb 2023 14:18:52 +0100 Subject: [PATCH 185/220] append_assume: Make `append_assume on` the default for now Having `append_assume off` needs `vcd_sim on` to not be ignored with a warning and `vcd_sim off` is still the default. --- docs/source/reference.rst | 2 +- sbysrc/sby_core.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/source/reference.rst b/docs/source/reference.rst index d3dae3fa..083c7f01 100644 --- a/docs/source/reference.rst +++ b/docs/source/reference.rst @@ -192,7 +192,7 @@ options are: | | ``prove``, | the trace. Depending on the engine and options used | | | ``cover`` | this may be implicitly on or not supported (as | | | | indicated in SBY's log output). | -| | | Values: ``on``, ``off``. Default: ``off`` | +| | | Values: ``on``, ``off``. 
Default: ``on`` | +-------------------+------------+---------------------------------------------------------+ Engines section diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ae28b6ae..76812aea 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1238,7 +1238,7 @@ def handle_non_engine_options(self): if self.opt_mode != "live": self.handle_int_option("append", 0) - self.handle_bool_option("append_assume", False) + self.handle_bool_option("append_assume", True) self.handle_str_option("make_model", None) From 81ee5fdd5a03d24dd58b29d4228177b1f5f84b08 Mon Sep 17 00:00:00 2001 From: "N. Engelhardt" Date: Wed, 8 Feb 2023 17:59:26 +0100 Subject: [PATCH 186/220] update prerequisites --- docs/source/install.rst | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/source/install.rst b/docs/source/install.rst index 2b28e30b..ba578d20 100644 --- a/docs/source/install.rst +++ b/docs/source/install.rst @@ -39,15 +39,15 @@ other packages are only required for some engine configurations. Prerequisites ------------- -Installing prerequisites (this command is for Ubuntu 16.04): +Installing prerequisites (this command is for Ubuntu 20.04): .. code-block:: text - sudo apt-get install build-essential clang bison flex libreadline-dev \ - gawk tcl-dev libffi-dev git mercurial graphviz \ - xdot pkg-config python python3 libftdi-dev gperf \ - libboost-program-options-dev autoconf libgmp-dev \ - cmake curl + sudo apt-get install build-essential clang bison flex \ + libreadline-dev gawk tcl-dev libffi-dev git \ + graphviz xdot pkg-config python3 zlib1g-dev + + python3 -m pip install click Required components ------------------- From 8eaeca9aa4b7b7d05eff3bcd200f3ec6c0891648 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miodrag=20Milanovi=C4=87?= Date: Mon, 13 Feb 2023 10:24:24 +0100 Subject: [PATCH 187/220] Update GH action --- .github/workflows/ci.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index fcbe8189..2c370bd6 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -7,6 +7,8 @@ jobs: steps: - uses: actions/checkout@v3 - - uses: YosysHQ/setup-oss-cad-suite@v1 + - uses: YosysHQ/setup-oss-cad-suite@v2 + with: + github-token: ${{ secrets.GITHUB_TOKEN }} - name: Run checks run: tabbypip install xmlschema && make ci From 2cd0f6f71c6f158ddc980b9cbb7e19164c5d9b95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miodrag=20Milanovi=C4=87?= Date: Mon, 13 Feb 2023 10:33:46 +0100 Subject: [PATCH 188/220] Try fixing GH action --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2c370bd6..133b103c 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -11,4 +11,4 @@ jobs: with: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Run checks - run: tabbypip install xmlschema && make ci + run: pip install xmlschema && make ci From fa5bc957c1e01fa84875651844654bdc32397168 Mon Sep 17 00:00:00 2001 From: "Darryl L. Miles" Date: Sat, 18 Feb 2023 08:51:23 +0000 Subject: [PATCH 189/220] -f clean: QoL improvement on Windows concerning file/dir removal locking When using the -f argument be more forgiving with the expectation of a clean workspace and the expectation of the new sby run being responsible for directory creation. This is a usability and quality of life improvement for Windows users where the OS can implement file and directory locking implicitly. 
In the EDA world it is common to have multiple tools in use at any one time and it can become torturous to have to close files / exit 3rd party applications to release locking so sby is happy to rerun.
This change will prevent sby claiming a terminal error has occurred when it fails to create a directory that already exists.
It also now considers the environment to be 'clean' (as per -f) if all the non-directory elements of the file tree have been deleted, leaving potentially an empty skeleton of directories.
--- sbysrc/sby.py | 15 +++++++++++++-- sbysrc/sby_core.py | 5 +++-- 2 files changed, 16 insertions(+), 4 deletions(-)

diff --git a/sbysrc/sby.py index 5052128e..20ae3452 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -412,6 +412,17 @@ def find_files(taskname): print("ERROR: Exactly one task is required when workdir is specified. Specify the task or use --prefix instead of -d.", file=sys.stderr) sys.exit(1) +# Check there are no files in this dir or any of its subdirs +def check_dirtree_empty_of_files(dir): + list = os.listdir(dir) + if list: + for fn in list: + child_dir = os.path.join(dir, fn) + if os.path.isdir(child_dir) and check_dirtree_empty_of_files(child_dir): + continue + return False + return True + def start_task(taskloop, taskname): sbyconfig, _, _, _ = read_sbyconfig(sbydata, taskname) @@ -446,10 +457,10 @@ def start_task(taskloop, taskname): if reusedir: pass - elif os.path.isdir(my_workdir): + elif os.path.isdir(my_workdir) and not check_dirtree_empty_of_files(my_workdir): print(f"ERROR: Directory '{my_workdir}' already exists, use -f to overwrite the existing directory.") sys.exit(1) - else: + elif not os.path.isdir(my_workdir): os.makedirs(my_workdir) else: diff --git a/sbysrc/sby_core.py index ae28b6ae..4426f4dd 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -920,10 +920,11 @@ def error(self, logmessage): def makedirs(self, path): if self.reusedir and os.path.isdir(path): rmtree(path, ignore_errors=True) - os.makedirs(path) + if not os.path.isdir(path): + os.makedirs(path) def copy_src(self): - os.makedirs(self.workdir + "/src") + self.makedirs(self.workdir + "/src") for dstfile, lines in self.verbatim_files.items(): dstfile = self.workdir + "/src/" + dstfile

From 513d0d42880af41f5aac0848c7008ed3a4ad7644 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 3 May 2023 16:56:09 +0200 Subject: [PATCH 190/220] Change Sphinx theme to "furo" --- docs/source/_templates/page.html | 43 +++++++++++++++++++++ docs/source/conf.py | 35 +++++++++++------ docs/source/requirements.txt | 2 +- docs/static/custom.css | 19 +++++++++- docs/static/yosyshq.css | 64 -------------------------------- 5 files changed, 86 insertions(+), 77 deletions(-) create mode 100644 docs/source/_templates/page.html delete mode 100644 docs/static/yosyshq.css diff --git a/docs/source/_templates/page.html b/docs/source/_templates/page.html new file mode 100644 index 00000000..de334e72 --- /dev/null +++ b/docs/source/_templates/page.html @@ -0,0 +1,43 @@ +{# + +See https://github.com/pradyunsg/furo/blob/main/src/furo/theme/furo/page.html for the original
block this is overwriting. + +The part that is customized is between the "begin of custom part" and "end of custom part" +comments below. It uses the same styles as the existing right sidebar code. + +#} +{% extends "furo/page.html" %} +{% block right_sidebar %} +
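The check_dirtree_empty_of_files() helper added to sby.py above can be exercised on its own; the sketch below uses a temporary directory with arbitrary subdirectory names to show that an empty skeleton of directories counts as clean while any remaining file does not:

    import os, tempfile

    # copy of the helper added above, for a standalone demonstration
    def check_dirtree_empty_of_files(dir):
        list = os.listdir(dir)
        if list:
            for fn in list:
                child_dir = os.path.join(dir, fn)
                if os.path.isdir(child_dir) and check_dirtree_empty_of_files(child_dir):
                    continue
                return False
        return True

    with tempfile.TemporaryDirectory() as workdir:
        os.makedirs(os.path.join(workdir, "engine_0"))
        os.makedirs(os.path.join(workdir, "model"))
        print(check_dirtree_empty_of_files(workdir))   # True: only empty directories remain

        open(os.path.join(workdir, "model", "design.il"), "w").close()
        print(check_dirtree_empty_of_files(workdir))   # False: a file is still present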
+ {# begin of custom part #} +
+ + YosysHQ + +
+ + {# end of custom part #} + {% if not furo_hide_toc %} +
+ + {{ _("On this page") }} + +
+
+
+ {{ toc }} +
+
+ {% endif %} +
+{% endblock %} diff --git a/docs/source/conf.py b/docs/source/conf.py index 1d4cbeb6..d85e198f 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,28 +1,41 @@ #!/usr/bin/env python3 project = 'YosysHQ SBY' author = 'YosysHQ GmbH' -copyright ='2021 YosysHQ GmbH' +copyright = '2023 YosysHQ GmbH' # select HTML theme -html_theme = 'press' + +templates_path = ["_templates"] +html_theme = "furo" html_logo = '../static/logo.png' html_favicon = '../static/favico.png' -html_css_files = ['yosyshq.css', 'custom.css'] -html_sidebars = {'**': ['util/searchbox.html', 'util/sidetoc.html']} +html_css_files = ['custom.css'] # These folders are copied to the documentation's HTML output -html_static_path = ['../static', "../images"] +html_static_path = ['../static'] -# code blocks style +# code blocks style pygments_style = 'colorful' highlight_language = 'systemverilog' html_theme_options = { - 'external_links' : [ - ('YosysHQ Docs', 'https://yosyshq.readthedocs.io'), - ('Blog', 'https://blog.yosyshq.com'), - ('Website', 'https://www.yosyshq.com'), - ], + "sidebar_hide_name": True, + + "light_css_variables": { + "color-brand-primary": "#d6368f", + "color-brand-content": "#4b72b8", + "color-api-name": "#8857a3", + "color-api-pre-name": "#4b72b8", + "color-link": "#8857a3", + }, + + "dark_css_variables": { + "color-brand-primary": "#e488bb", + "color-brand-content": "#98bdff", + "color-api-name": "#8857a3", + "color-api-pre-name": "#4b72b8", + "color-link": "#be95d5", + }, } extensions = ['sphinx.ext.autosectionlabel'] diff --git a/docs/source/requirements.txt b/docs/source/requirements.txt index 954b4546..a95ae18b 100644 --- a/docs/source/requirements.txt +++ b/docs/source/requirements.txt @@ -1 +1 @@ -sphinx-press-theme +furo diff --git a/docs/static/custom.css b/docs/static/custom.css index 40a8c178..b23ce2de 100644 --- a/docs/static/custom.css +++ b/docs/static/custom.css @@ -1 +1,18 @@ -/* empty */ +/* Don't hide the right sidebar as we're placing our fixed links there */ +aside.no-toc { + display: block !important; +} + +/* Colorful headings */ +h1 { + color: var(--color-brand-primary); +} + +h2, h3, h4, h5, h6 { + color: var(--color-brand-content); +} + +/* Use a different color for external links */ +a.external { + color: var(--color-brand-primary) !important; +} diff --git a/docs/static/yosyshq.css b/docs/static/yosyshq.css deleted file mode 100644 index 1ceebe91..00000000 --- a/docs/static/yosyshq.css +++ /dev/null @@ -1,64 +0,0 @@ -h1, h3, p.topic-title, .content li.toctree-l1 > a { - color: #d6368f !important; -} - -h2, p.admonition-title, dt, .content li.toctree-l2 > a { - color: #4b72b8; -} - -a { - color: #8857a3; -} - -a.current, a:hover, a.external { - color: #d6368f !important; -} - -a.external:hover { - text-decoration: underline; -} - -p { - text-align: justify; -} - -.vp-sidebar a { - color: #d6368f; -} - -.vp-sidebar li li a { - color: #4b72b8; -} - -.vp-sidebar li li li a { - color: #2c3e50; - font-weight: 400; -} - -.vp-sidebar h3 { - padding-left: 1.5rem !important; -} - -.vp-sidebar ul a { - padding-left: 1.5rem !important; -} - -.vp-sidebar ul ul a { - padding-left: 3rem !important; -} - -.vp-sidebar ul ul ul a { - padding-left: 4.5rem !important; -} - -.vp-sidebar .toctree-l1.current a { - border-left: 0.5rem solid #6ecbd7; -} - -.vp-sidebar .toctree-l1 a.current { - border-left: 0.5rem solid #8857a3; -} - -.injected .rst-current-version, .injected dt { - color: #6ecbd7 !important; -} From cb968ea2bbe735eaef34245ee9c5c7b431f63ba8 Mon Sep 17 00:00:00 2001 From: Jannis 
Harder Date: Mon, 8 May 2023 11:33:14 +0200 Subject: [PATCH 191/220] Update .readthedocs.yaml --- .readthedocs.yaml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.readthedocs.yaml b/.readthedocs.yaml index c42f4736..b72a370a 100644 --- a/.readthedocs.yaml +++ b/.readthedocs.yaml @@ -3,6 +3,11 @@ version: 2 +build: + os: ubuntu-22.04 + tools: + python: '3.11' + formats: - pdf From 36e7a72586a185a6c5f381e980a7ffb4b1c33fc6 Mon Sep 17 00:00:00 2001 From: Krystine Sherwin <93062060+KrystalDelusion@users.noreply.github.com> Date: Tue, 16 May 2023 17:42:53 +1200 Subject: [PATCH 192/220] Adding prep mode and skip_prep option Allows for generating a design_prep model which can then be used by multiple distinct sby runs. --- sbysrc/sby_core.py | 54 +++++++++++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 24 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index b27d1a52..bd623177 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -982,28 +982,29 @@ def make_model(self, model_name): with open(f"""{self.workdir}/model/design_prep.ys""", "w") as f: print(f"# running in {self.workdir}/model/", file=f) print(f"""read_ilang design.il""", file=f) - print("scc -select; simplemap; select -clear", file=f) - print("memory_nordff", file=f) - if self.opt_multiclock: - print("clk2fflogic", file=f) - else: - print("async2sync", file=f) - print("chformal -assume -early", file=f) - print("opt_clean", file=f) - print("formalff -setundef -clk2ff -ff2anyinit -hierarchy", file=f) - if self.opt_mode in ["bmc", "prove"]: - print("chformal -live -fair -cover -remove", file=f) - if self.opt_mode == "cover": - print("chformal -live -fair -remove", file=f) - if self.opt_mode == "live": - print("chformal -assert2assume", file=f) - print("chformal -cover -remove", file=f) - print("opt_clean", file=f) - print("check", file=f) # can't detect undriven wires past this point - print("setundef -undriven -anyseq", file=f) - print("opt -fast", file=f) - print("rename -witness", file=f) - print("opt_clean", file=f) + if not self.opt_skip_prep: + print("scc -select; simplemap; select -clear", file=f) + print("memory_nordff", file=f) + if self.opt_multiclock: + print("clk2fflogic", file=f) + else: + print("async2sync", file=f) + print("chformal -assume -early", file=f) + print("opt_clean", file=f) + print("formalff -setundef -clk2ff -ff2anyinit -hierarchy", file=f) + if self.opt_mode in ["bmc", "prove"]: + print("chformal -live -fair -cover -remove", file=f) + if self.opt_mode == "cover": + print("chformal -live -fair -remove", file=f) + if self.opt_mode == "live": + print("chformal -assert2assume", file=f) + print("chformal -cover -remove", file=f) + print("opt_clean", file=f) + print("check", file=f) # can't detect undriven wires past this point + print("setundef -undriven -anyseq", file=f) + print("opt -fast", file=f) + print("rename -witness", file=f) + print("opt_clean", file=f) print(f"""write_rtlil ../model/design_prep.il""", file=f) proc = SbyProc( @@ -1210,7 +1211,7 @@ def handle_non_engine_options(self): self.handle_str_option("mode", None) - if self.opt_mode not in ["bmc", "prove", "cover", "live"]: + if self.opt_mode not in ["bmc", "prove", "cover", "live", "prep"]: self.error(f"Invalid mode: {self.opt_mode}") self.expect = ["PASS"] @@ -1242,6 +1243,7 @@ def handle_non_engine_options(self): self.handle_bool_option("append_assume", True) self.handle_str_option("make_model", None) + self.handle_bool_option("skip_prep", False) def setup_procs(self, setupmode): 
self.handle_non_engine_options() @@ -1258,7 +1260,7 @@ def setup_procs(self, setupmode): if engine[0] not in ["smtbmc", "btor"]: self.error("Option skip is only valid for smtbmc and btor engines.") - if len(self.engine_list()) == 0: + if len(self.engine_list()) == 0 and self.opt_mode != "prep": self.error("Config file is lacking engine configuration.") if self.reusedir: @@ -1293,6 +1295,10 @@ def setup_procs(self, setupmode): import sby_mode_cover sby_mode_cover.run(self) + elif self.opt_mode == "prep": + self.model("prep") + self.update_status("PASS") + else: assert False From f49c9ebb290728f9a22943d512123e4ca2485088 Mon Sep 17 00:00:00 2001 From: Claire Xenia Wolf Date: Fri, 2 Jun 2023 13:04:45 +0200 Subject: [PATCH 193/220] Make call to "witness -rename" optional (default=on) Signed-off-by: Claire Xenia Wolf --- sbysrc/sby_core.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index b27d1a52..820f6fbf 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1002,8 +1002,9 @@ def make_model(self, model_name): print("check", file=f) # can't detect undriven wires past this point print("setundef -undriven -anyseq", file=f) print("opt -fast", file=f) - print("rename -witness", file=f) - print("opt_clean", file=f) + if self.opt_witrename: + print("rename -witness", file=f) + print("opt_clean", file=f) print(f"""write_rtlil ../model/design_prep.il""", file=f) proc = SbyProc( @@ -1233,6 +1234,8 @@ def handle_non_engine_options(self): self.handle_bool_option("vcd_sim", False) self.handle_bool_option("fst", False) + self.handle_bool_option("witrename", True) + self.handle_str_option("smtc", None) self.handle_int_option("skip", None) self.handle_str_option("tbtop", None) From 7d60a3ba349db82f401093e767faa428dd653376 Mon Sep 17 00:00:00 2001 From: Claire Xenia Wolf Date: Sat, 3 Jun 2023 22:16:35 +0200 Subject: [PATCH 194/220] Add aigvmap and aigsyms options Signed-off-by: Claire Xenia Wolf --- sbysrc/sby_core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 820f6fbf..1213fb72 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1136,7 +1136,8 @@ def instance_hierarchy_error_callback(retcode): print("abc -g AND -fast", file=f) print("opt_clean", file=f) print("stat", file=f) - print("write_aiger -I -B -zinit -no-startoffset -map design_aiger.aim -ywmap design_aiger.ywa design_aiger.aig", file=f) + print(f"write_aiger -I -B -zinit -no-startoffset {'-vmap' if self.opt_aigvmap else '-map'} design_aiger.aim" + + f"{' -symbols' if self.opt_aigsyms else ''} -ywmap design_aiger.ywa design_aiger.aig", file=f) proc = SbyProc( self, @@ -1235,6 +1236,8 @@ def handle_non_engine_options(self): self.handle_bool_option("fst", False) self.handle_bool_option("witrename", True) + self.handle_bool_option("aigvmap", False) + self.handle_bool_option("aigsyms", False) self.handle_str_option("smtc", None) self.handle_int_option("skip", None) From 8b3ba688453c5d279e8e7ee75c7261649ba71ebe Mon Sep 17 00:00:00 2001 From: Claire Xenia Wolf Date: Wed, 7 Jun 2023 22:05:17 +0200 Subject: [PATCH 195/220] Add aigfolds option Signed-off-by: Claire Xenia Wolf --- sbysrc/sby_core.py | 3 ++- sbysrc/sby_engine_abc.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 1213fb72..48a4c2a5 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1154,7 +1154,7 @@ def instance_hierarchy_error_callback(retcode): self, model_name, 
             self.model("aig"),
-            f"""cd {self.workdir}/model; {self.exe_paths["abc"]} -c 'read_aiger design_aiger.aig; fold; strash; write_aiger design_aiger_fold.aig'""",
+            f"""cd {self.workdir}/model; {self.exe_paths["abc"]} -c 'read_aiger design_aiger.aig; fold{" -s" if self.opt_aigfolds else ""}; strash; write_aiger design_aiger_fold.aig'""",
             logfile=open(f"{self.workdir}/model/design_aiger_fold.log", "w")
         )
         proc.checkretcode = True
diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py
index 1cb84b50..0f16fe4d 100644
--- a/sbysrc/sby_engine_abc.py
+++ b/sbysrc/sby_engine_abc.py
@@ -66,7 +66,7 @@ def run(mode, task, engine_idx, engine):
         task,
         f"engine_{engine_idx}",
         task.model("aig"),
-        f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""",
+        f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold{" -s" if task.opt_aigfolds else ""}; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""",
         logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w")
     )
     proc.checkretcode = True

From f692eff845693779cb53868b4d58b3c38c0ce0b9 Mon Sep 17 00:00:00 2001
From: Claire Xenia Wolf
Date: Wed, 7 Jun 2023 22:21:06 +0200
Subject: [PATCH 196/220] Add support for "abc pdr -d" engine

Signed-off-by: Claire Xenia Wolf
---
 sbysrc/sby_engine_abc.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py
index 0f16fe4d..639a7ffd 100644
--- a/sbysrc/sby_engine_abc.py
+++ b/sbysrc/sby_engine_abc.py
@@ -42,7 +42,7 @@ def run(mode, task, engine_idx, engine):
     elif abc_command[0] == "pdr":
         if mode != "prove":
             task.error("ABC command 'pdr' is only valid in prove mode.")
-        abc_command[0] += f" -v"
+        abc_command[0] += f" -v -I engine_{engine_idx}/invariants.pla"
 
     else:
         task.error(f"Invalid ABC command {abc_command[0]}.")
@@ -66,7 +66,9 @@ def run(mode, task, engine_idx, engine):
         task,
         f"engine_{engine_idx}",
         task.model("aig"),
-        f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold{" -s" if task.opt_aigfolds else ""}; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""",
+        f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold{
+            " -s" if task.opt_aigfolds or (abc_command[0].startswith("pdr ") and "-d" in abc_command[1:]) else ""
+        }; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""",
         logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w")
     )
     proc.checkretcode = True

From 27e20fd5c31fc1638b9424841197cf24862a4d0a Mon Sep 17 00:00:00 2001
From: Krystine Sherwin <93062060+KrystalDelusion@users.noreply.github.com>
Date: Tue, 13 Jun 2023 11:40:28 +1200
Subject: [PATCH 197/220] Add sphinx-argparse to generate usage

Move parser generation into a separate file to avoid import issues with bad
python modules during docs gen. With the requirements.txt provided to
readthedocs, there shouldn't need to be any other changes?
Also I've never been able to run `make test` so I'm not actually sure if the
changes break sby, but they shouldn't.
--- docs/source/conf.py | 1 + docs/source/index.rst | 1 + docs/source/requirements.txt | 1 + docs/source/usage.rst | 11 +++++ sbysrc/sby.py | 78 ++--------------------------------- sbysrc/sby_cmdline.py | 79 ++++++++++++++++++++++++++++++++++++ 6 files changed, 96 insertions(+), 75 deletions(-) create mode 100644 docs/source/usage.rst create mode 100644 sbysrc/sby_cmdline.py diff --git a/docs/source/conf.py b/docs/source/conf.py index d85e198f..ad037b89 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -39,3 +39,4 @@ } extensions = ['sphinx.ext.autosectionlabel'] +extensions += ['sphinxarg.ext'] diff --git a/docs/source/index.rst b/docs/source/index.rst index fbe43c5f..ab67043e 100644 --- a/docs/source/index.rst +++ b/docs/source/index.rst @@ -16,6 +16,7 @@ formal tasks: install.rst quickstart.rst + usage.rst reference.rst autotune.rst verilog.rst diff --git a/docs/source/requirements.txt b/docs/source/requirements.txt index a95ae18b..0e4756ee 100644 --- a/docs/source/requirements.txt +++ b/docs/source/requirements.txt @@ -1 +1,2 @@ furo +sphinx-argparse diff --git a/docs/source/usage.rst b/docs/source/usage.rst new file mode 100644 index 00000000..5a3bf797 --- /dev/null +++ b/docs/source/usage.rst @@ -0,0 +1,11 @@ +Using `sby` +=========== + +Once SBY is installed and available on the command line as `sby`, either built from source or using +one of the available CAD suites, it can be called as follows. Note that this information is also +available via `sby --help`. For more information on installation, see :ref:`install-doc`. + +.. argparse:: + :filename: ../sbysrc/sby_cmdline.py + :func: parser_func + :prog: sby diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 20ae3452..986fb9d0 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -17,88 +17,16 @@ # OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. # -import argparse, json, os, sys, shutil, tempfile, re +import json, os, sys, shutil, tempfile, re ##yosys-sys-path## +from sby_cmdline import parser_func from sby_core import SbyConfig, SbyTask, SbyAbort, SbyTaskloop, process_filename, dress_message from sby_jobserver import SbyJobClient, process_jobserver_environment import time, platform, click process_jobserver_environment() # needs to be called early -class DictAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string=None): - assert isinstance(getattr(namespace, self.dest), dict), f"Use ArgumentParser.set_defaults() to initialize {self.dest} to dict()" - name = option_string.lstrip(parser.prefix_chars).replace("-", "_") - getattr(namespace, self.dest)[name] = values - -parser = argparse.ArgumentParser(prog="sby", - usage="%(prog)s [options] [.sby [tasknames] | ]") -parser.set_defaults(exe_paths=dict()) - -parser.add_argument("-d", metavar="", dest="workdir", - help="set workdir name. default: or _. When there is more than one task, use --prefix instead") -parser.add_argument("--prefix", metavar="", dest="workdir_prefix", - help="set the workdir name prefix. 
`_` will be appended to the path for each task") -parser.add_argument("-f", action="store_true", dest="force", - help="remove workdir if it already exists") -parser.add_argument("-b", action="store_true", dest="backup", - help="backup workdir if it already exists") -parser.add_argument("-t", action="store_true", dest="tmpdir", - help="run in a temporary workdir (remove when finished)") -parser.add_argument("-T", metavar="", action="append", dest="tasknames", default=list(), - help="add taskname (useful when sby file is read from stdin)") -parser.add_argument("-E", action="store_true", dest="throw_err", - help="throw an exception (incl stack trace) for most errors") -parser.add_argument("-j", metavar="", type=int, dest="jobcount", - help="maximum number of processes to run in parallel") -parser.add_argument("--sequential", action="store_true", dest="sequential", - help="run tasks in sequence, not in parallel") - -parser.add_argument("--autotune", action="store_true", dest="autotune", - help="automatically find a well performing engine and engine configuration for each task") -parser.add_argument("--autotune-config", dest="autotune_config", - help="read an autotune configuration file (overrides the sby file's autotune options)") - -parser.add_argument("--yosys", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--abc", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--smtbmc", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--witness", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--suprove", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--aigbmc", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--avy", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--btormc", metavar="", - action=DictAction, dest="exe_paths") -parser.add_argument("--pono", metavar="", - action=DictAction, dest="exe_paths", - help="configure which executable to use for the respective tool") -parser.add_argument("--dumpcfg", action="store_true", dest="dump_cfg", - help="print the pre-processed configuration file") -parser.add_argument("--dumptags", action="store_true", dest="dump_tags", - help="print the list of task tags") -parser.add_argument("--dumptasks", action="store_true", dest="dump_tasks", - help="print the list of tasks") -parser.add_argument("--dumpdefaults", action="store_true", dest="dump_defaults", - help="print the list of default tasks") -parser.add_argument("--dumptaskinfo", action="store_true", dest="dump_taskinfo", - help="output a summary of tasks as JSON") -parser.add_argument("--dumpfiles", action="store_true", dest="dump_files", - help="print the list of source files") -parser.add_argument("--setup", action="store_true", dest="setupmode", - help="set up the working directory and exit") - -parser.add_argument("--init-config-file", dest="init_config_file", - help="create a default .sby config file") -parser.add_argument("sbyfile", metavar=".sby | ", nargs="?", - help=".sby file OR directory containing config.sby file") -parser.add_argument("arg_tasknames", metavar="tasknames", nargs="*", - help="tasks to run (only valid when .sby is used)") +parser = parser_func() args = parser.parse_args() diff --git a/sbysrc/sby_cmdline.py b/sbysrc/sby_cmdline.py new file mode 100644 index 00000000..a75c2734 --- /dev/null +++ b/sbysrc/sby_cmdline.py @@ -0,0 +1,79 @@ +import argparse + +class DictAction(argparse.Action): + def __call__(self, 
parser, namespace, values, option_string=None): + assert isinstance(getattr(namespace, self.dest), dict), f"Use ArgumentParser.set_defaults() to initialize {self.dest} to dict()" + name = option_string.lstrip(parser.prefix_chars).replace("-", "_") + getattr(namespace, self.dest)[name] = values + +def parser_func(): + parser = argparse.ArgumentParser(prog="sby", + usage="%(prog)s [options] [.sby [tasknames] | ]") + parser.set_defaults(exe_paths=dict()) + + parser.add_argument("-d", metavar="", dest="workdir", + help="set workdir name. default: or _. When there is more than one task, use --prefix instead") + parser.add_argument("--prefix", metavar="", dest="workdir_prefix", + help="set the workdir name prefix. `_` will be appended to the path for each task") + parser.add_argument("-f", action="store_true", dest="force", + help="remove workdir if it already exists") + parser.add_argument("-b", action="store_true", dest="backup", + help="backup workdir if it already exists") + parser.add_argument("-t", action="store_true", dest="tmpdir", + help="run in a temporary workdir (remove when finished)") + parser.add_argument("-T", metavar="", action="append", dest="tasknames", default=list(), + help="add taskname (useful when sby file is read from stdin)") + parser.add_argument("-E", action="store_true", dest="throw_err", + help="throw an exception (incl stack trace) for most errors") + parser.add_argument("-j", metavar="", type=int, dest="jobcount", + help="maximum number of processes to run in parallel") + parser.add_argument("--sequential", action="store_true", dest="sequential", + help="run tasks in sequence, not in parallel") + + parser.add_argument("--autotune", action="store_true", dest="autotune", + help="automatically find a well performing engine and engine configuration for each task") + parser.add_argument("--autotune-config", dest="autotune_config", + help="read an autotune configuration file (overrides the sby file's autotune options)") + + parser.add_argument("--yosys", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--abc", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--smtbmc", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--witness", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--suprove", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--aigbmc", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--avy", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--btormc", metavar="", + action=DictAction, dest="exe_paths") + parser.add_argument("--pono", metavar="", + action=DictAction, dest="exe_paths", + help="configure which executable to use for the respective tool") + parser.add_argument("--dumpcfg", action="store_true", dest="dump_cfg", + help="print the pre-processed configuration file") + parser.add_argument("--dumptags", action="store_true", dest="dump_tags", + help="print the list of task tags") + parser.add_argument("--dumptasks", action="store_true", dest="dump_tasks", + help="print the list of tasks") + parser.add_argument("--dumpdefaults", action="store_true", dest="dump_defaults", + help="print the list of default tasks") + parser.add_argument("--dumptaskinfo", action="store_true", dest="dump_taskinfo", + help="output a summary of tasks as JSON") + parser.add_argument("--dumpfiles", action="store_true", dest="dump_files", + help="print the list of source files") + parser.add_argument("--setup", 
action="store_true", dest="setupmode", + help="set up the working directory and exit") + + parser.add_argument("--init-config-file", dest="init_config_file", + help="create a default .sby config file") + parser.add_argument("sbyfile", metavar=".sby | ", nargs="?", + help=".sby file OR directory containing config.sby file") + parser.add_argument("arg_tasknames", metavar="tasknames", nargs="*", + help="tasks to run (only valid when .sby is used)") + + return parser From 0d6a70e1376a2735835e50819fabcbc7304f6771 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 19 Jun 2023 11:21:49 +0200 Subject: [PATCH 198/220] autotune: Fix crash on no-engine error path --- sbysrc/sby_autotune.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sbysrc/sby_autotune.py b/sbysrc/sby_autotune.py index 771a9a01..1d6f1020 100644 --- a/sbysrc/sby_autotune.py +++ b/sbysrc/sby_autotune.py @@ -402,7 +402,7 @@ def run(self): self.build_candidates() if not self.active_candidates: - self.error("no supported engines found for the current configuration and design") + self.task.error("no supported engines found for the current configuration and design") self.log(f"testing {len(self.active_candidates)} engine configurations...") self.start_engines() From 1a4c2a57ad44ba64c048f544cbff9f9833cafa97 Mon Sep 17 00:00:00 2001 From: Krystine Sherwin <93062060+KrystalDelusion@users.noreply.github.com> Date: Mon, 19 Jun 2023 21:39:07 +1200 Subject: [PATCH 199/220] Add sbysrc to path during docs build --- docs/source/conf.py | 6 ++++++ docs/source/usage.rst | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index ad037b89..ebebcce6 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,10 @@ #!/usr/bin/env python3 +import os +import sys + +sys.path += [os.path.join(os.path.dirname(__file__), + "..", "..", "sbysrc")] + project = 'YosysHQ SBY' author = 'YosysHQ GmbH' copyright = '2023 YosysHQ GmbH' diff --git a/docs/source/usage.rst b/docs/source/usage.rst index 5a3bf797..bc8e4e98 100644 --- a/docs/source/usage.rst +++ b/docs/source/usage.rst @@ -6,6 +6,6 @@ one of the available CAD suites, it can be called as follows. Note that this in available via `sby --help`. For more information on installation, see :ref:`install-doc`. .. argparse:: - :filename: ../sbysrc/sby_cmdline.py + :module: sby_cmdline :func: parser_func :prog: sby From c52acf2a1fb4ac863d664f5b5f6e7cdbd4c5f7dc Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 19 Jun 2023 11:39:37 +0200 Subject: [PATCH 200/220] docs: Make sphinx-argparse work independently of sphinx's cwd --- docs/source/conf.py | 5 +++++ docs/source/usage.rst | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/source/conf.py b/docs/source/conf.py index ad037b89..81d249c2 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -1,4 +1,7 @@ #!/usr/bin/env python3 +import sys +import os + project = 'YosysHQ SBY' author = 'YosysHQ GmbH' copyright = '2023 YosysHQ GmbH' @@ -40,3 +43,5 @@ extensions = ['sphinx.ext.autosectionlabel'] extensions += ['sphinxarg.ext'] + +sys.path.append(os.path.abspath(f"{__file__}/../../../sbysrc")) diff --git a/docs/source/usage.rst b/docs/source/usage.rst index 5a3bf797..bc8e4e98 100644 --- a/docs/source/usage.rst +++ b/docs/source/usage.rst @@ -6,6 +6,6 @@ one of the available CAD suites, it can be called as follows. Note that this in available via `sby --help`. For more information on installation, see :ref:`install-doc`. .. 
argparse::
-   :filename: ../sbysrc/sby_cmdline.py
+   :module: sby_cmdline
    :func: parser_func
    :prog: sby

From 28c053bd948bf74bb77a92a303e2f59a2cd80ebc Mon Sep 17 00:00:00 2001
From: Jannis Harder
Date: Fri, 23 Jun 2023 10:31:17 +0200
Subject: [PATCH 201/220] smtbmc: Allow using --keep-going in cover mode

See YosysHQ/yosys#3816 for the smtbmc change that made --keep-going do
something in cover mode
---
 sbysrc/sby_engine_smtbmc.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py
index c5f348e3..7558c094 100644
--- a/sbysrc/sby_engine_smtbmc.py
+++ b/sbysrc/sby_engine_smtbmc.py
@@ -69,8 +69,6 @@ def run(mode, task, engine_idx, engine):
                 task.error("smtbmc options --basecase and --induction are exclusive.")
             induction_only = True
         elif o == "--keep-going":
-            if mode not in ("bmc", "prove", "prove_basecase", "prove_induction"):
-                task.error("smtbmc option --keep-going is only supported in bmc and prove mode.")
             keep_going = True
         elif o == "--seed":
             random_seed = a
@@ -134,7 +132,8 @@ def run(mode, task, engine_idx, engine):
 
     if keep_going and mode != "prove_induction":
         smtbmc_opts.append("--keep-going")
-        trace_prefix += "%"
+        if mode != "cover":
+            trace_prefix += "%"
 
     if dumpsmt2:
         smtbmc_opts += ["--dump-smt2", trace_prefix.replace("%", "") + ".smt2"]

From edbc0548afbd12fcd16d7039f80071df72b33c42 Mon Sep 17 00:00:00 2001
From: Jannis Harder
Date: Mon, 17 Jul 2023 15:05:54 +0200
Subject: [PATCH 202/220] Fix deadlock with parallel SBY procs each with parallel tasks

When multiple SBY processes run in parallel (from a Makefile or other
job-server aware tool) and each SBY process runs tasks in parallel, each with
enough tasks to be limited by the total job count, it is possible for the
processes to race in such a way that every SBY process's helper process is in
a blocking read from the job-server but a job-token would only become
available as soon as any SBY process exits.

In that situation SBY doesn't actually need the job-token anymore and only
previously requested it as there was opportunity for parallelism. It would
immediately return the token as soon as it is acquired. That's usually
sufficient to deal with no-longer-needed-but-requested tokens, but when SBY
is done, it needs to return the job-token held by the parent process ASAP,
which it can only do by actually exiting, so we need to interrupt the
blocking read of SBY's helper process.

This could be done by sending a signal to the helper process, except that
Python made the decision in 3.5 to have automatic EINTR retry loops around
most system calls with no opt-out. That was part of the reason to go with
this specific helper process design that avoids interrupting a blocking read
in the first place.

Using an exception raised from the signal handler instead might lose a token
when the signal arrives after the read returns, but before the token is
stored in a variable. You cannot recover from a lost token in the context of
the job-server protocol, so that's not an option. (This can't happen with
recent Python versions but that would depend on undocumented behavior that
could plausibly change again.)

Thankfully the only case where we need to interrupt the read is when SBY is
about to exit and will not request any further tokens. This allows us to use
a signal handler that uses dup2 to close and replace the read-from fd with
one that already is at EOF, making the next retry return immediately.
(If we'd need to interrupt a read and continue running we could also do this but the fd shuffling would be more involved.) --- sbysrc/sby_jobserver.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/sbysrc/sby_jobserver.py b/sbysrc/sby_jobserver.py index a3501335..57d751db 100644 --- a/sbysrc/sby_jobserver.py +++ b/sbysrc/sby_jobserver.py @@ -80,7 +80,16 @@ def process_jobserver_environment(): def jobserver_helper(jobserver_read_fd, jobserver_write_fd, request_fd, response_fd): """Helper process to handle blocking jobserver pipes.""" + def handle_sigusr1(*args): + # Since Python doesn't allow user code to handle EINTR anymore, we replace the + # jobserver fd with an fd at EOF to interrupt a blocking read in a way that + # cannot lose any read data + r, w = os.pipe() + os.close(w) + os.dup2(r, jobserver_read_fd) + os.close(r) signal.signal(signal.SIGINT, signal.SIG_IGN) + signal.signal(signal.SIGUSR1, handle_sigusr1) pending = 0 while True: try: @@ -110,6 +119,8 @@ def jobserver_helper(jobserver_read_fd, jobserver_write_fd, request_fd, response except BlockingIOError: select.select([jobserver_read_fd], [], []) continue + if not token: + break pending -= 1 @@ -240,6 +251,10 @@ def atexit_blocking(self): # Closing the request pipe singals the helper that we want to exit os.close(self.request_write_fd) + # Additionally we send a signal to interrupt a blocking read within the + # helper + self.helper_process.send_signal(signal.SIGUSR1) + # The helper might have been in the process of sending us some tokens, which # we still need to return while True: From 884ef862cbc0f34799cd6a6152f7dd384518d9bd Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 11 Aug 2023 15:58:55 +0200 Subject: [PATCH 203/220] assume_early option to implement cross assumes in IVY Checking IVY's cross assumes requires delaying a subset of assumptions, which we don't want SBY to undo. 
--- sbysrc/sby_core.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ecd901ce..8d2fc764 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -989,7 +989,8 @@ def make_model(self, model_name): print("clk2fflogic", file=f) else: print("async2sync", file=f) - print("chformal -assume -early", file=f) + if self.opt_assume_early: + print("chformal -assume -early", file=f) print("opt_clean", file=f) print("formalff -setundef -clk2ff -ff2anyinit -hierarchy", file=f) if self.opt_mode in ["bmc", "prove"]: @@ -1252,6 +1253,8 @@ def handle_non_engine_options(self): self.handle_str_option("make_model", None) self.handle_bool_option("skip_prep", False) + self.handle_bool_option("assume_early", True) + def setup_procs(self, setupmode): self.handle_non_engine_options() if self.opt_smtc is not None: From 7415abfcfa8bf14f024f28e61e62f23ccd892415 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miodrag=20Milanovi=C4=87?= Date: Fri, 8 Sep 2023 14:09:29 +0200 Subject: [PATCH 204/220] Create codeql.yml --- .github/workflows/codeql.yml | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 .github/workflows/codeql.yml diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml new file mode 100644 index 00000000..76d61afc --- /dev/null +++ b/.github/workflows/codeql.yml @@ -0,0 +1,28 @@ +name: "CodeQL" + +on: + workflow_dispatch: + schedule: + - cron: '0 2 * * *' + +jobs: + analyze: + name: Analyze + runs-on: ubuntu-latest + permissions: + actions: read + contents: read + security-events: write + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + + - name: Initialize CodeQL + uses: github/codeql-action/init@v2 + with: + languages: python + queries: security-extended,security-and-quality + + - name: Perform CodeQL Analysis + uses: github/codeql-action/analyze@v2 From 36f84b8b9ff688ff1b7a67ad13989557a09c7ed7 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 28 Sep 2023 17:38:15 +0200 Subject: [PATCH 205/220] smtbmc: Use new -noinitstate option when simulating inductive cex This requires YosysHQ/yosys#3962 --- sbysrc/sby_engine_smtbmc.py | 2 +- sbysrc/sby_sim.py | 9 ++++++--- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 7558c094..7e155899 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -196,7 +196,7 @@ def output_callback(line): nonlocal procs_running if pending_sim: - sim_proc = sim_witness_trace(procname, task, engine_idx, pending_sim, append=sim_append) + sim_proc = sim_witness_trace(procname, task, engine_idx, pending_sim, append=sim_append, inductive=mode == "prove_induction") sim_proc.register_exit_callback(simple_exit_callback) procs_running += 1 pending_sim = None diff --git a/sbysrc/sby_sim.py b/sbysrc/sby_sim.py index 0025ed8e..46584075 100644 --- a/sbysrc/sby_sim.py +++ b/sbysrc/sby_sim.py @@ -20,7 +20,7 @@ from sby_core import SbyProc from sby_design import pretty_path -def sim_witness_trace(prefix, task, engine_idx, witness_file, *, append, deps=()): +def sim_witness_trace(prefix, task, engine_idx, witness_file, *, append, inductive=False, deps=()): trace_name = os.path.basename(witness_file)[:-3] formats = [] tracefile = None @@ -40,8 +40,11 @@ def sim_witness_trace(prefix, task, engine_idx, witness_file, *, append, deps=() with open(f"{task.workdir}/engine_{engine_idx}/{trace_name}.ys", "w") as f: print(f"# running in {task.workdir}/engine_{engine_idx}/", 
file=f) - print(f"read_rtlil ../model/design_prep.il", file=f) - print(f"sim -hdlname -summary {trace_name}.json -append {append} -r {trace_name}.yw {' '.join(formats)}", file=f) + print("read_rtlil ../model/design_prep.il", file=f) + sim_args = "" + if inductive: + sim_args += " -noinitstate" + print(f"sim -hdlname -summary {trace_name}.json -append {append}{sim_args} -r {trace_name}.yw {' '.join(formats)}", file=f) def exit_callback(retval): From 8581bd317111f359a7845b99a930cb83fea0b74d Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 28 Sep 2023 18:57:37 +0200 Subject: [PATCH 206/220] Add dft/data_diode example This requires YosysHQ/yosys#3961 and YosysHQ/sby#249 to work --- docs/examples/dft/data_diode.sby | 49 ++++++ docs/examples/dft/data_diode.sv | 278 +++++++++++++++++++++++++++++++ 2 files changed, 327 insertions(+) create mode 100644 docs/examples/dft/data_diode.sby create mode 100644 docs/examples/dft/data_diode.sv diff --git a/docs/examples/dft/data_diode.sby b/docs/examples/dft/data_diode.sby new file mode 100644 index 00000000..32092d2c --- /dev/null +++ b/docs/examples/dft/data_diode.sby @@ -0,0 +1,49 @@ +[tasks] +diode +direct + +[options] +mode prove + +diode: expect pass +direct: expect fail + +fst on + +[engines] +smtbmc + +[script] +diode: read -define USE_DIODE + +verific -sv data_diode.sv + +hierarchy -top top + +# $overwrite_tag currently requires these two passes directly after importing +# the design. Otherwise the target signals of $overwrite_tag cannot be properly +# resolved nor can `dft_tag -overwrite-only` itself detect this situation to +# report it as an error. +flatten +dft_tag -overwrite-only + +# Then the design can be prepared as usual +prep + +# And finally the tagging logic can be resolved, which requires converting all +# FFs into simple D-FFs. Here, if this isn't done `dft_tag` will produce +# warnings and tell you to run the required passes. +async2sync +dffunmap +dft_tag -tag-public + +# The `Unhandled cell` warnings produced by `dft_tag` mean that there is no +# bit-precise tag propagation model for the listed cell. The cell in question +# will still propagate tags, but `dft_tag` will use a generic model that +# assumes all inputs can propagate to all outputs independent of the value of +# other inputs on the same cell. For built-in logic cells this is a sound +# over-approximation, but may produce more false-positives than a bit-precise +# approximation would. + +[files] +data_diode.sv diff --git a/docs/examples/dft/data_diode.sv b/docs/examples/dft/data_diode.sv new file mode 100644 index 00000000..fa053a21 --- /dev/null +++ b/docs/examples/dft/data_diode.sv @@ -0,0 +1,278 @@ +// Simple sync FIFO implementation using an extra bit in the read and write +// pointers to distinguish the completely full and completely empty case. 
+module fifo #( + DEPTH_BITS = 4, + WIDTH = 8 +) ( + input wire clk, + input wire rst, + + input wire in_valid, + input wire [WIDTH-1:0] in_data, + output reg in_ready, + + output reg out_valid, + output reg [WIDTH-1:0] out_data, + input wire out_ready +); + + reg [WIDTH-1:0] buffer [1< rst); +endchecker + +bind top initial_reset initial_reset(clk, rst); From 040b8deef2aecb0faa09ef8f5764d7c3cd0aefd8 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Thu, 16 Nov 2023 13:46:25 +0100 Subject: [PATCH 207/220] Add aigcxemin and cexenum.py tools --- tools/README.md | 6 + tools/aigcexmin/.gitignore | 1 + tools/aigcexmin/Cargo.lock | 562 +++++++++++++++++++++++ tools/aigcexmin/Cargo.toml | 17 + tools/aigcexmin/src/aig_eval.rs | 104 +++++ tools/aigcexmin/src/care_graph.rs | 730 ++++++++++++++++++++++++++++++ tools/aigcexmin/src/main.rs | 145 ++++++ tools/aigcexmin/src/util.rs | 25 + tools/cexenum/cexenum.py | 584 ++++++++++++++++++++++++ tools/cexenum/examples/.gitignore | 0 tools/cexenum/examples/factor.sby | 49 ++ 11 files changed, 2223 insertions(+) create mode 100644 tools/README.md create mode 100644 tools/aigcexmin/.gitignore create mode 100644 tools/aigcexmin/Cargo.lock create mode 100644 tools/aigcexmin/Cargo.toml create mode 100644 tools/aigcexmin/src/aig_eval.rs create mode 100644 tools/aigcexmin/src/care_graph.rs create mode 100644 tools/aigcexmin/src/main.rs create mode 100644 tools/aigcexmin/src/util.rs create mode 100755 tools/cexenum/cexenum.py create mode 100644 tools/cexenum/examples/.gitignore create mode 100644 tools/cexenum/examples/factor.sby diff --git a/tools/README.md b/tools/README.md new file mode 100644 index 00000000..1036f1c9 --- /dev/null +++ b/tools/README.md @@ -0,0 +1,6 @@ +# SBY - Additional Tools + +This directory contains various tools that can be used in conjunction with SBY. + +* [`aigcexmin`](./aigcexmin) Counter-example minimization of AIGER witness (.aiw) files +* [`cexenum`](./cexenum) Enumeration of minimized counter-examples diff --git a/tools/aigcexmin/.gitignore b/tools/aigcexmin/.gitignore new file mode 100644 index 00000000..ea8c4bf7 --- /dev/null +++ b/tools/aigcexmin/.gitignore @@ -0,0 +1 @@ +/target diff --git a/tools/aigcexmin/Cargo.lock b/tools/aigcexmin/Cargo.lock new file mode 100644 index 00000000..828358bb --- /dev/null +++ b/tools/aigcexmin/Cargo.lock @@ -0,0 +1,562 @@ +# This file is automatically @generated by Cargo. +# It is not intended for manual editing. 
+version = 3 + +[[package]] +name = "addr2line" +version = "0.21.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb" +dependencies = [ + "gimli", +] + +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + +[[package]] +name = "aigcexmin" +version = "0.1.0" +dependencies = [ + "clap", + "color-eyre", + "flussab", + "flussab-aiger", + "zwohash", +] + +[[package]] +name = "anstream" +version = "0.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2ab91ebe16eb252986481c5b62f6098f3b698a45e34b5b98200cf20dd2484a44" +dependencies = [ + "anstyle", + "anstyle-parse", + "anstyle-query", + "anstyle-wincon", + "colorchoice", + "utf8parse", +] + +[[package]] +name = "anstyle" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7079075b41f533b8c61d2a4d073c4676e1f8b249ff94a393b0595db304e0dd87" + +[[package]] +name = "anstyle-parse" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "317b9a89c1868f5ea6ff1d9539a69f45dffc21ce321ac1fd1160dfa48c8e2140" +dependencies = [ + "utf8parse", +] + +[[package]] +name = "anstyle-query" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca11d4be1bab0c8bc8734a9aa7bf4ee8316d462a08c6ac5052f888fef5b494b" +dependencies = [ + "windows-sys", +] + +[[package]] +name = "anstyle-wincon" +version = "3.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0699d10d2f4d628a98ee7b57b289abbc98ff3bad977cb3152709d4bf2330628" +dependencies = [ + "anstyle", + "windows-sys", +] + +[[package]] +name = "autocfg" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" + +[[package]] +name = "backtrace" +version = "0.3.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +dependencies = [ + "addr2line", + "cc", + "cfg-if", + "libc", + "miniz_oxide", + "object", + "rustc-demangle", +] + +[[package]] +name = "bitflags" +version = "2.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07" + +[[package]] +name = "cc" +version = "1.0.83" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0" +dependencies = [ + "libc", +] + +[[package]] +name = "cfg-if" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" + +[[package]] +name = "clap" +version = "4.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2275f18819641850fa26c89acc84d465c1bf91ce57bc2748b28c420473352f64" +dependencies = [ + "clap_builder", + "clap_derive", +] + +[[package]] +name = "clap_builder" +version = "4.4.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07cdf1b148b25c1e1f7a42225e30a0d99a615cd4637eae7365548dd4529b95bc" +dependencies = [ + "anstream", + "anstyle", + "clap_lex", + "strsim", + "terminal_size", +] + +[[package]] +name = "clap_derive" 
+version = "4.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cf9804afaaf59a91e75b022a30fb7229a7901f60c755489cc61c9b423b836442" +dependencies = [ + "heck", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "702fc72eb24e5a1e48ce58027a675bc24edd52096d5397d4aea7c6dd9eca0bd1" + +[[package]] +name = "color-eyre" +version = "0.6.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204" +dependencies = [ + "backtrace", + "color-spantrace", + "eyre", + "indenter", + "once_cell", + "owo-colors", + "tracing-error", +] + +[[package]] +name = "color-spantrace" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce" +dependencies = [ + "once_cell", + "owo-colors", + "tracing-core", + "tracing-error", +] + +[[package]] +name = "colorchoice" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "acbf1af155f9b9ef647e42cdc158db4b64a1b61f743629225fde6f3e0be2a7c7" + +[[package]] +name = "errno" +version = "0.3.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7c18ee0ed65a5f1f81cac6b1d213b69c35fa47d4252ad41f1486dbd8226fe36e" +dependencies = [ + "libc", + "windows-sys", +] + +[[package]] +name = "eyre" +version = "0.6.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb" +dependencies = [ + "indenter", + "once_cell", +] + +[[package]] +name = "flussab" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fcd46d8f41aa1e4d79ba21282dd39a9c539d610ab336fc56a48dccdd7c82b12f" +dependencies = [ + "itoap", + "num-traits", +] + +[[package]] +name = "flussab-aiger" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "378b3a9970d0163162e8b3c9a4d9b2eef98be95d624cbac5b207278b157886d2" +dependencies = [ + "flussab", + "num-traits", + "thiserror", + "zwohash", +] + +[[package]] +name = "gimli" +version = "0.28.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0" + +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + +[[package]] +name = "indenter" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683" + +[[package]] +name = "itoap" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9028f49264629065d057f340a86acb84867925865f73bbf8d47b4d149a7e88b8" + +[[package]] +name = "lazy_static" +version = "1.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" + +[[package]] +name = "libc" +version = "0.2.150" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "89d92a4743f9a61002fae18374ed11e7973f530cb3a3255fb354818118b2203c" + +[[package]] +name = "linux-raw-sys" +version = "0.4.11" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "969488b55f8ac402214f3f5fd243ebb7206cf82de60d3172994707a4bcc2b829" + +[[package]] +name = "memchr" +version = "2.6.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167" + +[[package]] +name = "miniz_oxide" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7" +dependencies = [ + "adler", +] + +[[package]] +name = "num-traits" +version = "0.2.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c" +dependencies = [ + "autocfg", +] + +[[package]] +name = "object" +version = "0.32.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0" +dependencies = [ + "memchr", +] + +[[package]] +name = "once_cell" +version = "1.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d" + +[[package]] +name = "owo-colors" +version = "3.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f" + +[[package]] +name = "pin-project-lite" +version = "0.2.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" + +[[package]] +name = "proc-macro2" +version = "1.0.69" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da" +dependencies = [ + "unicode-ident", +] + +[[package]] +name = "quote" +version = "1.0.33" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae" +dependencies = [ + "proc-macro2", +] + +[[package]] +name = "rustc-demangle" +version = "0.1.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76" + +[[package]] +name = "rustix" +version = "0.38.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad981d6c340a49cdc40a1028d9c6084ec7e9fa33fcb839cab656a267071e234" +dependencies = [ + "bitflags", + "errno", + "libc", + "linux-raw-sys", + "windows-sys", +] + +[[package]] +name = "sharded-slab" +version = "0.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6" +dependencies = [ + "lazy_static", +] + +[[package]] +name = "strsim" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73473c0e59e6d5812c5dfe2a064a6444949f089e20eec9a2e5506596494e4623" + +[[package]] +name = "syn" +version = "2.0.39" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "23e78b90f2fcf45d3e842032ce32e3f2d1545ba6636271dcbf24fa306d87be7a" +dependencies = [ + "proc-macro2", + "quote", + "unicode-ident", +] + +[[package]] +name = "terminal_size" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "21bebf2b7c9e0a515f6e0f8c51dc0f8e4696391e6f1ff30379559f8365fb0df7" +dependencies = [ + "rustix", + 
"windows-sys", +] + +[[package]] +name = "thiserror" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9a7210f5c9a7156bb50aa36aed4c95afb51df0df00713949448cf9e97d382d2" +dependencies = [ + "thiserror-impl", +] + +[[package]] +name = "thiserror-impl" +version = "1.0.50" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "266b2e40bc00e5a6c09c3584011e08b06f123c00362c92b975ba9843aaaa14b8" +dependencies = [ + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "thread_local" +version = "1.1.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152" +dependencies = [ + "cfg-if", + "once_cell", +] + +[[package]] +name = "tracing" +version = "0.1.40" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef" +dependencies = [ + "pin-project-lite", + "tracing-core", +] + +[[package]] +name = "tracing-core" +version = "0.1.32" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54" +dependencies = [ + "once_cell", + "valuable", +] + +[[package]] +name = "tracing-error" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e" +dependencies = [ + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "tracing-subscriber" +version = "0.3.18" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ad0f048c97dbd9faa9b7df56362b8ebcaa52adb06b498c050d2f4e32f90a7a8b" +dependencies = [ + "sharded-slab", + "thread_local", + "tracing-core", +] + +[[package]] +name = "unicode-ident" +version = "1.0.12" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b" + +[[package]] +name = "utf8parse" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "711b9620af191e0cdc7468a8d14e709c3dcdb115b36f838e601583af800a370a" + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + +[[package]] +name = "windows-sys" +version = "0.48.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "677d2418bec65e3338edb076e806bc1ec15693c5d0104683f2efe857f61056a9" +dependencies = [ + "windows-targets", +] + +[[package]] +name = "windows-targets" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a2fa6e2155d7247be68c096456083145c183cbbbc2764150dda45a87197940c" +dependencies = [ + "windows_aarch64_gnullvm", + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_gnullvm", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" + +[[package]] +name = "windows_aarch64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" + +[[package]] +name = "windows_i686_gnu" +version 
= "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" + +[[package]] +name = "windows_i686_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" + +[[package]] +name = "windows_x86_64_gnullvm" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.48.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" + +[[package]] +name = "zwohash" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "beaf63e0740cea93ca85de39611a8bc8262a50adacd6321cd209a123676d0447" diff --git a/tools/aigcexmin/Cargo.toml b/tools/aigcexmin/Cargo.toml new file mode 100644 index 00000000..7a08f347 --- /dev/null +++ b/tools/aigcexmin/Cargo.toml @@ -0,0 +1,17 @@ +[package] +name = "aigcexmin" +version = "0.1.0" +edition = "2021" +authors = ["Jannis Harder "] + +# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html + +[profile.release] +debug = true # profiling + +[dependencies] +clap = { version = "4.4.8", features = ["derive", "cargo", "wrap_help"] } +color-eyre = "0.6.2" +flussab = "0.3.1" +flussab-aiger = "0.1.0" +zwohash = "0.1.2" diff --git a/tools/aigcexmin/src/aig_eval.rs b/tools/aigcexmin/src/aig_eval.rs new file mode 100644 index 00000000..7e5543c6 --- /dev/null +++ b/tools/aigcexmin/src/aig_eval.rs @@ -0,0 +1,104 @@ +use flussab_aiger::{aig::OrderedAig, Lit}; + +use crate::util::unpack_lit; + +pub trait AigValue: Copy { + fn invert_if(self, en: bool, ctx: &mut Context) -> Self; + fn and(self, other: Self, ctx: &mut Context) -> Self; + fn constant(value: bool, ctx: &mut Context) -> Self; +} + +pub fn initial_frame( + aig: &OrderedAig, + state: &mut Vec, + mut latch_init: impl FnMut(usize, &mut Context) -> V, + mut input: impl FnMut(usize, &mut Context) -> V, + ctx: &mut Context, +) where + L: Lit, + V: AigValue, +{ + state.clear(); + state.push(V::constant(false, ctx)); + + for i in 0..aig.input_count { + state.push(input(i, ctx)); + } + + for i in 0..aig.latches.len() { + state.push(latch_init(i, ctx)); + } + + for and_gate in aig.and_gates.iter() { + let [a, b] = and_gate.inputs.map(|lit| { + let (var, polarity) = unpack_lit(lit); + state[var].invert_if(polarity, ctx) + }); + + state.push(a.and(b, ctx)); + } +} + +pub fn successor_frame( + aig: &OrderedAig, + state: &mut Vec, + mut input: impl FnMut(usize, &mut Context) -> V, + ctx: &mut Context, +) where + L: Lit, + V: AigValue, +{ + assert_eq!(state.len(), 1 + aig.max_var_index); + + for i in 0..aig.input_count { + state.push(input(i, ctx)); + } + + for latch in aig.latches.iter() { + let (var, polarity) = unpack_lit(latch.next_state); + state.push(state[var].invert_if(polarity, ctx)); + } + + state.drain(1..1 + aig.max_var_index); + + for and_gate in aig.and_gates.iter() { + let [a, b] = and_gate.inputs.map(|lit| { + let (var, polarity) = unpack_lit(lit); + state[var].invert_if(polarity, ctx) + 
}); + + state.push(a.and(b, ctx)); + } +} + +impl AigValue<()> for bool { + fn invert_if(self, en: bool, _ctx: &mut ()) -> Self { + self ^ en + } + + fn and(self, other: Self, _ctx: &mut ()) -> Self { + self & other + } + + fn constant(value: bool, _ctx: &mut ()) -> Self { + value + } +} + +impl AigValue<()> for Option { + fn invert_if(self, en: bool, _ctx: &mut ()) -> Self { + self.map(|b| b ^ en) + } + + fn and(self, other: Self, _ctx: &mut ()) -> Self { + match (self, other) { + (Some(true), Some(true)) => Some(true), + (Some(false), _) | (_, Some(false)) => Some(false), + _ => None, + } + } + + fn constant(value: bool, _ctx: &mut ()) -> Self { + Some(value) + } +} diff --git a/tools/aigcexmin/src/care_graph.rs b/tools/aigcexmin/src/care_graph.rs new file mode 100644 index 00000000..0f608fe2 --- /dev/null +++ b/tools/aigcexmin/src/care_graph.rs @@ -0,0 +1,730 @@ +use std::{ + cmp::Reverse, + collections::{BTreeSet, BinaryHeap}, + mem::{replace, take}, + num::NonZeroU32, +}; + +use color_eyre::eyre::bail; +use flussab::DeferredWriter; +use flussab_aiger::{aig::OrderedAig, Lit}; +use zwohash::HashMap; + +use crate::{ + aig_eval::{initial_frame, successor_frame, AigValue}, + util::{unpack_lit, write_output_bit}, +}; + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +#[repr(transparent)] +pub struct NodeRef { + code: Reverse, +} + +impl std::fmt::Debug for NodeRef { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_tuple("NodeRef::new").field(&self.index()).finish() + } +} + +impl NodeRef { + const INVALID_INDEX: usize = u32::MAX as usize; + const TRUE_INDEX: usize = Self::INVALID_INDEX - 1; + const FALSE_INDEX: usize = Self::INVALID_INDEX - 2; + + pub const TRUE: Self = Self::new(Self::TRUE_INDEX); + pub const FALSE: Self = Self::new(Self::FALSE_INDEX); + + pub const fn new(index: usize) -> Self { + assert!(index < u32::MAX as usize); + let Some(code) = NonZeroU32::new(!(index as u32)) else { + unreachable!(); + }; + Self { + code: Reverse(code), + } + } + + pub fn index(self) -> usize { + !(self.code.0.get()) as usize + } +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +enum Gate { + And, + Or, +} + +#[derive(Debug)] +enum NodeDef { + Gate([NodeRef; 2]), + Input(u32), +} + +impl NodeDef { + fn and(inputs: [NodeRef; 2]) -> Self { + assert!(inputs[0] < inputs[1]); + Self::Gate(inputs) + } + + fn or(inputs: [NodeRef; 2]) -> Self { + assert!(inputs[0] < inputs[1]); + Self::Gate([inputs[1], inputs[0]]) + } + + fn input(id: u32) -> Self { + Self::Input(id) + } + + fn as_gate(&self) -> Result<(Gate, [NodeRef; 2]), u32> { + match *self { + NodeDef::Gate(inputs) => { + if inputs[0] < inputs[1] { + Ok((Gate::And, inputs)) + } else { + Ok((Gate::Or, [inputs[1], inputs[0]])) + } + } + NodeDef::Input(input) => Err(input), + } + } +} + +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Default, Debug)] +enum NodeState { + #[default] + Unknown, + Nonselected, + Selected, + Required, +} + +#[derive(Debug)] +struct Node { + def: NodeDef, + priority: u32, + state: NodeState, + renamed: Option, +} + +impl Node { + fn update_state(&mut self, state: NodeState) -> NodeState { + let old_state = self.state; + self.state = self.state.max(state); + old_state + } +} + +#[derive(Default)] +pub struct AndOrGraph { + find_input: HashMap, + find_and: HashMap<[NodeRef; 2], NodeRef>, + find_or: HashMap<[NodeRef; 2], NodeRef>, + + // find_renamed: HashMap, + nodes: Vec, + queue: BinaryHeap, + stack: Vec, + + unknown_inputs: BTreeSet, + 
required_inputs: BTreeSet, + active_node_count: usize, + + input_order: Vec<(NodeRef, u32)>, + cache: bool, +} + +impl AndOrGraph { + pub fn input(&mut self, id: u32) -> NodeRef { + assert!(id <= u32::MAX - 2); + *self.find_input.entry(id).or_insert_with(|| { + let node_ref = NodeRef::new(self.nodes.len()); + let node = Node { + def: NodeDef::input(id), + priority: id, + state: NodeState::Unknown, + renamed: None, + }; + self.nodes.push(node); + self.unknown_inputs.insert(id); + node_ref + }) + } + + pub fn and(&mut self, mut inputs: [NodeRef; 2]) -> NodeRef { + inputs.sort_unstable(); + if inputs[1] == NodeRef::FALSE { + NodeRef::FALSE + } else if inputs[1] == NodeRef::TRUE || inputs[1] == inputs[0] { + inputs[0] + } else { + let [a, b] = inputs; + match inputs.map(|input| self.nodes[input.index()].def.as_gate()) { + [Ok((Gate::And, [a0, a1])), _] if b == a0 || b == a1 => { + return a; + } + [_, Ok((Gate::And, [b0, b1]))] if a == b0 || a == b1 => { + return b; + } + + [Ok((Gate::Or, [a0, a1])), _] if b == a0 || b == a1 => { + return b; + } + [_, Ok((Gate::Or, [b0, b1]))] if a == b0 || a == b1 => { + return a; + } + + _ => (), + } + + let mut mknode = || { + let node_ref = NodeRef::new(self.nodes.len()); + + let [a, b] = inputs.map(|input| self.nodes[input.index()].priority); + + let node = Node { + def: NodeDef::and(inputs), + priority: a.min(b), + state: NodeState::Unknown, + renamed: None, + }; + self.nodes.push(node); + node_ref + }; + + if self.cache { + *self.find_and.entry(inputs).or_insert_with(mknode) + } else { + mknode() + } + } + } + + pub fn or(&mut self, mut inputs: [NodeRef; 2]) -> NodeRef { + inputs.sort_unstable(); + + if inputs[1] == NodeRef::TRUE { + NodeRef::TRUE + } else if inputs[1] == NodeRef::FALSE || inputs[1] == inputs[0] { + inputs[0] + } else { + let [a, b] = inputs; + match inputs.map(|input| self.nodes[input.index()].def.as_gate()) { + [Ok((Gate::Or, [a0, a1])), _] if b == a0 || b == a1 => { + return a; + } + [_, Ok((Gate::Or, [b0, b1]))] if a == b0 || a == b1 => { + return b; + } + + [Ok((Gate::And, [a0, a1])), _] if b == a0 || b == a1 => { + return b; + } + [_, Ok((Gate::And, [b0, b1]))] if a == b0 || a == b1 => { + return a; + } + + _ => (), + } + + let mut mknode = || { + let node_ref = NodeRef::new(self.nodes.len()); + + let [a, b] = inputs.map(|input| self.nodes[input.index()].priority); + + let node = Node { + def: NodeDef::or(inputs), + priority: a.max(b), + state: NodeState::Unknown, + renamed: None, + }; + self.nodes.push(node); + node_ref + }; + + if self.cache { + *self.find_or.entry(inputs).or_insert_with(mknode) + } else { + mknode() + } + } + } + + pub fn pass(&mut self, target: NodeRef, shuffle: usize, mut enable_cache: bool) -> NodeRef { + if self.cache { + enable_cache = false; + } + + self.nodes[target.index()].state = NodeState::Required; + self.queue.push(target); + + let target_priority = self.nodes[target.index()].priority; + + 'queue: while let Some(current) = self.queue.pop() { + let node = &self.nodes[current.index()]; + let state = node.state; + + self.stack.push(current); + + match node.def.as_gate() { + Ok((Gate::And, inputs)) => { + if enable_cache { + self.find_and.insert(inputs, current); + } + for input in inputs { + let node = &mut self.nodes[input.index()]; + if node.update_state(state) == NodeState::Unknown { + self.queue.push(input); + } + } + } + Ok((Gate::Or, inputs)) => { + if enable_cache { + self.find_or.insert(inputs, current); + } + for input in inputs { + let node = &mut self.nodes[input.index()]; + if 
node.update_state(NodeState::Nonselected) == NodeState::Unknown { + self.queue.push(input); + } + } + + if state <= NodeState::Nonselected { + continue; + } + + let input_priorities = inputs.map(|input| self.nodes[input.index()].priority); + + for (i, input_priority) in input_priorities.into_iter().enumerate() { + if input_priority < target_priority { + // The other input will be false, so propagate the state + self.nodes[inputs[i ^ 1].index()].update_state(state); + continue; + } + } + + for input in inputs { + let input_state = self.nodes[input.index()].state; + if input_state >= NodeState::Selected { + // One input of the or is already marked, no need to mark the other + continue 'queue; + } + } + + // Mark the highest priority input + let input = inputs[(input_priorities[1] > input_priorities[0]) as usize]; + self.nodes[input.index()].update_state(NodeState::Selected); + } + Err(_input) => (), + } + } + + if enable_cache { + self.cache = true; + } + + let mut stack = take(&mut self.stack); + + self.active_node_count = stack.len(); + + self.unknown_inputs.clear(); + + for current in stack.drain(..).rev() { + let node = &mut self.nodes[current.index()]; + let state = replace(&mut node.state, NodeState::Unknown); + let priority = node.priority; + + match node.def.as_gate() { + Ok((gate, inputs)) => { + let new_inputs = inputs.map(|input| self.nodes[input.index()].renamed.unwrap()); + + let output = if new_inputs == inputs { + current + } else { + match gate { + Gate::And => self.and(new_inputs), + Gate::Or => self.or(new_inputs), + } + }; + + if shuffle > 0 && output != NodeRef::FALSE && output != NodeRef::TRUE { + if let Ok((gate, inputs)) = self.nodes[output.index()].def.as_gate() { + let [a, b] = inputs.map(|input| self.nodes[input.index()].priority); + + self.nodes[output.index()].priority = match gate { + Gate::And => a.min(b), + Gate::Or => a.max(b), + }; + } + } + + self.nodes[current.index()].renamed = Some(output); + } + Err(input) => match priority.cmp(&target_priority) { + std::cmp::Ordering::Less => { + self.nodes[current.index()].renamed = Some(NodeRef::FALSE); + } + std::cmp::Ordering::Equal => { + self.required_inputs.insert(input); + self.nodes[current.index()].renamed = Some(NodeRef::TRUE); + } + std::cmp::Ordering::Greater => match state { + NodeState::Required => { + self.required_inputs.insert(input); + self.nodes[current.index()].renamed = Some(NodeRef::TRUE); + } + NodeState::Selected => { + self.unknown_inputs.insert(input); + self.nodes[current.index()].renamed = Some(current); + + if shuffle > 0 { + let priority = &mut self.nodes[current.index()].priority; + let mask = !(u64::MAX << 32.min(shuffle - 1)) as u32; + + *priority ^= + !(*priority ^ priority.wrapping_mul(0x2c9277b5)) & mask; + } + } + NodeState::Nonselected => { + self.nodes[current.index()].renamed = Some(NodeRef::FALSE); + } + NodeState::Unknown => { + unreachable!(); + } + }, + }, + } + } + + self.input_order.clear(); + + let result = self.nodes[target.index()].renamed.unwrap(); + self.stack = stack; + + result + } +} + +impl AigValue for (Option, NodeRef) { + fn invert_if(self, en: bool, _: &mut AndOrGraph) -> Self { + let (value, care) = self; + (value.map(|b| b ^ en), care) + } + + fn and(self, other: Self, ctx: &mut AndOrGraph) -> Self { + let (value_a, care_a) = self; + let (value_b, care_b) = other; + + match (value_a, value_b) { + (Some(true), Some(true)) => (Some(true), ctx.and([care_a, care_b])), + (Some(false), Some(false)) => (Some(false), ctx.or([care_a, care_b])), + (Some(false), _) => 
(Some(false), care_a), + (_, Some(false)) => (Some(false), care_b), + _ => (None, NodeRef::FALSE), + } + } + + fn constant(value: bool, _: &mut AndOrGraph) -> Self { + (Some(value), NodeRef::TRUE) + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug)] +pub enum Verification { + Cex, + Full, +} + +pub struct MinimizationOptions { + pub fixed_init: bool, + pub verify: Option, +} + +pub fn minimize( + aig: &OrderedAig, + latch_init: &[Option], + frame_inputs: &[Vec>], + writer: &mut DeferredWriter, + options: &MinimizationOptions, +) -> color_eyre::Result<()> { + let Some(initial_inputs) = frame_inputs.first() else { + bail!("no inputs found"); + }; + + let mut state = vec![]; + + let mut graph = AndOrGraph::default(); + + let input_id = |frame: Option, index: usize| -> u32 { + (if let Some(frame) = frame { + latch_init.len() + frame * initial_inputs.len() + index + } else { + index + }) + .try_into() + .unwrap() + }; + + let decode_input_id = |id: u32| -> (Option, usize) { + let id = id as usize; + if id < latch_init.len() { + (None, id) + } else { + let id = id - latch_init.len(); + let frame = id / initial_inputs.len(); + let index = id % initial_inputs.len(); + (Some(frame), index) + } + }; + + initial_frame( + aig, + &mut state, + |i, ctx| { + ( + latch_init[i], + if latch_init[i].is_some() { + if options.fixed_init { + NodeRef::TRUE + } else { + ctx.input(input_id(None, i)) + } + } else { + NodeRef::FALSE + }, + ) + }, + |i, ctx| { + ( + initial_inputs[i], + if initial_inputs[i].is_some() { + ctx.input(input_id(Some(0), i)) + } else { + NodeRef::FALSE + }, + ) + }, + &mut graph, + ); + + let mut minimization_target = 'minimization_target: { + for (t, inputs) in frame_inputs.iter().enumerate() { + if t > 0 { + successor_frame( + aig, + &mut state, + |i, ctx| { + ( + inputs[i], + if inputs[i].is_some() { + ctx.input(input_id(Some(t), i)) + } else { + NodeRef::FALSE + }, + ) + }, + &mut graph, + ); + } + let mut good_state = (Some(true), NodeRef::TRUE); + + for (i, bad) in aig.bad_state_properties.iter().enumerate() { + let (var, polarity) = unpack_lit(*bad); + let inv_bad = state[var].invert_if(!polarity, &mut graph); + + if inv_bad.0 == Some(false) { + println!("bad state property {i} active in frame {t}"); + } + + good_state = good_state.and(inv_bad, &mut graph); + } + if good_state.0 == Some(false) { + println!("bad state found in frame {t}"); + + break 'minimization_target good_state.1; + } + + if t > 0 && t % 500 == 0 { + println!( + "traced frame {t}/{frames}: node count = {node_count}", + frames = frame_inputs.len(), + node_count = graph.nodes.len(), + ); + } + } + + bail!("no bad state found"); + }; + + let node_count_width = (graph.nodes.len().max(2) - 1).ilog10() as usize + 1; + let input_count_width = (graph.unknown_inputs.len().max(2) - 1).ilog10() as usize + 1; + + println!( + "input: node count = {node_count:w0$}, defined inputs = {defined_inputs:w1$}", + node_count = graph.nodes.len(), + defined_inputs = graph.unknown_inputs.len(), + w0 = node_count_width, + w1 = input_count_width, + ); + + let mut shuffle = 0; + + let mut iteration = 0; + + while minimization_target != NodeRef::TRUE { + let prev_unknown_inputs = graph.unknown_inputs.len(); + minimization_target = graph.pass(minimization_target, shuffle, iteration >= 1); + let unknown_inputs = graph.unknown_inputs.len(); + let required_inputs = graph.required_inputs.len(); + println!( + concat!( + "iter: node count = {node_count:w0$}, defined inputs = {defined_inputs:w1$}, ", + "required inputs = {required_inputs:w1$}, 
shuffle = {shuffle}" + ), + node_count = graph.active_node_count, + required_inputs = required_inputs, + defined_inputs = unknown_inputs + required_inputs, + shuffle = shuffle, + w0 = node_count_width, + w1 = input_count_width, + ); + + if unknown_inputs + (unknown_inputs / 4) < prev_unknown_inputs { + shuffle = 0; + } else { + shuffle += 1; + } + iteration += 1; + } + + println!("minimization complete"); + + for i in 0..aig.latches.len() { + let bit = if options.fixed_init || graph.required_inputs.contains(&input_id(None, i)) { + latch_init[i] + } else { + None + }; + + write_output_bit(writer, bit); + } + + writer.write_all_defer_err(b"\n"); + + for (t, inputs) in frame_inputs.iter().enumerate() { + for i in 0..aig.input_count { + let bit = if graph.required_inputs.contains(&input_id(Some(t), i)) { + inputs[i] + } else { + None + }; + + write_output_bit(writer, bit); + } + writer.write_all_defer_err(b"\n"); + } + + writer.write_all_defer_err(b"# DONE\n"); + writer.flush_defer_err(); + writer.check_io_error()?; + + let Some(verify) = options.verify else { + return Ok(()); + }; + + let mut check_state: Vec> = vec![]; + + let empty_set = BTreeSet::new(); + + let verify_from = match verify { + Verification::Cex => &empty_set, + Verification::Full => &graph.required_inputs, + }; + + for check in [None] + .into_iter() + .chain(verify_from.iter().copied().map(Some)) + { + check_state.clear(); + + initial_frame( + aig, + &mut check_state, + |i, _| { + let input = input_id(None, i); + if options.fixed_init + || (Some(input) != check && graph.required_inputs.contains(&input)) + { + latch_init[i] + } else { + None + } + }, + |i, _| { + let input = input_id(Some(0), i); + if Some(input) != check && graph.required_inputs.contains(&input) { + initial_inputs[i] + } else { + None + } + }, + &mut (), + ); + + let mut bad_state = false; + + 'frame: for (t, inputs) in frame_inputs.iter().enumerate() { + if t > 0 { + successor_frame( + aig, + &mut check_state, + |i, _| { + let input = input_id(Some(t), i); + if Some(input) != check && graph.required_inputs.contains(&input) { + inputs[i] + } else { + None + } + }, + &mut (), + ); + } + + for bad in aig.bad_state_properties.iter() { + let (var, polarity) = unpack_lit(*bad); + let bad_output = check_state[var].invert_if(polarity, &mut ()); + if bad_output == Some(true) { + bad_state = true; + break 'frame; + } + } + } + + if bad_state != check.is_none() { + if let Some(check) = check { + let (frame, input) = decode_input_id(check); + if let Some(frame) = frame { + bail!("minimality verification wrt. frame {frame} input {input} failed"); + } else { + bail!("minimality verification wrt. initial latch {input} failed"); + } + } else { + bail!("counter example verification failed"); + } + } + + if let Some(check) = check { + let (frame, input) = decode_input_id(check); + if let Some(frame) = frame { + println!("verified minimality wrt. frame {frame} input {input}"); + } else { + println!("verified minimality wrt. 
initial latch {input}"); + } + } else { + println!("verified counter example"); + } + } + + Ok(()) +} diff --git a/tools/aigcexmin/src/main.rs b/tools/aigcexmin/src/main.rs new file mode 100644 index 00000000..30874db9 --- /dev/null +++ b/tools/aigcexmin/src/main.rs @@ -0,0 +1,145 @@ +#![allow(clippy::needless_range_loop)] + +use std::{fs, mem::replace, path::PathBuf}; + +use clap::{Parser, ValueEnum}; +use color_eyre::eyre::bail; + +use flussab_aiger::binary; + +pub mod aig_eval; +pub mod care_graph; +pub mod util; + +/// AIG counter example minimization +#[derive(clap::Parser)] +#[command(author, version, about, long_about = None, help_template="\ +{before-help}{name} {version} +{author-with-newline}{about-with-newline} +{usage-heading} {usage} + +{all-args}{after-help} +")] +pub struct Options { + /// Input AIGER file + aig: PathBuf, + /// Input AIGER witness file + witness: PathBuf, + /// Output AIGER witness file + output: PathBuf, + + /// Verify the minimized counter example + #[clap(long, default_value = "cex")] + verify: VerificationOption, + + /// Minimize latch initialization values + /// + /// Without this option the latch initialization values of the witness file are assumed to be + /// fixed and will remain as-is in the minimized witness file. + /// + /// Note that some tools (including the current Yosys/SBY flow) do not use AIGER native latch + /// initialization but instead perform initialization using inputs in the first frame. + #[clap(long)] + latches: bool, +} + +#[derive(Copy, Clone, ValueEnum)] +enum VerificationOption { + /// Skip verification + Off, + /// Verify the counter example + Cex, + /// Verify the counter example and that it is minimal (expensive) + Full, +} + +fn main() -> color_eyre::Result<()> { + let options = Options::parse(); + + color_eyre::install()?; + + let file_input = fs::File::open(options.aig)?; + let file_witness = fs::File::open(options.witness)?; + let file_output = fs::File::create(options.output)?; + + let mut writer_output = flussab::DeferredWriter::from_write(file_output); + + let mut read_witness_owned = flussab::DeferredReader::from_read(file_witness); + let read_witness = &mut read_witness_owned; + + let aig_reader = binary::Parser::::from_read(file_input, binary::Config::default())?; + + let aig = aig_reader.parse()?; + + let mut offset = 0; + offset = flussab::text::next_newline(read_witness, offset); + + if offset == 2 { + read_witness.advance(replace(&mut offset, 0)); + offset = flussab::text::next_newline(read_witness, offset); + read_witness.advance(replace(&mut offset, 0)); + + offset = flussab::text::next_newline(read_witness, offset); + } + + if offset != aig.latches.len() + 1 { + bail!( + "unexpected number of initial latch states, found {} expected {}", + offset.saturating_sub(1), + aig.latches.len() + ); + } + + let latch_init = read_witness.buf()[..aig.latches.len()] + .iter() + .copied() + .map(util::parse_input_bit) + .collect::, _>>()?; + + read_witness.advance(replace(&mut offset, 0)); + + let mut frame_inputs = vec![]; + + loop { + offset = flussab::text::next_newline(read_witness, offset); + + if matches!(read_witness.buf().first(), None | Some(b'.') | Some(b'#')) { + read_witness.check_io_error()?; + break; + } + + if offset != aig.input_count + 1 { + bail!( + "unexpected number of input bits, found {} expected {}", + offset.saturating_sub(1), + aig.input_count + ); + } + + frame_inputs.push( + read_witness.buf()[..aig.input_count] + .iter() + .copied() + .map(util::parse_input_bit) + .collect::, _>>()?, + ); + 
read_witness.advance(replace(&mut offset, 0)); + } + + care_graph::minimize( + &aig, + &latch_init, + &frame_inputs, + &mut writer_output, + &care_graph::MinimizationOptions { + fixed_init: !options.latches, + verify: match options.verify { + VerificationOption::Off => None, + VerificationOption::Cex => Some(care_graph::Verification::Cex), + VerificationOption::Full => Some(care_graph::Verification::Full), + }, + }, + )?; + + Ok(()) +} diff --git a/tools/aigcexmin/src/util.rs b/tools/aigcexmin/src/util.rs new file mode 100644 index 00000000..fb24bc89 --- /dev/null +++ b/tools/aigcexmin/src/util.rs @@ -0,0 +1,25 @@ +use color_eyre::eyre::bail; +use flussab::DeferredWriter; +use flussab_aiger::Lit; + +pub fn unpack_lit(lit: L) -> (usize, bool) { + let lit = lit.code(); + (lit >> 1, lit & 1 != 0) +} + +pub fn parse_input_bit(byte: u8) -> color_eyre::Result> { + Ok(match byte { + b'0' => Some(false), + b'1' => Some(true), + b'x' => None, + _ => bail!("unexpected input bit {byte:?}"), + }) +} + +pub fn write_output_bit(writer: &mut DeferredWriter, bit: Option) { + writer.write_all_defer_err(match bit { + Some(false) => b"0", + Some(true) => b"1", + None => b"x", + }) +} diff --git a/tools/cexenum/cexenum.py b/tools/cexenum/cexenum.py new file mode 100755 index 00000000..3ac88916 --- /dev/null +++ b/tools/cexenum/cexenum.py @@ -0,0 +1,584 @@ +#!/usr/bin/env tabbypy3 +from __future__ import annotations +import asyncio + +import json +import traceback +import argparse +import shutil +import shlex +import os +from pathlib import Path +from typing import Any, Awaitable, Literal + +import yosys_mau.task_loop.job_server as job +from yosys_mau import task_loop as tl + + +libexec = Path(__file__).parent.resolve() / "libexec" + +if libexec.exists(): + os.environb[b"PATH"] = bytes(libexec) + b":" + os.environb[b"PATH"] + + +def arg_parser(): + parser = argparse.ArgumentParser( + prog="cexenum", usage="%(prog)s [options] " + ) + + parser.add_argument( + "work_dir", + metavar="", + help="existing SBY work directory", + type=Path, + ) + + parser.add_argument( + "--depth", + type=int, + metavar="N", + help="BMC depth for the initial assertion failure (default: %(default)s)", + default=100, + ) + + parser.add_argument( + "--enum-depth", + type=int, + metavar="N", + help="number of time steps to run enumeration for, starting with" + " and including the time step of the first assertion failure" + " (default: %(default)s)", + default=10, + ) + + parser.add_argument( + "--no-sim", + dest="sim", + action="store_false", + help="do not run sim to obtain .fst traces for the enumerated counter examples", + ) + + parser.add_argument( + "--smtbmc-options", + metavar='"..."', + type=shlex.split, + help='command line options to pass to smtbmc (default: "%(default)s")', + default="-s yices --unroll", + ) + + parser.add_argument("--debug", action="store_true", help="enable debug logging") + parser.add_argument( + "--debug-events", action="store_true", help="enable debug event logging" + ) + + parser.add_argument( + "-j", + metavar="", + type=int, + dest="jobs", + help="maximum number of processes to run in parallel", + default=None, + ) + + return parser + + +def lines(*args): + return "".join(f"{line}\n" for line in args) + + +@tl.task_context +class App: + raw_args: argparse.Namespace + + debug: bool = False + debug_events: bool = False + + depth: int + enum_depth: int + sim: bool + + smtbmc_options: list[str] + + work_dir: Path + + work_subdir: Path + trace_dir_full: Path + trace_dir_min: Path + cache_dir: Path + + +def 
main() -> None: + args = arg_parser().parse_args() + + job.global_client(args.jobs) + + # Move command line arguments into the App context + for name in dir(args): + if name in type(App).__mro__[1].__annotations__: + setattr(App, name, getattr(args, name)) + + App.raw_args = args + + try: + tl.run_task_loop(task_loop_main) + except tl.TaskCancelled: + exit(1) + except BaseException as e: + if App.debug or App.debug_events: + traceback.print_exc() + tl.log_exception(e, raise_error=False) # Automatically avoids double logging + exit(1) + + +def setup_logging(): + tl.LogContext.app_name = "CEXENUM" + tl.logging.start_logging() + + if App.debug_events: + tl.logging.start_debug_event_logging() + if App.debug: + tl.LogContext.level = "debug" + + def error_handler(err: BaseException): + if isinstance(err, tl.TaskCancelled): + return + tl.log_exception(err, raise_error=True) + + tl.current_task().set_error_handler(None, error_handler) + + +async def batch(*args): + result = None + for arg in args: + result = await arg + return result + + +async def task_loop_main() -> None: + setup_logging() + + cached = False + + App.cache_dir = App.work_dir / "cexenum_cache" + try: + App.cache_dir.mkdir() + except FileExistsError: + if (App.cache_dir / "done").exists(): + cached = True + else: + shutil.rmtree(App.cache_dir) + App.cache_dir.mkdir() + + App.work_subdir = App.work_dir / "cexenum" + try: + App.work_subdir.mkdir() + except FileExistsError: + shutil.rmtree(App.work_subdir) + App.work_subdir.mkdir() + + App.trace_dir_full = App.work_subdir / "full" + App.trace_dir_full.mkdir() + App.trace_dir_min = App.work_subdir / "min" + App.trace_dir_min.mkdir() + + if cached: + tl.log("Reusing cached AIGER model") + aig_model = tl.Task() + else: + aig_model = AigModel() + + Enumeration(aig_model) + + +class AigModel(tl.process.Process): + def __init__(self): + self[tl.LogContext].scope = "aiger" + (App.cache_dir / "design_aiger.ys").write_text( + lines( + "read_ilang ../model/design_prep.il", + "hierarchy -simcheck", + "flatten", + "setundef -undriven -anyseq", + "setattr -set keep 1 w:\*", + "delete -output", + "opt -full", + "techmap", + "opt -fast", + "memory_map -formal", + "formalff -clk2ff -ff2anyinit", + "simplemap", + "dffunmap", + "abc -g AND -fast", + "opt_clean", + "stat", + "write_rtlil design_aiger.il", + "write_aiger -I -B -zinit" + " -map design_aiger.aim -ywmap design_aiger.ywa design_aiger.aig", + ) + ) + super().__init__( + ["yosys", "-ql", "design_aiger.log", "design_aiger.ys"], cwd=App.cache_dir + ) + self.name = "aiger" + self.log_output() + + async def on_run(self) -> None: + await super().on_run() + (App.cache_dir / "done").write_text("") + + +class MinimizeTrace(tl.Task): + def __init__(self, trace_name: str, aig_model: tl.Task): + super().__init__() + self.trace_name = trace_name + + full_yw = App.trace_dir_full / self.trace_name + min_yw = App.trace_dir_min / self.trace_name + + stem = full_yw.stem + + full_aiw = full_yw.with_suffix(".aiw") + min_aiw = min_yw.with_suffix(".aiw") + + yw2aiw = YosysWitness( + "yw2aiw", + full_yw, + App.cache_dir / "design_aiger.ywa", + full_aiw, + cwd=App.trace_dir_full, + ) + yw2aiw.depends_on(aig_model) + yw2aiw[tl.LogContext].scope = f"yw2aiw[{stem}]" + + aigcexmin = AigCexMin( + App.cache_dir / "design_aiger.aig", + full_aiw, + min_aiw, + cwd=App.trace_dir_min, + ) + aigcexmin.depends_on(yw2aiw) + aigcexmin[tl.LogContext].scope = f"aigcexmin[{stem}]" + + self.aiw2yw = aiw2yw = YosysWitness( + "aiw2yw", + min_aiw, + App.cache_dir / "design_aiger.ywa", + 
min_yw, + cwd=App.trace_dir_min, + ) + aiw2yw[tl.LogContext].scope = f"aiw2yw[{stem}]" + aiw2yw.depends_on(aigcexmin) + + if App.sim: + sim = SimTrace( + App.cache_dir / "design_aiger.il", + min_yw, + min_yw.with_suffix(".fst"), + cwd=App.trace_dir_min, + ) + + sim[tl.LogContext].scope = f"sim[{stem}]" + sim.depends_on(aiw2yw) + + +def relative_to(target: Path, cwd: Path) -> Path: + prefix = Path("") + target = target.resolve() + cwd = cwd.resolve() + while True: + try: + return prefix / (target.relative_to(cwd)) + except ValueError: + prefix = prefix / ".." + if cwd == cwd.parent: + return target + cwd = cwd.parent + + +class YosysWitness(tl.process.Process): + def __init__( + self, + mode: Literal["yw2aiw"] | Literal["aiw2yw"], + input: Path, + mapfile: Path, + output: Path, + cwd: Path, + ): + super().__init__( + [ + "yosys-witness", + mode, + str(relative_to(input, cwd)), + str(relative_to(mapfile, cwd)), + str(relative_to(output, cwd)), + ], + cwd=cwd, + ) + + def handler(event: tl.process.OutputEvent): + tl.log_debug(event.output.rstrip("\n")) + + self.sync_handle_events(tl.process.OutputEvent, handler) + + +class AigCexMin(tl.process.Process): + def __init__(self, design_aig: Path, input_aiw: Path, output_aiw: Path, cwd: Path): + super().__init__( + [ + "aigcexmin", + str(relative_to(design_aig, cwd)), + str(relative_to(input_aiw, cwd)), + str(relative_to(output_aiw, cwd)), + ], + cwd=cwd, + ) + + self.log_path = output_aiw.with_suffix(".log") + self.log_file = None + + def handler(event: tl.process.OutputEvent): + if self.log_file is None: + self.log_file = self.log_path.open("w") + self.log_file.write(event.output) + self.log_file.flush() + tl.log_debug(event.output.rstrip("\n")) + + self.sync_handle_events(tl.process.OutputEvent, handler) + + def on_cleanup(self): + if self.log_file is not None: + self.log_file.close() + super().on_cleanup() + + +class SimTrace(tl.process.Process): + def __init__(self, design_il: Path, input_yw: Path, output_fst: Path, cwd: Path): + self[tl.LogContext].scope = "sim" + + script_file = output_fst.with_suffix(".fst.ys") + log_file = output_fst.with_suffix(".fst.log") + + script_file.write_text( + lines( + f"read_rtlil {relative_to(design_il, cwd)}", + "logger -nowarn" + ' "Yosys witness trace has an unexpected value for the clock input"', + f"sim -zinit -r {relative_to(input_yw, cwd)} -hdlname" + f" -fst {relative_to(output_fst, cwd)}", + ) + ) + super().__init__( + [ + "yosys", + "-ql", + str(relative_to(log_file, cwd)), + str(relative_to(script_file, cwd)), + ], + cwd=cwd, + ) + self.name = "sim" + self.log_output() + + +class Enumeration(tl.Task): + def __init__(self, aig_model: tl.Task): + self.aig_model = aig_model + super().__init__() + + async def on_run(self) -> None: + smtbmc = Smtbmc(App.work_dir / "model" / "design_smt2.smt2") + + await smtbmc.ping() + + pred = None + + i = 0 + limit = App.depth + first_failure = None + + while i <= limit: + tl.log(f"Checking assumptions in step {i}..") + presat_checked = await batch( + smtbmc.bmc_step(i, initial=i == 0, assertions=None, pred=pred), + smtbmc.check(), + ) + if presat_checked != "sat": + if first_failure is None: + tl.log_error("Assumptions are not satisfiable") + else: + tl.log("No further counter-examples are reachable") + return + + tl.log(f"Checking assertions in step {i}..") + checked = await batch( + smtbmc.push(), + smtbmc.assertions(i, False), + smtbmc.check(), + ) + pred = i + if checked != "unsat": + if first_failure is None: + first_failure = i + limit = i + App.enum_depth + 
tl.log("BMC failed! Enumerating counter-examples..") + counter = 0 + + assert checked == "sat" + path = App.trace_dir_full / f"trace{i}_{counter}.yw" + + while checked == "sat": + await smtbmc.incremental_command( + cmd="write_yw_trace", path=str(path) + ) + tl.log(f"Written counter-example to {path.name}") + + minimize = MinimizeTrace(path.name, self.aig_model) + minimize.depends_on(self.aig_model) + + await minimize.aiw2yw.finished + + min_path = App.trace_dir_min / f"trace{i}_{counter}.yw" + + checked = await batch( + smtbmc.incremental_command( + cmd="read_yw_trace", + name="last", + path=str(min_path), + skip_x=True, + ), + smtbmc.assert_( + ["not", ["and", *(["yw", "last", k] for k in range(i + 1))]] + ), + smtbmc.check(), + ) + + counter += 1 + path = App.trace_dir_full / f"trace{i}_{counter}.yw" + + await batch(smtbmc.pop(), smtbmc.assertions(i)) + + i += 1 + + smtbmc.close_stdin() + + +class Smtbmc(tl.process.Process): + def __init__(self, smt2_model: Path): + self[tl.LogContext].scope = "smtbmc" + super().__init__( + [ + "yosys-smtbmc", + "--incremental", + *App.smtbmc_options, + str(smt2_model), + ], + interact=True, + ) + self.name = "smtbmc" + + self.expected_results = [] + + async def on_run(self) -> None: + def output_handler(event: tl.process.StderrEvent): + result = json.loads(event.output) + tl.log_debug(f"smtbmc > {result!r}") + if "err" in result: + exception = tl.logging.LoggedError( + tl.log_error(result["err"], raise_error=False) + ) + self.expected_results.pop(0).set_exception(exception) + if "msg" in result: + tl.log(result["msg"]) + if "ok" in result: + assert self.expected_results + self.expected_results.pop(0).set_result(result["ok"]) + + self.sync_handle_events(tl.process.StdoutEvent, output_handler) + + return await super().on_run() + + def ping(self) -> Awaitable[None]: + return self.incremental_command(cmd="ping") + + def incremental_command(self, **command: dict[Any]) -> Awaitable[Any]: + tl.log_debug(f"smtbmc < {command!r}") + self.write(json.dumps(command)) + self.write("\n") + result = asyncio.Future() + self.expected_results.append(result) + + return result + + def new_step(self, step: int) -> Awaitable[None]: + return self.incremental_command(cmd="new_step", step=step) + + def push(self) -> Awaitable[None]: + return self.incremental_command(cmd="push") + + def pop(self) -> Awaitable[None]: + return self.incremental_command(cmd="pop") + + def check(self) -> Awaitable[str]: + return self.incremental_command(cmd="check") + + def assert_antecedent(self, expr: Any) -> Awaitable[None]: + return self.incremental_command(cmd="assert_antecedent", expr=expr) + + def assert_consequent(self, expr: Any) -> Awaitable[None]: + return self.incremental_command(cmd="assert_consequent", expr=expr) + + def assert_(self, expr: Any) -> Awaitable[None]: + return self.incremental_command(cmd="assert", expr=expr) + + def hierarchy(self, step: int) -> Awaitable[None]: + return self.assert_antecedent(["mod_h", ["step", step]]) + + def assumptions(self, step: int, valid: bool = True) -> Awaitable[None]: + expr = ["mod_u", ["step", step]] + if not valid: + expr = ["not", expr] + return self.assert_consequent(expr) + + def assertions(self, step: int, valid: bool = True) -> Awaitable[None]: + expr = ["mod_a", ["step", step]] + if not valid: + expr = ["not", expr] + return self.assert_(expr) + + def initial(self, step: int, initial: bool) -> Awaitable[None]: + if initial: + return batch( + self.assert_antecedent(["mod_i", ["step", step]]), + self.assert_antecedent(["mod_is", ["step", 
step]]), + ) + else: + return self.assert_antecedent(["not", ["mod_is", ["step", step]]]) + + def transition(self, pred: int, succ: int) -> Awaitable[None]: + return self.assert_antecedent(["mod_t", ["step", pred], ["step", succ]]) + + def bmc_step( + self, + step: int, + initial: bool = False, + assertions: bool | None = True, + pred: int | None = None, + ) -> Awaitable[None]: + futures = [] + futures.append(self.new_step(step)) + futures.append(self.hierarchy(step)) + futures.append(self.assumptions(step)) + futures.append(self.initial(step, initial)) + + if pred is not None: + futures.append(self.transition(pred, step)) + + if assertions is not None: + futures.append(self.assertions(assertions)) + + return batch(*futures) + + +if __name__ == "__main__": + main() diff --git a/tools/cexenum/examples/.gitignore b/tools/cexenum/examples/.gitignore new file mode 100644 index 00000000..e69de29b diff --git a/tools/cexenum/examples/factor.sby b/tools/cexenum/examples/factor.sby new file mode 100644 index 00000000..5fe21fcd --- /dev/null +++ b/tools/cexenum/examples/factor.sby @@ -0,0 +1,49 @@ +# Run using: +# +# sby -f factor.sby +# tabbypy3 cexenum.py factor --enum-depth=0 +# +[options] +mode bmc +make_model prep,smt2 +expect unknown + +[engines] +none + +[script] +read_verilog -sv top.sv +prep -top top + +[file top.sv] +module top(input clk, input b_bit, output [15:0] acc); + reg [7:0] a; + reg [7:0] b_mask = 8'hff; + + + reg [15:0] a_shift = 0; + reg [15:0] acc = 0; + + + always @(posedge clk) begin + assume (!clk); + if ($initstate) begin + a_shift <= a; + acc <= 0; + end else begin + + if (b_bit) begin + acc <= acc + a_shift; + end + a_shift <= a_shift << 1; + b_mask <= b_mask >> 1; + end + + if (b_mask == 0) begin + a <= 0; + assert (acc != 100); + end; + + end + +endmodule From 6e97cea07f8e5fee88a18aad9a9952e596e12929 Mon Sep 17 00:00:00 2001 From: Dave Keeshan Date: Sat, 2 Dec 2023 00:05:05 +0000 Subject: [PATCH 208/220] Fix PREFIX in makefile to accept environment variable, if set --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 7a86f6be..28f05657 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ DESTDIR = -PREFIX = /usr/local +PREFIX ?= /usr/local PROGRAM_PREFIX = # On Windows, manually setting absolute path to Python binary may be required From 6f0f2645c21b3c3ce36ea08dcbc1b5df3248bd52 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Fri, 19 Jan 2024 14:51:16 +0100 Subject: [PATCH 209/220] tests: Support testing an installed SBY using the SBY_CMD make variable --- tests/Makefile | 5 +++++ tests/make/run_sby.py | 4 ++++ 2 files changed, 9 insertions(+) create mode 100644 tests/make/run_sby.py diff --git a/tests/Makefile b/tests/Makefile index 805d1909..6a586b6e 100644 --- a/tests/Makefile +++ b/tests/Makefile @@ -18,7 +18,12 @@ help: export SBY_WORKDIR_GITIGNORE := 1 +ifeq ($(SBY_CMD),) SBY_MAIN := $(realpath $(dir $(firstword $(MAKEFILE_LIST)))/../sbysrc/sby.py) +else +SBY_MAIN := $(realpath $(dir $(firstword $(MAKEFILE_LIST)))/make/run_sby.py) +endif + ifeq (nt-unix-like,$(OS_NAME)) SBY_MAIN := $(shell cygpath -w $(SBY_MAIN)) endif diff --git a/tests/make/run_sby.py b/tests/make/run_sby.py new file mode 100644 index 00000000..9fb7de46 --- /dev/null +++ b/tests/make/run_sby.py @@ -0,0 +1,4 @@ +import os +import sys +prog = os.environ.get("SBY_CMD", "sby") +os.execvp(prog, [prog, *sys.argv[1:]]) From 1eeb6f3f0bbea9af1ae862f0419149d0bc8e483c Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 22 Jan 2024 18:10:00 +0100 
Subject: [PATCH 210/220] Delete `$print` cells in the backend flows They are only useful and supported for the simulation that is run with the output of the prep flow, not the output of the backend flows. --- sbysrc/sby_core.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 8d2fc764..d59ffa13 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1055,6 +1055,7 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print(f"""read_ilang design_prep.il""", file=f) print("hierarchy -smtcheck", file=f) + print("delete */t:$print", file=f) print("formalff -assume", file=f) if "_nomem" in model_name: print("memory_map -formal", file=f) @@ -1088,6 +1089,7 @@ def instance_hierarchy_error_callback(retcode): print(f"# running in {self.workdir}/model/", file=f) print(f"""read_ilang design_prep.il""", file=f) print("hierarchy -simcheck", file=f) + print("delete */t:$print", file=f) print("formalff -assume", file=f) if "_nomem" in model_name: print("memory_map -formal", file=f) @@ -1122,6 +1124,7 @@ def instance_hierarchy_error_callback(retcode): with open(f"{self.workdir}/model/design_aiger.ys", "w") as f: print(f"# running in {self.workdir}/model/", file=f) print("read_ilang design_prep.il", file=f) + print("delete */t:$print", file=f) print("hierarchy -simcheck", file=f) print("formalff -assume", file=f) print("flatten", file=f) From 881082c9908aa81101fea52c7c8473607cb1b119 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 22 Jan 2024 18:11:16 +0100 Subject: [PATCH 211/220] sby_design: Discover properties represented using `$check` cells --- sbysrc/sby_design.py | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index 88b63954..a449a9b6 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -71,6 +71,18 @@ def from_cell(c, name): return c.LIVE raise ValueError("Unknown property type: " + name) + @classmethod + def from_flavor(c, name): + if name == "assume": + return c.ASSUME + if name == "assert": + return c.ASSERT + if name == "cover": + return c.COVER + if name == "live": + return c.LIVE + raise ValueError("Unknown property type: " + name) + name: str path: Tuple[str, ...] 
type: Type @@ -199,6 +211,20 @@ def make_mod_hier(instance_name, module_name, hierarchy="", path=()): location=location, hierarchy=sub_hierarchy) mod.properties.append(p) + if sort["type"] == "$check": + for cell in sort["cells"]: + try: + location = cell["attributes"]["src"] + except KeyError: + location = "" + p = SbyProperty( + name=cell["name"], + path=(*path, *cell_path(cell)), + type=SbyProperty.Type.from_flavor(cell["parameters"]["FLAVOR"]), + location=location, + hierarchy=sub_hierarchy) + mod.properties.append(p) + if sort["type"][0] != '$' or sort["type"].startswith("$paramod"): for cell in sort["cells"]: mod.submodules[cell["name"]] = make_mod_hier( From 40bf8fcb87f8d3c89f88fad728c4ae03921bc329 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 24 Jan 2024 16:08:31 +0100 Subject: [PATCH 212/220] sby_design: Also track fairness assumptions --- sbysrc/sby_design.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index a449a9b6..df2977fd 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -55,6 +55,7 @@ class Type(Enum): ASSERT = auto() COVER = auto() LIVE = auto() + FAIR = auto() def __str__(self): return self.name @@ -69,6 +70,8 @@ def from_cell(c, name): return c.COVER if name == "$live": return c.LIVE + if name == "$fair": + return c.FAIR raise ValueError("Unknown property type: " + name) @classmethod @@ -81,6 +84,8 @@ def from_flavor(c, name): return c.COVER if name == "live": return c.LIVE + if name == "fair": + return c.FAIR raise ValueError("Unknown property type: " + name) name: str @@ -198,7 +203,7 @@ def make_mod_hier(instance_name, module_name, hierarchy="", path=()): raise ValueError(f"Cannot find module {module_name}") for sort in cell_sorts: - if sort["type"] in ["$assume", "$assert", "$cover", "$live"]: + if sort["type"] in ["$assume", "$assert", "$cover", "$live", "$fair"]: for cell in sort["cells"]: try: location = cell["attributes"]["src"] From 44ccad38823fc99424bf54d74069e4c12cd87da8 Mon Sep 17 00:00:00 2001 From: Miodrag Milanovic Date: Mon, 29 Jan 2024 08:48:29 +0100 Subject: [PATCH 213/220] Update workflows --- .github/workflows/ci.yml | 4 ++-- .github/workflows/codeql.yml | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 133b103c..4025950e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -6,8 +6,8 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - uses: YosysHQ/setup-oss-cad-suite@v2 + - uses: actions/checkout@v4 + - uses: YosysHQ/setup-oss-cad-suite@v3 with: github-token: ${{ secrets.GITHUB_TOKEN }} - name: Run checks diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 76d61afc..888cbb35 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -16,13 +16,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@v3 + uses: actions/checkout@v4 - name: Initialize CodeQL - uses: github/codeql-action/init@v2 + uses: github/codeql-action/init@v3 with: languages: python queries: security-extended,security-and-quality - name: Perform CodeQL Analysis - uses: github/codeql-action/analyze@v2 + uses: github/codeql-action/analyze@v3 From 52184e5bf0e02b879d39e5f891bd6c45810abefb Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 19 Feb 2024 21:06:26 +0100 Subject: [PATCH 214/220] Initial support for a multi-task property status database This adds initial support for an sqlite database that is 
shared across multiple tasks of a single SBY file and that can track the status of individual properties. The amount of information tracked in the database is currently quite minimal and depends on the engine and options used. This can be incrementally extended in the future. The ways in which the information in the database can be queries is even more limited for this initial version, consisting of a single '--status' option which lists all properties and their status. --- sbysrc/sby.py | 45 +++++ sbysrc/sby_autotune.py | 1 + sbysrc/sby_cmdline.py | 5 + sbysrc/sby_core.py | 54 +++++- sbysrc/sby_design.py | 7 + sbysrc/sby_engine_smtbmc.py | 5 + sbysrc/sby_status.py | 344 ++++++++++++++++++++++++++++++++++++ tests/make/collect_tests.py | 6 +- 8 files changed, 462 insertions(+), 5 deletions(-) create mode 100644 sbysrc/sby_status.py diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 986fb9d0..99dcc6c9 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -22,6 +22,7 @@ from sby_cmdline import parser_func from sby_core import SbyConfig, SbyTask, SbyAbort, SbyTaskloop, process_filename, dress_message from sby_jobserver import SbyJobClient, process_jobserver_environment +from sby_status import SbyStatusDb import time, platform, click process_jobserver_environment() # needs to be called early @@ -55,6 +56,39 @@ sequential = args.sequential jobcount = args.jobcount init_config_file = args.init_config_file +status_show = args.status +status_reset = args.status_reset + +if status_show or status_reset: + target = workdir_prefix or workdir or sbyfile + if not os.path.isdir(target) and target.endswith('.sby'): + target = target[:-4] + if not os.path.isdir(target): + print(f"ERROR: No directory found at {target!r}.", file=sys.stderr) + sys.exit(1) + + try: + with open(f"{target}/status.path", "r") as status_path_file: + status_path = f"{target}/{status_path_file.read().rstrip()}" + except FileNotFoundError: + status_path = f"{target}/status.sqlite" + + if not os.path.exists(status_path): + print(f"ERROR: No status database found at {status_path!r}.", file=sys.stderr) + sys.exit(1) + + status_db = SbyStatusDb(status_path, task=None) + + if status_show: + status_db.print_status_summary() + sys.exit(0) + + if status_reset: + status_db.reset() + + status_db.db.close() + sys.exit(0) + if sbyfile is not None: if os.path.isdir(sbyfile): @@ -356,6 +390,7 @@ def start_task(taskloop, taskname): my_opt_tmpdir = opt_tmpdir my_workdir = None + my_status_db = None if workdir is not None: my_workdir = workdir @@ -364,10 +399,12 @@ def start_task(taskloop, taskname): my_workdir = workdir_prefix else: my_workdir = workdir_prefix + "_" + taskname + my_status_db = f"../{os.path.basename(workdir_prefix)}/status.sqlite" if my_workdir is None and sbyfile is not None and not my_opt_tmpdir: my_workdir = sbyfile[:-4] if taskname is not None: + my_status_db = f"../{os.path.basename(my_workdir)}/status.sqlite" my_workdir += "_" + taskname if my_workdir is not None: @@ -399,6 +436,14 @@ def start_task(taskloop, taskname): with open(f"{my_workdir}/.gitignore", "w") as gitignore: print("*", file=gitignore) + if my_status_db is not None: + os.makedirs(f"{my_workdir}/{os.path.dirname(my_status_db)}", exist_ok=True) + if os.getenv("SBY_WORKDIR_GITIGNORE"): + with open(f"{my_workdir}/{os.path.dirname(my_status_db)}/.gitignore", "w") as gitignore: + print("*", file=gitignore) + with open(f"{my_workdir}/status.path", "w") as status_path: + print(my_status_db, file=status_path) + junit_ts_name = os.path.basename(sbyfile[:-4]) if sbyfile is not 
None else workdir if workdir is not None else "stdin" junit_tc_name = taskname if taskname is not None else "default" diff --git a/sbysrc/sby_autotune.py b/sbysrc/sby_autotune.py index 1d6f1020..b861890a 100644 --- a/sbysrc/sby_autotune.py +++ b/sbysrc/sby_autotune.py @@ -378,6 +378,7 @@ def log(self, message): def run(self): self.task.handle_non_engine_options() + self.task.setup_status_db(':memory:') self.config = self.task.autotune_config or SbyAutotuneConfig() if "expect" not in self.task.options: diff --git a/sbysrc/sby_cmdline.py b/sbysrc/sby_cmdline.py index a75c2734..bc45b4a5 100644 --- a/sbysrc/sby_cmdline.py +++ b/sbysrc/sby_cmdline.py @@ -69,6 +69,11 @@ def parser_func(): parser.add_argument("--setup", action="store_true", dest="setupmode", help="set up the working directory and exit") + parser.add_argument("--status", action="store_true", dest="status", + help="summarize the contents of the status database") + parser.add_argument("--statusreset", action="store_true", dest="status_reset", + help="reset the contents of the status database") + parser.add_argument("--init-config-file", dest="init_config_file", help="create a default .sby config file") parser.add_argument("sbyfile", metavar=".sby | ", nargs="?", diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index d59ffa13..ecbd081a 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -27,6 +27,7 @@ from select import select from time import monotonic, localtime, sleep, strftime from sby_design import SbyProperty, SbyModule, design_hierarchy +from sby_status import SbyStatusDb all_procs_running = [] @@ -674,20 +675,41 @@ def engine_summary(self, engine_idx): self.engine_summaries[engine_idx] = SbyEngineSummary(engine_idx) return self.engine_summaries[engine_idx] - def add_event(self, *args, **kwargs): + def add_event(self, *args, update_status=True, **kwargs): event = SbySummaryEvent(*args, **kwargs) + + engine = self.engine_summary(event.engine_idx) + + if update_status: + status_metadata = dict(source="summary_event", engine=engine.engine) + if event.prop: if event.type == "$assert": event.prop.status = "FAIL" if event.path: event.prop.tracefiles.append(event.path) + if update_status: + self.task.status_db.add_task_property_data( + event.prop, + "trace", + data=dict(path=event.path, step=event.step, **status_metadata), + ) if event.prop: if event.type == "$cover": event.prop.status = "PASS" if event.path: event.prop.tracefiles.append(event.path) - - engine = self.engine_summary(event.engine_idx) + if update_status: + self.task.status_db.add_task_property_data( + event.prop, + "trace", + data=dict(path=event.path, step=event.step, **status_metadata), + ) + if event.prop and update_status: + self.task.status_db.set_task_property_status( + event.prop, + data=status_metadata + ) if event.trace not in engine.traces: engine.traces[event.trace] = SbyTraceSummary(event.trace, path=event.path, engine_case=event.engine_case) @@ -1041,6 +1063,10 @@ def instance_hierarchy_callback(retcode): if self.design == None: with open(f"{self.workdir}/model/design.json") as f: self.design = design_hierarchy(f) + self.status_db.create_task_properties([ + prop for prop in self.design.properties_by_path.values() + if not prop.type.assume_like + ]) def instance_hierarchy_error_callback(retcode): self.precise_prop_status = False @@ -1186,8 +1212,13 @@ def proc_failed(self, proc): self.status = "ERROR" self.terminate() + def pass_unknown_asserts(self, data): + for prop in self.design.pass_unknown_asserts(): + 
self.status_db.set_task_property_status(prop, data=data) + def update_status(self, new_status): assert new_status in ["PASS", "FAIL", "UNKNOWN", "ERROR"] + self.status_db.set_task_status(new_status) if new_status == "UNKNOWN": return @@ -1199,7 +1230,7 @@ def update_status(self, new_status): assert self.status != "FAIL" self.status = "PASS" if self.opt_mode in ("bmc", "prove") and self.design: - self.design.pass_unknown_asserts() + self.pass_unknown_asserts(dict(source="task_status")) elif new_status == "FAIL": assert self.status != "PASS" @@ -1258,6 +1289,19 @@ def handle_non_engine_options(self): self.handle_bool_option("assume_early", True) + def setup_status_db(self, status_path=None): + if hasattr(self, 'status_db'): + return + + if status_path is None: + try: + with open(f"{self.workdir}/status.path", "r") as status_path_file: + status_path = f"{self.workdir}/{status_path_file.read().rstrip()}" + except FileNotFoundError: + status_path = f"{self.workdir}/status.sqlite" + + self.status_db = SbyStatusDb(status_path, self) + def setup_procs(self, setupmode): self.handle_non_engine_options() if self.opt_smtc is not None: @@ -1285,6 +1329,8 @@ def setup_procs(self, setupmode): self.retcode = 0 return + self.setup_status_db() + if self.opt_make_model is not None: for name in self.opt_make_model.split(","): self.model(name.strip()) diff --git a/sbysrc/sby_design.py b/sbysrc/sby_design.py index df2977fd..d93d17da 100644 --- a/sbysrc/sby_design.py +++ b/sbysrc/sby_design.py @@ -88,6 +88,10 @@ def from_flavor(c, name): return c.FAIR raise ValueError("Unknown property type: " + name) + @property + def assume_like(self): + return self in [self.ASSUME, self.FAIR] + name: str path: Tuple[str, ...] type: Type @@ -171,9 +175,12 @@ class SbyDesign: properties_by_path: dict = field(default_factory=dict) def pass_unknown_asserts(self): + updated = [] for prop in self.hierarchy: if prop.type == prop.Type.ASSERT and prop.status == "UNKNOWN": prop.status = "PASS" + updated.append(prop) + return updated def cell_path(cell): diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 7e155899..3b431130 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -233,6 +233,7 @@ def output_callback(line): cell_name = match[3] or match[2] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" + task.status_db.set_task_property_status(prop, data=dict(source="smtbmc", engine=f"engine_{engine_idx}")) last_prop.append(prop) return line @@ -241,6 +242,7 @@ def output_callback(line): cell_name = match[2] or match[1] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "PASS" + task.status_db.set_task_property_status(prop, data=dict(source="smtbmc", engine=f"engine_{engine_idx}")) last_prop.append(prop) return line @@ -271,6 +273,7 @@ def output_callback(line): cell_name = match[2] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" + task.status_db.set_task_property_status(prop, data=dict(source="smtbmc", engine=f"engine_{engine_idx}")) return line @@ -288,6 +291,8 @@ def exit_callback(retcode): def last_exit_callback(): if mode == "bmc" or mode == "cover": task.update_status(proc_status) + if proc_status == "FAIL" and mode == "bmc" and keep_going: + task.pass_unknown_asserts(dict(source="smtbmc", keep_going=True, engine=f"engine_{engine_idx}")) proc_status_lower = proc_status.lower() if proc_status == "PASS" else 
proc_status task.summary.set_engine_status(engine_idx, proc_status_lower) diff --git a/sbysrc/sby_status.py b/sbysrc/sby_status.py new file mode 100644 index 00000000..e4722c3c --- /dev/null +++ b/sbysrc/sby_status.py @@ -0,0 +1,344 @@ +from __future__ import annotations + +import sqlite3 +import os +import time +import json +from collections import defaultdict +from functools import wraps +from pathlib import Path +from typing import Any, Callable, TypeVar, Optional, Iterable +from sby_design import SbyProperty, pretty_path + + +Fn = TypeVar("Fn", bound=Callable[..., Any]) + + +def transaction(method: Fn) -> Fn: + @wraps(method) + def wrapper(self: SbyStatusDb, *args: Any, **kwargs: Any) -> Any: + if self._transaction_active: + return method(self, *args, **kwargs) + + try: + self.log_debug(f"begin {method.__name__!r} transaction") + self.db.execute("begin") + self._transaction_active = True + result = method(self, *args, **kwargs) + self.db.execute("commit") + self._transaction_active = False + self.log_debug(f"comitted {method.__name__!r} transaction") + return result + except sqlite3.OperationalError as err: + self.log_debug(f"failed {method.__name__!r} transaction {err}") + self.db.rollback() + self._transaction_active = False + except Exception as err: + self.log_debug(f"failed {method.__name__!r} transaction {err}") + self.db.rollback() + self._transaction_active = False + raise + try: + self.log_debug( + f"retrying {method.__name__!r} transaction once in immediate mode" + ) + self.db.execute("begin immediate") + self._transaction_active = True + result = method(self, *args, **kwargs) + self.db.execute("commit") + self._transaction_active = False + self.log_debug(f"comitted {method.__name__!r} transaction") + return result + except Exception as err: + self.log_debug(f"failed {method.__name__!r} transaction {err}") + self.db.rollback() + self._transaction_active = False + raise + + return wrapper # type: ignore + + +class SbyStatusDb: + def __init__(self, path: Path, task, timeout: float = 5.0): + self.debug = False + self.task = task + self._transaction_active = False + + setup = not os.path.exists(path) + + self.db = sqlite3.connect(path, isolation_level=None, timeout=timeout) + self.db.row_factory = sqlite3.Row + self.db.execute("PRAGMA journal_mode=WAL") + self.db.execute("PRAGMA synchronous=0") + + if setup: + self._setup() + + if task is not None: + self.task_id = self.create_task(workdir=task.workdir, mode=task.opt_mode) + + def log_debug(self, *args): + if self.debug: + if self.task: + self.task.log(" ".join(str(arg) for arg in args)) + else: + print(*args) + + @transaction + def _setup(self): + script = """ + CREATE TABLE task ( + id INTEGER PRIMARY KEY, + workdir TEXT, + mode TEXT, + created REAL + ); + CREATE TABLE task_status ( + id INTEGER PRIMARY KEY, + task INTEGER, + status TEXT, + data TEXT, + created REAL, + FOREIGN KEY(task) REFERENCES task(id) + ); + CREATE TABLE task_property ( + id INTEGER PRIMARY KEY, + task INTEGER, + src TEXT, + name TEXT, + created REAL, + FOREIGN KEY(task) REFERENCES task(id) + ); + CREATE TABLE task_property_status ( + id INTEGER PRIMARY KEY, + task_property INTEGER, + status TEXT, + data TEXT, + created REAL, + FOREIGN KEY(task_property) REFERENCES task_property(id) + ); + CREATE TABLE task_property_data ( + id INTEGER PRIMARY KEY, + task_property INTEGER, + kind TEXT, + data TEXT, + created REAL, + FOREIGN KEY(task_property) REFERENCES task_property(id) + ); + """ + for statement in script.split(";\n"): + statement = statement.strip() + 
if statement: + self.db.execute(statement) + + @transaction + def create_task(self, workdir: str, mode: str) -> int: + return self.db.execute( + """ + INSERT INTO task (workdir, mode, created) + VALUES (:workdir, :mode, :now) + """, + dict(workdir=workdir, mode=mode, now=time.time()), + ).lastrowid + + @transaction + def create_task_properties( + self, properties: Iterable[SbyProperty], *, task_id: Optional[int] = None + ): + if task_id is None: + task_id = self.task_id + now = time.time() + self.db.executemany( + """ + INSERT INTO task_property (name, src, task, created) + VALUES (:name, :src, :task, :now) + """, + [ + dict( + name=json.dumps(prop.path), + src=prop.location or "", + task=task_id, + now=now, + ) + for prop in properties + ], + ) + + @transaction + def set_task_status( + self, + status: Optional[str] = None, + data: Any = None, + ): + if status is None: + status = property.status + + now = time.time() + self.db.execute( + """ + INSERT INTO task_status ( + task, status, data, created + ) + VALUES ( + :task, :status, :data, :now + ) + """, + dict( + task=self.task_id, + status=status, + data=json.dumps(data), + now=now, + ), + ) + + @transaction + def set_task_property_status( + self, + property: SbyProperty, + status: Optional[str] = None, + data: Any = None, + ): + if status is None: + status = property.status + + now = time.time() + self.db.execute( + """ + INSERT INTO task_property_status ( + task_property, status, data, created + ) + VALUES ( + (SELECT id FROM task_property WHERE task = :task AND name = :name), + :status, :data, :now + ) + """, + dict( + task=self.task_id, + name=json.dumps(property.path), + status=status, + data=json.dumps(data), + now=now, + ), + ) + + @transaction + def add_task_property_data(self, property: SbyProperty, kind: str, data: Any): + now = time.time() + self.db.execute( + """ + INSERT INTO task_property_data ( + task_property, kind, data, created + ) + VALUES ( + (SELECT id FROM task_property WHERE task = :task AND name = :name), + :kind, :data, :now + ) + """, + dict( + task=self.task_id, + name=json.dumps(property.path), + kind=kind, + data=json.dumps(data), + now=now, + ), + ) + + @transaction + def all_tasks(self): + rows = self.db.execute( + """ + SELECT id, workdir, created FROM task + """ + ).fetchall() + + return {row["id"]: dict(row) for row in rows} + + @transaction + def all_task_properties(self): + rows = self.db.execute( + """ + SELECT id, task, src, name, created FROM task_property + """ + ).fetchall() + + def get_result(row): + row = dict(row) + row["name"] = tuple(json.loads(row.get("name", "[]"))) + row["data"] = json.loads(row.get("data", "null")) + return row + + return {row["id"]: get_result(row) for row in rows} + + @transaction + def all_task_property_statuses(self): + rows = self.db.execute( + """ + SELECT id, task_property, status, data, created + FROM task_property_status + """ + ).fetchall() + + def get_result(row): + row = dict(row) + row["data"] = json.loads(row.get("data", "null")) + return row + + return {row["id"]: get_result(row) for row in rows} + + @transaction + def all_status_data(self): + return ( + self.all_tasks(), + self.all_task_properties(), + self.all_task_property_statuses(), + ) + + @transaction + def reset(self): + self.db.execute("""DELETE FROM task_property_status""") + self.db.execute("""DELETE FROM task_property_data""") + self.db.execute("""DELETE FROM task_property""") + self.db.execute("""DELETE FROM task_status""") + self.db.execute("""DELETE FROM task""") + + def 
print_status_summary(self): + tasks, task_properties, task_property_statuses = self.all_status_data() + properties = defaultdict(set) + + uniquify_paths = defaultdict(dict) + + def add_status(task_property, status): + + display_name = task_property["name"] + if display_name[-1].startswith("$"): + counters = uniquify_paths[task_property["src"]] + counter = counters.setdefault(display_name[-1], len(counters) + 1) + if task_property["src"]: + if counter < 2: + path_based = f"" + else: + path_based = f"" + else: + path_based = f"" + display_name = (*display_name[:-1], path_based) + + properties[display_name].add(status) + + for task_property in task_properties.values(): + add_status(task_property, "UNKNOWN") + + for status in task_property_statuses.values(): + task_property = task_properties[status["task_property"]] + add_status(task_property, status["status"]) + + for display_name, statuses in sorted(properties.items()): + print(pretty_path(display_name), combine_statuses(statuses)) + + +def combine_statuses(statuses): + statuses = set(statuses) + + if len(statuses) > 1: + statuses.discard("UNKNOWN") + + return ",".join(sorted(statuses)) diff --git a/tests/make/collect_tests.py b/tests/make/collect_tests.py index 2aecceee..636ecb61 100644 --- a/tests/make/collect_tests.py +++ b/tests/make/collect_tests.py @@ -9,7 +9,11 @@ def collect(path): # don't pick up any paths that need escaping nor any sby workdirs - if not SAFE_PATH.match(str(path)) or (path / "config.sby").exists(): + if ( + not SAFE_PATH.match(str(path)) + or (path / "config.sby").exists() + or (path / "status.sqlite").exists() + ): return checked_dirs.append(path) From 6ba762db4cc37247c05513fb41e02da733ec3240 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Wed, 7 Feb 2024 18:43:23 +0100 Subject: [PATCH 215/220] Support for "abc --keep-going pdr" via new "pdr -X" mode --- sbysrc/sby_core.py | 2 +- sbysrc/sby_engine_abc.py | 152 +++++++++++++++++++-- sbysrc/sby_engine_aiger.py | 161 ++++++++++++++--------- tests/keepgoing/check_output.py | 2 +- tests/keepgoing/keepgoing_multi_step.py | 35 +++-- tests/keepgoing/keepgoing_multi_step.sby | 4 +- tests/keepgoing/keepgoing_multi_step.sv | 1 + 7 files changed, 268 insertions(+), 89 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index ecbd081a..609be950 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -787,8 +787,8 @@ def summarize(self, short): event = same_events[0] steps = sorted(e.step for e in same_events) if short and len(steps) > step_limit: - steps = [str(step) for step in steps[:step_limit]] excess = len(steps) - step_limit + steps = [str(step) for step in steps[:step_limit]] omitted_excess = True steps[-1] += f" and {excess} further step{'s' if excess != 1 else ''}" diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index 639a7ffd..79923774 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -17,32 +17,101 @@ # import re, getopt +import json from sby_core import SbyProc -from sby_engine_aiger import aigsmt_exit_callback +from sby_engine_aiger import aigsmt_exit_callback, aigsmt_trace_callback + + +def abc_getopt(args, long): + long = set(long) + output = [] + parsed = [] + toggles = set() + pos = 0 + + while pos < len(args): + arg = args[pos] + pos += 1 + if not arg.startswith('-'): + output.append(arg) + elif arg == '--': + output.extend(args[pos:]) + break + elif arg.startswith('--'): + if '=' in arg: + prefix, param = arg.split('=', 1) + if prefix + "=" in long: + parsed.append(prefix, param) + elif arg[2:] in 
long: + parsed.append((arg, '')) + elif arg[2:] + "=" in long: + parsed.append((arg, args[pos])) + pos += 1 + else: + output.append(arg) + elif arg.startswith('-'): + output.append(arg) + for c in arg[1:]: + if 'A' <= c <= 'Z': + if pos < len(args): + output.append(args[pos]) + pos += 1 + else: + toggles.symmetric_difference_update([c]) + + return output, parsed, toggles + def run(mode, task, engine_idx, engine): - abc_opts, abc_command = getopt.getopt(engine[1:], "", []) + keep_going = False + + fold_command = "fold" + if task.opt_aigfolds: + fold_command += " -s" + + abc_command, custom_options, toggles = abc_getopt(engine[1:], [ + "keep-going", + ]) if len(abc_command) == 0: task.error("Missing ABC command.") - for o, a in abc_opts: - task.error("Unexpected ABC engine options.") + if abc_command[0].startswith('-'): + task.error(f"Unexpected ABC engine option '{abc_command[0]}'.") if abc_command[0] == "bmc3": if mode != "bmc": task.error("ABC command 'bmc3' is only valid in bmc mode.") + for o, a in custom_options: + task.error(f"Option {o} not supported by 'abc {abc_command[0]}'") abc_command[0] += f" -F {task.opt_depth} -v" elif abc_command[0] == "sim3": if mode != "bmc": task.error("ABC command 'sim3' is only valid in bmc mode.") + for o, a in custom_options: + task.error(f"Option {o} not supported by 'abc {abc_command[0]}'") abc_command[0] += f" -F {task.opt_depth} -v" elif abc_command[0] == "pdr": if mode != "prove": task.error("ABC command 'pdr' is only valid in prove mode.") - abc_command[0] += f" -v -I engine_{engine_idx}/invariants.pla" + + for o, a in custom_options: + if o == '--keep-going': + keep_going = True + else: + task.error(f"Option {o} not supported by 'abc {abc_command[0]}'") + + abc_command[0] += " -v" + + if keep_going: + abc_command += ["-a", "-X", f"engine_{engine_idx}/trace_"] + + if 'd' in toggles: + abc_command += ["-I", f"engine_{engine_idx}/invariants.pla"] + if not task.opt_aigfolds: + fold_command += " -s" else: task.error(f"Invalid ABC command {abc_command[0]}.") @@ -66,9 +135,8 @@ def run(mode, task, engine_idx, engine): task, f"engine_{engine_idx}", task.model("aig"), - f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; fold{ - " -s" if task.opt_aigfolds or (abc_command[0].startswith("pdr ") and "-d" in abc_command[1:]) else "" - }; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""", + f"""cd {task.workdir}; {task.exe_paths["abc"]} -c 'read_aiger model/design_aiger.aig; { + fold_command}; strash; {" ".join(abc_command)}; write_cex -a engine_{engine_idx}/trace.aiw'""", logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile.txt", "w") ) proc.checkretcode = True @@ -76,11 +144,42 @@ def run(mode, task, engine_idx, engine): proc.noprintregex = re.compile(r"^\.+$") proc_status = None + procs_running = 1 + + aiger_props = None + disproved = [] + def output_callback(line): nonlocal proc_status - - match = re.match(r"^Output [0-9]+ of miter .* was asserted in frame [0-9]+.", line) - if match: proc_status = "FAIL" + nonlocal procs_running + nonlocal aiger_props + + if keep_going and aiger_props is None: + with open(f"{task.workdir}/model/design_aiger.ywa") as ywa_file: + ywa = json.load(ywa_file) + aiger_props = [] + for path in ywa["asserts"]: + aiger_props.append(task.design.properties_by_path[tuple(path)]) + + if keep_going: + match = re.match(r"Writing CEX for output ([0-9]+) to engine_[0-9]+/(.*)\.aiw", line) + if match: + output = int(match[1]) + prop = aiger_props[output] + prop.status = 
"FAIL" + + task.status_db.set_task_property_status(prop, data=dict(source="abc pdr", engine=f"engine_{engine_idx}")) + disproved.append(output) + proc_status = "FAIL" + proc = aigsmt_trace_callback(task, engine_idx, proc_status, + run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append, + name=match[2], + ) + proc.register_exit_callback(exit_callback) + procs_running += 1 + else: + match = re.match(r"^Output [0-9]+ of miter .* was asserted in frame [0-9]+.", line) + if match: proc_status = "FAIL" match = re.match(r"^Simulation of [0-9]+ frames for [0-9]+ rounds with [0-9]+ restarts did not assert POs.", line) if match: proc_status = "UNKNOWN" @@ -94,11 +193,38 @@ def output_callback(line): match = re.match(r"^Property proved.", line) if match: proc_status = "PASS" + if keep_going: + match = re.match(r"^Properties: All = (\d+). Proved = (\d+). Disproved = (\d+). Undecided = (\d+).", line) + if match: + all_count = int(match[1]) + proved_count = int(match[2]) + disproved_count = int(match[3]) + undecided_count = int(match[4]) + if ( + all_count == len(aiger_props) and + all_count == proved_count + disproved_count + undecided_count and + disproved_count == len(disproved) and + not undecided_count + ): + for i, prop in enumerate(aiger_props): + if i not in disproved: + prop.status = "PASS" + return line def exit_callback(retcode): - aigsmt_exit_callback(task, engine_idx, proc_status, - run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append, ) + nonlocal procs_running + if keep_going: + procs_running -= 1 + if not procs_running: + if proc_status == "FAIL" and mode == "bmc" and keep_going: + task.pass_unknown_asserts(dict(source="abc pdr", keep_going=True, engine=f"engine_{engine_idx}")) + task.update_status(proc_status) + task.summary.set_engine_status(engine_idx, proc_status) + task.terminate() + else: + aigsmt_exit_callback(task, engine_idx, proc_status, + run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append) proc.output_callback = output_callback proc.register_exit_callback(exit_callback) diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index d7a5a310..acba3a21 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -121,70 +121,107 @@ def exit_callback(retcode): def aigsmt_exit_callback(task, engine_idx, proc_status, *, run_aigsmt, smtbmc_vcd, smtbmc_append, sim_append): - if proc_status is None: - task.error(f"engine_{engine_idx}: Could not determine engine status.") + if proc_status is None: + task.error(f"engine_{engine_idx}: Could not determine engine status.") - task.update_status(proc_status) - task.summary.set_engine_status(engine_idx, proc_status) - task.terminate() + task.update_status(proc_status) + task.summary.set_engine_status(engine_idx, proc_status) + task.terminate() + if proc_status == "FAIL" and (not run_aigsmt or task.opt_aigsmt != "none"): + aigsmt_trace_callback(task, engine_idx, proc_status, run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append) - if proc_status == "FAIL" and (not run_aigsmt or task.opt_aigsmt != "none"): - trace_prefix = f"engine_{engine_idx}/trace" +def aigsmt_trace_callback(task, engine_idx, proc_status, *, run_aigsmt, smtbmc_vcd, smtbmc_append, sim_append, name="trace"): - aiw2yw_suffix = '_aiw' if run_aigsmt else '' + trace_prefix = f"engine_{engine_idx}/{name}" - witness_proc = SbyProc( - task, f"engine_{engine_idx}", [], - f"cd 
{task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/trace.aiw model/design_aiger.ywa engine_{engine_idx}/trace{aiw2yw_suffix}.yw", - ) - yw_proc = witness_proc + aiw2yw_suffix = '_aiw' if run_aigsmt else '' - if run_aigsmt: - smtbmc_opts = [] - smtbmc_opts += ["-s", task.opt_aigsmt] - if task.opt_tbtop is not None: - smtbmc_opts += ["--vlogtb-top", task.opt_tbtop] - smtbmc_opts += ["--noprogress", f"--append {smtbmc_append}"] - if smtbmc_vcd: - smtbmc_opts += [f"--dump-vcd {trace_prefix}.vcd"] - smtbmc_opts += [f"--dump-yw {trace_prefix}.yw", f"--dump-vlogtb {trace_prefix}_tb.v", f"--dump-smtc {trace_prefix}.smtc"] - - proc2 = SbyProc( - task, - f"engine_{engine_idx}", - [*task.model("smt2"), witness_proc], - f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/trace{aiw2yw_suffix}.yw model/design_smt2.smt2", - logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w") - ) - - proc2_status = None - - def output_callback2(line): - nonlocal proc2_status - - match = re.match(r"^## [0-9: ]+ Status: FAILED", line) - if match: proc2_status = "FAIL" - - match = re.match(r"^## [0-9: ]+ Status: PASSED", line) - if match: proc2_status = "PASS" - - return line - - def exit_callback2(retcode): - if proc2_status is None: - task.error(f"engine_{engine_idx}: Could not determine aigsmt status.") - if proc2_status != "FAIL": - task.error(f"engine_{engine_idx}: Unexpected aigsmt status.") - if os.path.exists(f"{task.workdir}/engine_{engine_idx}/trace.vcd"): - task.summary.add_event(engine_idx, trace="trace", path=f"engine_{engine_idx}/trace.vcd", type="$assert") - - proc2.output_callback = output_callback2 - proc2.register_exit_callback(exit_callback2) - - yw_proc = proc2 - - if task.opt_fst or (task.opt_vcd and task.opt_vcd_sim): - sim_witness_trace(f"engine_{engine_idx}", task, engine_idx, f"engine_{engine_idx}/trace.yw", append=sim_append, deps=[yw_proc]) - - else: - task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Engine did not produce a counter example.") + witness_proc = SbyProc( + task, f"engine_{engine_idx}", [], + f"cd {task.workdir}; {task.exe_paths['witness']} aiw2yw engine_{engine_idx}/{name}.aiw model/design_aiger.ywa engine_{engine_idx}/{name}{aiw2yw_suffix}.yw", + ) + final_proc = witness_proc + + if run_aigsmt: + smtbmc_opts = [] + smtbmc_opts += ["-s", task.opt_aigsmt] + if task.opt_tbtop is not None: + smtbmc_opts += ["--vlogtb-top", task.opt_tbtop] + smtbmc_opts += ["--noprogress", f"--append {smtbmc_append}"] + if smtbmc_vcd: + smtbmc_opts += [f"--dump-vcd {trace_prefix}.vcd"] + smtbmc_opts += [f"--dump-yw {trace_prefix}.yw", f"--dump-vlogtb {trace_prefix}_tb.v", f"--dump-smtc {trace_prefix}.smtc"] + + proc2 = SbyProc( + task, + f"engine_{engine_idx}", + [*task.model("smt2"), witness_proc], + f"cd {task.workdir}; {task.exe_paths['smtbmc']} {' '.join(smtbmc_opts)} --yw engine_{engine_idx}/{name}{aiw2yw_suffix}.yw model/design_smt2.smt2", + logfile=open(f"{task.workdir}/engine_{engine_idx}/logfile2.txt", "w"), + ) + + proc2_status = None + + last_prop = [] + current_step = None + + def output_callback2(line): + nonlocal proc2_status + nonlocal last_prop + nonlocal current_step + + smt2_trans = {'\\':'/', '|':'/'} + + match = re.match(r"^## [0-9: ]+ .* in step ([0-9]+)\.\.", line) + if match: + current_step = int(match[1]) + return line + + match = re.match(r"^## [0-9: ]+ Status: FAILED", line) + if match: proc2_status = "FAIL" + + match = re.match(r"^## [0-9: ]+ Status: PASSED", line) + if match: 
proc2_status = "PASS" + + match = re.match(r"^## [0-9: ]+ Assert failed in (\S+): (\S+)(?: \((\S+)\))?", line) + if match: + cell_name = match[3] or match[2] + prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) + prop.status = "FAIL" + last_prop.append(prop) + return line + + match = re.match(r"^## [0-9: ]+ Writing trace to VCD file: (\S+)", line) + if match: + tracefile = match[1] + trace = os.path.basename(tracefile)[:-4] + task.summary.add_event(engine_idx=engine_idx, trace=trace, path=tracefile) + + if match and last_prop: + for p in last_prop: + task.summary.add_event( + engine_idx=engine_idx, trace=trace, + type=p.celltype, hdlname=p.hdlname, src=p.location, step=current_step) + p.tracefiles.append(tracefile) + last_prop = [] + return line + + return line + + def exit_callback2(retcode): + if proc2_status is None: + task.error(f"engine_{engine_idx}: Could not determine aigsmt status.") + if proc2_status != "FAIL": + task.error(f"engine_{engine_idx}: Unexpected aigsmt status.") + + proc2.output_callback = output_callback2 + proc2.register_exit_callback(exit_callback2) + + final_proc = proc2 + + if task.opt_fst or (task.opt_vcd and task.opt_vcd_sim): + final_proc = sim_witness_trace(f"engine_{engine_idx}", task, engine_idx, f"engine_{engine_idx}/{name}.yw", append=sim_append, deps=[final_proc]) + elif not run_aigsmt: + task.log(f"{click.style(f'engine_{engine_idx}', fg='magenta')}: Engine did not produce a counter example.") + + return final_proc diff --git a/tests/keepgoing/check_output.py b/tests/keepgoing/check_output.py index ab531ebc..fb2b969b 100644 --- a/tests/keepgoing/check_output.py +++ b/tests/keepgoing/check_output.py @@ -12,6 +12,6 @@ def line_ref(dir, filename, pattern): for number, line in enumerate(file, 1): if pattern_re.search(line): # Needs to match source locations for both verilog frontends - return fr"{filename}:(?:{number}|\d+.\d+-{number}.\d+)" + return fr"{filename}:(?:{number}|\d+\.\d+-{number}\.\d+)" raise RuntimeError("%s: pattern `%s` not found" % (filename, pattern)) diff --git a/tests/keepgoing/keepgoing_multi_step.py b/tests/keepgoing/keepgoing_multi_step.py index 548f9d2b..d250614b 100644 --- a/tests/keepgoing/keepgoing_multi_step.py +++ b/tests/keepgoing/keepgoing_multi_step.py @@ -11,21 +11,34 @@ step_7 = line_ref(workdir, src, "step 7") log = open(workdir + "/logfile.txt").read() -log_per_trace = log.split("Writing trace to Yosys witness file")[:-1] -assert len(log_per_trace) == 4 +if "_abc]" not in log: + log_per_trace = log.split("Writing trace to Yosys witness file")[:-1] + assert len(log_per_trace) == 4 + assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[0], re.M) + + for i in range(1, 4): + assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[i], re.M) -assert re.search(r"Assert failed in test: %s \(.*\)$" % assert_0, log_per_trace[0], re.M) + assert re.search(r"Assert failed in test: %s \(.*\)$" % step_3_7, log_per_trace[1], re.M) + assert re.search(r"Assert failed in test: %s \(.*\)$" % step_5, log_per_trace[2], re.M) + assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) + assert re.search(r"Assert failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) -for i in range(1, 4): - assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % assert_0, log_per_trace[i], re.M) + pattern = f"Property ASSERT in test at {assert_0} failed. 
Trace file: engine_0/trace0.(vcd|fst)" + assert re.search(pattern, open(f"{workdir}/{workdir}.xml").read()) + +log_per_trace = log.split("summary: counterexample trace")[1:] +assert len(log_per_trace) == 4 +for i in range(4): + assert re.search(r"failed assertion test\..* at %s" % assert_0, log_per_trace[i], re.M) -assert re.search(r"Assert failed in test: %s \(.*\)$" % step_3_7, log_per_trace[1], re.M) -assert re.search(r"Assert failed in test: %s \(.*\)$" % step_5, log_per_trace[2], re.M) -assert re.search(r"Assert failed in test: %s \(.*\) \[failed before\]$" % step_3_7, log_per_trace[3], re.M) -assert re.search(r"Assert failed in test: %s \(.*\)$" % step_7, log_per_trace[3], re.M) +step_3_7_traces = [i for i, t in enumerate(log_per_trace) if re.search(r"failed assertion test\..* at %s" % step_3_7, t, re.M)] +step_5_traces = [i for i, t in enumerate(log_per_trace) if re.search(r"failed assertion test\..* at %s" % step_5, t, re.M)] +step_7_traces = [i for i, t in enumerate(log_per_trace) if re.search(r"failed assertion test\..* at %s" % step_7, t, re.M)] -pattern = f"Property ASSERT in test at {assert_0} failed. Trace file: engine_0/trace0.(vcd|fst)" -assert re.search(pattern, open(f"{workdir}/{workdir}.xml").read()) +assert len(step_3_7_traces) == 2 +assert len(step_5_traces) == 1 +assert len(step_7_traces) == 1 diff --git a/tests/keepgoing/keepgoing_multi_step.sby b/tests/keepgoing/keepgoing_multi_step.sby index b9b0ba58..5e6a985e 100644 --- a/tests/keepgoing/keepgoing_multi_step.sby +++ b/tests/keepgoing/keepgoing_multi_step.sby @@ -1,6 +1,7 @@ [tasks] bmc prove +abc : prove [options] bmc: mode bmc @@ -8,7 +9,8 @@ prove: mode prove expect fail [engines] -smtbmc --keep-going boolector +~abc: smtbmc --keep-going boolector +abc: abc --keep-going pdr [script] read -sv keepgoing_multi_step.sv diff --git a/tests/keepgoing/keepgoing_multi_step.sv b/tests/keepgoing/keepgoing_multi_step.sv index 8d5d8e36..553b13ca 100644 --- a/tests/keepgoing/keepgoing_multi_step.sv +++ b/tests/keepgoing/keepgoing_multi_step.sv @@ -18,5 +18,6 @@ module test ( if (counter == 7) begin assert(a); // step 7 end + assert(1); end endmodule From d3a6f2d75837e71aa41f133008f94f0a01c9902b Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 20 Feb 2024 13:56:44 +0100 Subject: [PATCH 216/220] Emit status db update from aigsmt --- sbysrc/sby_engine_aiger.py | 1 + 1 file changed, 1 insertion(+) diff --git a/sbysrc/sby_engine_aiger.py b/sbysrc/sby_engine_aiger.py index acba3a21..fbdf999e 100644 --- a/sbysrc/sby_engine_aiger.py +++ b/sbysrc/sby_engine_aiger.py @@ -188,6 +188,7 @@ def output_callback2(line): cell_name = match[3] or match[2] prop = task.design.hierarchy.find_property_by_cellname(cell_name, trans_dict=smt2_trans) prop.status = "FAIL" + task.status_db.set_task_property_status(prop, data=dict(source="aigsmt", engine=f"engine_{engine_idx}")) last_prop.append(prop) return line From b6e41a388b6e929abaf97ae141a11ff8ade04c0a Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Tue, 27 Feb 2024 20:09:45 +0100 Subject: [PATCH 217/220] Support for the new anytime schedule in yosys-abc's pdr --- sbysrc/sby_core.py | 4 +++ sbysrc/sby_engine_abc.py | 50 ++++++++++++++++++++++++------------- sbysrc/sby_engine_smtbmc.py | 7 +++--- 3 files changed, 41 insertions(+), 20 deletions(-) diff --git a/sbysrc/sby_core.py b/sbysrc/sby_core.py index 609be950..c366e1be 100644 --- a/sbysrc/sby_core.py +++ b/sbysrc/sby_core.py @@ -1027,6 +1027,7 @@ def make_model(self, model_name): print("setundef -undriven -anyseq", file=f) print("opt 
-fast", file=f) if self.opt_witrename: + # we need to run this a second time to handle anything added by prep print("rename -witness", file=f) print("opt_clean", file=f) print(f"""write_rtlil ../model/design_prep.il""", file=f) @@ -1048,6 +1049,9 @@ def make_model(self, model_name): print(cmd, file=f) # the user must designate a top module in [script] print("hierarchy -smtcheck", file=f) + # we need to give flatten-preserved names before write_jny + if self.opt_witrename: + print("rename -witness", file=f) print(f"""write_jny -no-connections ../model/design.json""", file=f) print(f"""write_rtlil ../model/design.il""", file=f) diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py index 79923774..54ff1698 100644 --- a/sbysrc/sby_engine_abc.py +++ b/sbysrc/sby_engine_abc.py @@ -103,7 +103,7 @@ def run(mode, task, engine_idx, engine): else: task.error(f"Option {o} not supported by 'abc {abc_command[0]}'") - abc_command[0] += " -v" + abc_command[0] += " -v -l" if keep_going: abc_command += ["-a", "-X", f"engine_{engine_idx}/trace_"] @@ -142,34 +142,35 @@ def run(mode, task, engine_idx, engine): proc.checkretcode = True proc.noprintregex = re.compile(r"^\.+$") - proc_status = None + proc_status = "UNKNOWN" procs_running = 1 aiger_props = None - disproved = [] + disproved = set() + proved = set() def output_callback(line): nonlocal proc_status nonlocal procs_running nonlocal aiger_props - if keep_going and aiger_props is None: + if aiger_props is None: with open(f"{task.workdir}/model/design_aiger.ywa") as ywa_file: ywa = json.load(ywa_file) aiger_props = [] for path in ywa["asserts"]: - aiger_props.append(task.design.properties_by_path[tuple(path)]) + aiger_props.append(task.design.properties_by_path.get(tuple(path))) if keep_going: match = re.match(r"Writing CEX for output ([0-9]+) to engine_[0-9]+/(.*)\.aiw", line) if match: output = int(match[1]) prop = aiger_props[output] - prop.status = "FAIL" - - task.status_db.set_task_property_status(prop, data=dict(source="abc pdr", engine=f"engine_{engine_idx}")) - disproved.append(output) + if prop: + prop.status = "FAIL" + task.status_db.set_task_property_status(prop, data=dict(source="abc pdr", engine=f"engine_{engine_idx}")) + disproved.add(output) proc_status = "FAIL" proc = aigsmt_trace_callback(task, engine_idx, proc_status, run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append, @@ -181,6 +182,15 @@ def output_callback(line): match = re.match(r"^Output [0-9]+ of miter .* was asserted in frame [0-9]+.", line) if match: proc_status = "FAIL" + match = re.match(r"^Proved output +([0-9]+) in frame +[0-9]+", line) + if match: + output = int(match[1]) + prop = aiger_props[output] + if prop: + prop.status = "PASS" + task.status_db.set_task_property_status(prop, data=dict(source="abc pdr", engine=f"engine_{engine_idx}")) + proved.add(output) + match = re.match(r"^Simulation of [0-9]+ frames for [0-9]+ rounds with [0-9]+ restarts did not assert POs.", line) if match: proc_status = "UNKNOWN" @@ -201,14 +211,19 @@ def output_callback(line): disproved_count = int(match[3]) undecided_count = int(match[4]) if ( - all_count == len(aiger_props) and - all_count == proved_count + disproved_count + undecided_count and - disproved_count == len(disproved) and - not undecided_count + all_count != len(aiger_props) or + all_count != proved_count + disproved_count + undecided_count or + disproved_count != len(disproved) or + proved_count != len(proved) ): - for i, prop in enumerate(aiger_props): - if i not in 
disproved: - prop.status = "PASS" + log("WARNING: inconsistent status output") + proc_status = "UNKNOWN" + elif proved_count == all_count: + proc_status = "PASS" + elif disproved_count == 0: + proc_status = "UNKNOWN" + else: + proc_status = "FAIL" return line @@ -221,7 +236,8 @@ def exit_callback(retcode): task.pass_unknown_asserts(dict(source="abc pdr", keep_going=True, engine=f"engine_{engine_idx}")) task.update_status(proc_status) task.summary.set_engine_status(engine_idx, proc_status) - task.terminate() + if proc_status != "UNKNOWN" and not keep_going: + task.terminate() else: aigsmt_exit_callback(task, engine_idx, proc_status, run_aigsmt=run_aigsmt, smtbmc_vcd=smtbmc_vcd, smtbmc_append=smtbmc_append, sim_append=sim_append) diff --git a/sbysrc/sby_engine_smtbmc.py b/sbysrc/sby_engine_smtbmc.py index 3b431130..5fb0b898 100644 --- a/sbysrc/sby_engine_smtbmc.py +++ b/sbysrc/sby_engine_smtbmc.py @@ -295,8 +295,8 @@ def last_exit_callback(): task.pass_unknown_asserts(dict(source="smtbmc", keep_going=True, engine=f"engine_{engine_idx}")) proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status task.summary.set_engine_status(engine_idx, proc_status_lower) - - task.terminate() + if not keep_going: + task.terminate() elif mode in ["prove_basecase", "prove_induction"]: proc_status_lower = proc_status.lower() if proc_status == "PASS" else proc_status @@ -326,7 +326,8 @@ def last_exit_callback(): if task.basecase_pass and task.induction_pass: task.update_status("PASS") task.summary.append("successful proof by k-induction.") - task.terminate() + if not keep_going: + task.terminate() else: assert False From fd381ade05e66a94f29a64965db20be625a7cf97 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 11 Mar 2024 15:37:39 +0100 Subject: [PATCH 218/220] Print an error message when using "--status" with no project specified --- sbysrc/sby.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index 99dcc6c9..e781d6d4 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -61,6 +61,9 @@ if status_show or status_reset: target = workdir_prefix or workdir or sbyfile + if target is None: + print("ERROR: Specify a .sby config file or working directory to use --status.") + sys.exit(1) if not os.path.isdir(target) and target.endswith('.sby'): target = target[:-4] if not os.path.isdir(target): From cba77083c382a02054c0c4d0830a8a64827c13d9 Mon Sep 17 00:00:00 2001 From: Jannis Harder Date: Mon, 11 Mar 2024 16:35:03 +0100 Subject: [PATCH 219/220] Print a message when SBY is waiting for a config on stdin --- sbysrc/sby.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sbysrc/sby.py b/sbysrc/sby.py index e781d6d4..c21ab0ba 100644 --- a/sbysrc/sby.py +++ b/sbysrc/sby.py @@ -297,6 +297,8 @@ def handle_line(line): sbydata = list() +if sbyfile is None: + print("Reading .sby configuration from stdin:") with (open(sbyfile, "r") if sbyfile is not None else sys.stdin) as f: for line in f: sbydata.append(line) From 6c8b838eb3c9cbc2f3452855fb6fda3362fefdb6 Mon Sep 17 00:00:00 2001 From: KrystalDelusion <93062060+KrystalDelusion@users.noreply.github.com> Date: Tue, 12 Mar 2024 10:48:26 +1300 Subject: [PATCH 220/220] Update sby_engine_abc.py ABC will sometimes return negative frame numbers when proving by convergence, e.g. ``` engine_0: Proved output 1 in frame -698905656 (converged). engine_0: Proved output 4 in frame -698905656 (converged). 
```

This change fixes these properties being missed and causing the engine status
to return UNKNOWN due to `proved_count != len(proved)`.
---
 sbysrc/sby_engine_abc.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/sbysrc/sby_engine_abc.py b/sbysrc/sby_engine_abc.py
index 54ff1698..1fabe6fa 100644
--- a/sbysrc/sby_engine_abc.py
+++ b/sbysrc/sby_engine_abc.py
@@ -182,7 +182,7 @@ def output_callback(line):
         match = re.match(r"^Output [0-9]+ of miter .* was asserted in frame [0-9]+.", line)
         if match: proc_status = "FAIL"
 
-        match = re.match(r"^Proved output +([0-9]+) in frame +[0-9]+", line)
+        match = re.match(r"^Proved output +([0-9]+) in frame +-?[0-9]+", line)
         if match:
             output = int(match[1])
             prop = aiger_props[output]
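As a quick standalone illustration of the regex change in PATCH 220 (a sketch for this writeup, not part of the patch series): the old pattern requires a digit right after `frame`, so the convergence lines with negative frame numbers quoted in the commit message never matched and those outputs were never recorded as proved. The snippet below uses only the two patterns from the diff and log lines modeled on the commit message.

```
import re

# Log lines modeled on the ABC output quoted in the PATCH 220 commit message.
lines = [
    "Proved output 1 in frame -698905656 (converged).",
    "Proved output 4 in frame 17",
]

old_pattern = r"^Proved output +([0-9]+) in frame +[0-9]+"    # before the fix
new_pattern = r"^Proved output +([0-9]+) in frame +-?[0-9]+"  # after the fix

for line in lines:
    old_match = re.match(old_pattern, line)
    new_match = re.match(new_pattern, line)
    # The old pattern rejects the negative frame number, so that output never
    # reached the `proved` set and the final consistency check pushed the
    # engine status back to UNKNOWN.
    print(f"{line!r}: old={bool(old_match)} new={bool(new_match)} "
          f"output={new_match and int(new_match[1])}")
```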
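For readers following the `sby_status.py` patch earlier in this series: its `transaction` decorator runs each database method inside a plain transaction and, when SQLite raises an `OperationalError` because the database is busy, rolls back and retries once with `begin immediate`. The snippet below is a minimal sketch of that retry shape against a throwaway in-memory database; the helper name `with_transaction` is invented here for illustration and is not SBY code.

```
import sqlite3

def with_transaction(db, fn):
    # Hypothetical helper (not SBY code) mirroring the decorator's control flow.
    try:
        db.execute("begin")
        result = fn()
        db.execute("commit")
        return result
    except sqlite3.OperationalError:
        # Database busy or locked: roll back and fall through to the retry.
        db.rollback()
    except Exception:
        db.rollback()
        raise
    # Second attempt: "begin immediate" takes the write lock up front.
    db.execute("begin immediate")
    try:
        result = fn()
        db.execute("commit")
        return result
    except Exception:
        db.rollback()
        raise

# Usage example against a throwaway database.
db = sqlite3.connect(":memory:", isolation_level=None)
db.execute("CREATE TABLE demo (value TEXT)")
with_transaction(db, lambda: db.execute("INSERT INTO demo VALUES ('ok')"))
print(db.execute("SELECT value FROM demo").fetchone())
```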
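Since the status database is plain SQLite, its contents can also be inspected outside of SBY. The sketch below assumes a completed task whose working directory holds the default `status.sqlite` (the path `my_task/status.sqlite` is a placeholder) and joins the `task_property` and `task_property_status` tables from the schema created in the patch; property names are decoded from the JSON-encoded paths that `create_task_properties` stores.

```
import json
import sqlite3

# Placeholder path: by default the patch stores the database as
# <workdir>/status.sqlite.
db = sqlite3.connect("my_task/status.sqlite")
db.row_factory = sqlite3.Row

# Join properties with their status entries, mirroring the task_property and
# task_property_status tables defined in sby_status.py.
rows = db.execute(
    """
    SELECT p.name, p.src, s.status, s.data, s.created
    FROM task_property p
    LEFT JOIN task_property_status s ON s.task_property = p.id
    ORDER BY s.created
    """
).fetchall()

for row in rows:
    # Property names are stored as JSON-encoded path tuples.
    path = tuple(json.loads(row["name"]))
    data = json.loads(row["data"]) if row["data"] is not None else None
    print(path, row["src"], row["status"], data)
```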