From 4d0282df0289352e6789e84c17aa4211f43d6577 Mon Sep 17 00:00:00 2001 From: Sandi Fatic Date: Tue, 19 May 2020 20:04:00 +0100 Subject: [PATCH] Added a yapf file and formated the scripts and pytest code (#2675) Summary: Currently there was no formater in the scripts folder. I have added the yapf configuration file and formated the python folders. YAPF is a code formatter mainainted and developed at Google: https://github.com/google/yapf and I've set it up to use the codeformat they use there, but open to other options. Test Plan: ``` yapf -i scripts/*.py git status ``` --- .style.yapf | 4 + pytest/lib/branches.py | 20 +- pytest/lib/cluster.py | 228 +++++-- pytest/lib/lightclient.py | 109 ++-- pytest/lib/network.py | 32 +- pytest/lib/retrying.py | 2 +- pytest/lib/serializer.py | 12 +- pytest/lib/transaction.py | 278 ++++++--- pytest/lib/utils.py | 99 ++- pytest/tests/adversarial/fork_sync.py | 11 +- pytest/tests/adversarial/gc_rollback.py | 5 +- pytest/tests/adversarial/malicious_chain.py | 11 +- .../tests/adversarial/start_from_genesis.py | 12 +- pytest/tests/adversarial/wrong_sync_info.py | 4 +- .../contracts/deploy_call_smart_contract.py | 19 +- pytest/tests/contracts/gibberish.py | 30 +- pytest/tests/mocknet/sanity.py | 28 +- pytest/tests/sanity/backward_compatible.py | 24 +- pytest/tests/sanity/block_production.py | 23 +- pytest/tests/sanity/block_sync.py | 43 +- pytest/tests/sanity/epoch_switches.py | 30 +- pytest/tests/sanity/garbage_collection.py | 33 +- pytest/tests/sanity/garbage_collection1.py | 35 +- pytest/tests/sanity/gc_after_sync.py | 36 +- pytest/tests/sanity/gc_sync_after_sync.py | 32 +- pytest/tests/sanity/lightclnt.py | 61 +- pytest/tests/sanity/one_val.py | 28 +- pytest/tests/sanity/restaked.py | 13 +- pytest/tests/sanity/restart.py | 27 +- pytest/tests/sanity/rpc_finality.py | 19 +- pytest/tests/sanity/rpc_query.py | 17 +- pytest/tests/sanity/rpc_state_changes.py | 572 ++++++++++-------- pytest/tests/sanity/rpc_tx_forwarding.py | 22 +- 
pytest/tests/sanity/rpc_tx_submission.py | 25 +- pytest/tests/sanity/skip_epoch.py | 44 +- pytest/tests/sanity/staking1.py | 22 +- pytest/tests/sanity/staking2.py | 41 +- pytest/tests/sanity/staking_repro1.py | 25 +- pytest/tests/sanity/staking_repro2.py | 1 - pytest/tests/sanity/state_migration.py | 53 +- pytest/tests/sanity/state_sync.py | 31 +- pytest/tests/sanity/state_sync1.py | 19 +- pytest/tests/sanity/state_sync2.py | 11 +- pytest/tests/sanity/state_sync3.py | 28 +- pytest/tests/sanity/state_sync_late.py | 30 +- pytest/tests/sanity/state_sync_routed.py | 40 +- pytest/tests/sanity/transactions.py | 20 +- pytest/tests/sanity/validator_switch.py | 26 +- pytest/tests/stress/network_stress.py | 13 +- pytest/tests/stress/stress.py | 135 +++-- scripts/create_service.py | 15 +- scripts/migrations/10-gas-price-fix.py | 3 +- .../migrations/11-runtime-cost-adjustment.py | 210 ++++--- scripts/migrations/12-fix-inflation.py | 3 +- scripts/migrations/13-block-merkle-root.py | 3 +- scripts/migrations/5-preserve-height.py | 9 +- scripts/migrations/6-state-stake.py | 6 +- scripts/migrations/7-account-registrar.py | 15 +- scripts/migrations/8-fraction.py | 4 +- scripts/migrations/9-state-record-data.py | 13 +- scripts/nodelib.py | 228 +++++-- scripts/parallel_coverage.py | 52 +- scripts/parallel_run_tests.py | 19 +- scripts/start_localnet.py | 34 +- scripts/start_staging_testnet.py | 41 +- scripts/start_stakewars.py | 64 +- scripts/start_testnet.py | 41 +- scripts/start_unittest.py | 33 +- scripts/state/mega-migrate.py | 12 +- scripts/state/split-genesis.py | 6 +- scripts/state/update_res.py | 31 +- scripts/stop.py | 1 - scripts/testlib.py | 19 +- 73 files changed, 2250 insertions(+), 1095 deletions(-) create mode 100644 .style.yapf diff --git a/.style.yapf b/.style.yapf new file mode 100644 index 00000000000..f34c280592d --- /dev/null +++ b/.style.yapf @@ -0,0 +1,4 @@ +[style] +based_on_style = google +indent_width = 4 +column_limit = 80 diff --git a/pytest/lib/branches.py 
b/pytest/lib/branches.py index 1dbee7a3fc1..2d8341eb872 100644 --- a/pytest/lib/branches.py +++ b/pytest/lib/branches.py @@ -1,11 +1,11 @@ - import os import subprocess def current_branch(): return os.environ.get('BUILDKITE_BRANCH') or subprocess.check_output([ - "git", "rev-parse", "--symbolic-full-name", "--abbrev-ref", "HEAD"]).strip().decode() + "git", "rev-parse", "--symbolic-full-name", "--abbrev-ref", "HEAD" + ]).strip().decode() def compile_binary(branch): @@ -46,19 +46,25 @@ def compile_current(): def download_binary(branch): url = f'https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/Linux/{branch}/near' - subprocess.check_output(['curl', '--proto', '=https', '--tlsv1.2', - '-sSfL', url, '-o', f'../target/debug/near-{branch}']) + subprocess.check_output([ + 'curl', '--proto', '=https', '--tlsv1.2', '-sSfL', url, '-o', + f'../target/debug/near-{branch}' + ]) subprocess.check_output(['chmod', '+x', f'../target/debug/near-{branch}']) url = f'https://s3-us-west-1.amazonaws.com/build.nearprotocol.com/nearcore/Linux/{branch}/state-viewer' - subprocess.check_output(['curl', '--proto', '=https', '--tlsv1.2', - '-sSfL', url, '-o', f'../target/debug/state-viewer-{branch}']) + subprocess.check_output([ + 'curl', '--proto', '=https', '--tlsv1.2', '-sSfL', url, '-o', + f'../target/debug/state-viewer-{branch}' + ]) subprocess.check_output( ['chmod', '+x', f'../target/debug/state-viewer-{branch}']) def prepare_ab_test(other_branch): compile_current() - if os.environ.get('BUILDKITE') and other_branch in ['master', 'beta', 'stable']: + if os.environ.get('BUILDKITE') and other_branch in [ + 'master', 'beta', 'stable' + ]: download_binary(other_branch) else: compile_binary(other_branch) diff --git a/pytest/lib/cluster.py b/pytest/lib/cluster.py index 777a95b784e..264e4faca60 100644 --- a/pytest/lib/cluster.py +++ b/pytest/lib/cluster.py @@ -24,6 +24,7 @@ remote_nodes_lock = threading.Lock() cleanup_remote_nodes_atexit_registered = False + class 
DownloadException(Exception): pass @@ -46,6 +47,7 @@ def atexit_cleanup_remote(): class Key(object): + def __init__(self, account_id, pk, sk): super(Key, self).__init__() self.account_id = account_id @@ -71,14 +73,27 @@ def from_json_file(self, jf): class BaseNode(object): - def _get_command_line(self, near_root, node_dir, boot_key, boot_node_addr, binary_name='neard'): + + def _get_command_line(self, + near_root, + node_dir, + boot_key, + boot_node_addr, + binary_name='neard'): if boot_key is None: assert boot_node_addr is None - return [os.path.join(near_root, binary_name), "--verbose", "", "--home", node_dir, "run"] + return [ + os.path.join(near_root, binary_name), "--verbose", "", "--home", + node_dir, "run" + ] else: assert boot_node_addr is not None boot_key = boot_key.split(':')[1] - return [os.path.join(near_root, binary_name), "--verbose", "", "--home", node_dir, "run", '--boot-nodes', "%s@%s:%s" % (boot_key, boot_node_addr[0], boot_node_addr[1])] + return [ + os.path.join(near_root, binary_name), "--verbose", "", "--home", + node_dir, "run", '--boot-nodes', + "%s@%s:%s" % (boot_key, boot_node_addr[0], boot_node_addr[1]) + ] def wait_for_rpc(self, timeout=1): retrying.retry(lambda: self.get_status(), timeout=timeout) @@ -90,15 +105,20 @@ def json_rpc(self, method, params, timeout=2): 'id': 'dontcare', 'jsonrpc': '2.0' } - r = requests.post("http://%s:%s" % self.rpc_addr(), json=j, timeout=timeout) + r = requests.post("http://%s:%s" % self.rpc_addr(), + json=j, + timeout=timeout) r.raise_for_status() return json.loads(r.content) def send_tx(self, signed_tx): - return self.json_rpc('broadcast_tx_async', [base64.b64encode(signed_tx).decode('utf8')]) + return self.json_rpc('broadcast_tx_async', + [base64.b64encode(signed_tx).decode('utf8')]) def send_tx_and_wait(self, signed_tx, timeout): - return self.json_rpc('broadcast_tx_commit', [base64.b64encode(signed_tx).decode('utf8')], timeout=timeout) + return self.json_rpc('broadcast_tx_commit', + 
[base64.b64encode(signed_tx).decode('utf8')], + timeout=timeout) def get_status(self): r = requests.get("http://%s:%s/status" % self.rpc_addr(), timeout=2) @@ -114,7 +134,8 @@ def get_all_heights(self): while True: block = self.get_block(hash_) - if 'error' in block and 'data' in block['error'] and 'Block Missing' in block['error']['data']: + if 'error' in block and 'data' in block[ + 'error'] and 'Block Missing' in block['error']['data']: break elif 'result' not in block: print(block) @@ -131,19 +152,27 @@ def get_validators(self): return self.json_rpc('validators', [None]) def get_account(self, acc, finality='optimistic'): - return self.json_rpc('query', {"request_type": "view_account", "account_id": acc, "finality": finality}) + return self.json_rpc('query', { + "request_type": "view_account", + "account_id": acc, + "finality": finality + }) def get_access_key_list(self, acc, finality='optimistic'): - return self.json_rpc('query', {"request_type": "view_access_key_list", "account_id": acc, "finality": finality}) + return self.json_rpc( + 'query', { + "request_type": "view_access_key_list", + "account_id": acc, + "finality": finality + }) def get_nonce_for_pk(self, acc, pk, finality='optimistic'): - for access_key in self.get_access_key_list(acc, finality)['result']['keys']: + for access_key in self.get_access_key_list(acc, + finality)['result']['keys']: if access_key['public_key'] == pk: return access_key['access_key']['nonce'] return None - - def get_block(self, block_id): return self.json_rpc('block', [block_id]) @@ -154,13 +183,16 @@ def get_tx(self, tx_hash, tx_recipient_id): return self.json_rpc('tx', [tx_hash, tx_recipient_id]) def get_changes_in_block(self, changes_in_block_request): - return self.json_rpc('EXPERIMENTAL_changes_in_block', changes_in_block_request) + return self.json_rpc('EXPERIMENTAL_changes_in_block', + changes_in_block_request) def get_changes(self, changes_request): return self.json_rpc('EXPERIMENTAL_changes', changes_request) def 
validators(self): - return set(map(lambda v: v['account_id'], self.get_status()['validators'])) + return set( + map(lambda v: v['account_id'], + self.get_status()['validators'])) def stop_checking_refmap(self): self.is_check_refmap = False @@ -177,7 +209,9 @@ def check_refmap(self): else: self.refmap_tests += 1 if res['result'] != 1: - print("ERROR: Block Reference Map for %s:%s in inconsistent state, stopping" % self.addr()) + print( + "ERROR: Block Reference Map for %s:%s in inconsistent state, stopping" + % self.addr()) self.kill() def check_store(self): @@ -192,9 +226,9 @@ def check_store(self): self.kill() self.store_tests += res['result'] - class RpcNode(BaseNode): """ A running node only interact by rpc queries """ + def __init__(self, host, rpc_port): super(RpcNode, self).__init__() self.host = host @@ -205,7 +239,14 @@ def rpc_addr(self): class LocalNode(BaseNode): - def __init__(self, port, rpc_port, near_root, node_dir, blacklist, binary_name='neard'): + + def __init__(self, + port, + rpc_port, + near_root, + node_dir, + blacklist, + binary_name='neard'): super(LocalNode, self).__init__() self.port = port self.rpc_port = rpc_port @@ -222,7 +263,8 @@ def __init__(self, port, rpc_port, near_root, node_dir, blacklist, binary_name=' # assert config_json['network']['addr'] == '0.0.0.0:24567', config_json['network']['addr'] # assert config_json['rpc']['addr'] == '0.0.0.0:3030', config_json['rpc']['addr'] # just a sanity assert that the setting name didn't change - assert 0 <= config_json['consensus']['min_num_peers'] <= 3, config_json['consensus']['min_num_peers'] + assert 0 <= config_json['consensus']['min_num_peers'] <= 3, config_json[ + 'consensus']['min_num_peers'] config_json['network']['addr'] = '0.0.0.0:%s' % port config_json['network']['blacklist'] = blacklist config_json['rpc']['addr'] = '0.0.0.0:%s' % rpc_port @@ -230,9 +272,12 @@ def __init__(self, port, rpc_port, near_root, node_dir, blacklist, binary_name=' with open(os.path.join(node_dir, 
"config.json"), 'w') as f: f.write(json.dumps(config_json, indent=2)) - self.validator_key = Key.from_json_file(os.path.join(node_dir, "validator_key.json")) - self.node_key = Key.from_json_file(os.path.join(node_dir, "node_key.json")) - self.signer_key = Key.from_json_file(os.path.join(node_dir, "validator_key.json")) + self.validator_key = Key.from_json_file( + os.path.join(node_dir, "validator_key.json")) + self.node_key = Key.from_json_file( + os.path.join(node_dir, "node_key.json")) + self.signer_key = Key.from_json_file( + os.path.join(node_dir, "validator_key.json")) self.pid = multiprocessing.Value('i', 0) @@ -252,14 +297,18 @@ def start(self, boot_key, boot_node_addr): self.stderr_name = os.path.join(self.node_dir, 'stderr') self.stdout = open(self.stdout_name, 'a') self.stderr = open(self.stderr_name, 'a') - cmd = self._get_command_line( - self.near_root, self.node_dir, boot_key, boot_node_addr, self.binary_name) - self.pid.value = subprocess.Popen( - cmd, stdout=self.stdout, stderr=self.stderr, env=env).pid + cmd = self._get_command_line(self.near_root, self.node_dir, boot_key, + boot_node_addr, self.binary_name) + self.pid.value = subprocess.Popen(cmd, + stdout=self.stdout, + stderr=self.stderr, + env=env).pid try: self.wait_for_rpc(10) except: - print('=== Error: failed to start node, rpc does not ready in 10 seconds') + print( + '=== Error: failed to start node, rpc does not ready in 10 seconds' + ) self.stdout.close() self.stderr.close() if os.environ.get('BUILDKITE'): @@ -268,7 +317,6 @@ def start(self, boot_key, boot_node_addr): print('=== stderr: ') print(open(self.stderr_name).read()) - def kill(self): if self.pid.value != 0: os.kill(self.pid.value, signal.SIGKILL) @@ -302,6 +350,7 @@ class BotoNode(BaseNode): class GCloudNode(BaseNode): + def __init__(self, *args): if len(args) == 1: # Get existing instance assume it's ready to run @@ -340,10 +389,10 @@ def __init__(self, *args): else: raise Exception() - def _upload_config_files(self, 
node_dir): self.machine.run('bash', input='mkdir -p ~/.near') - self.machine.upload(os.path.join(node_dir, '*.json'), f'/home/{self.machine.username}/.near/') + self.machine.upload(os.path.join(node_dir, '*.json'), + f'/home/{self.machine.username}/.near/') self.validator_key = Key.from_json_file( os.path.join(node_dir, "validator_key.json")) self.node_key = Key.from_json_file( @@ -353,7 +402,8 @@ def _upload_config_files(self, node_dir): @retry.retry(delay=1, tries=3) def _download_binary(self, binary): - p = self.machine.run('bash', input=f''' + p = self.machine.run('bash', + input=f''' /snap/bin/gsutil cp gs://nearprotocol_nearcore_release/{binary} near chmod +x near ''') @@ -367,7 +417,9 @@ def rpc_addr(self): return (self.ip, self.rpc_port) def start(self, boot_key, boot_node_addr): - self.machine.run_detach_tmux("RUST_BACKTRACE=1 "+" ".join(self._get_command_line('.', '.near', boot_key, boot_node_addr)).replace("--verbose", '--verbose ""')) + self.machine.run_detach_tmux("RUST_BACKTRACE=1 " + " ".join( + self._get_command_line('.', '.near', boot_key, boot_node_addr)). 
+ replace("--verbose", '--verbose ""')) self.wait_for_rpc(timeout=30) def kill(self): @@ -388,7 +440,9 @@ def cleanup(self): # Get log and delete machine rc.run(f'mkdir -p /tmp/pytest_remote_log') - self.machine.download('/tmp/python-rc.log', f'/tmp/pytest_remote_log/{self.machine.name}.log') + self.machine.download( + '/tmp/python-rc.log', + f'/tmp/pytest_remote_log/{self.machine.name}.log') self.destroy_machine() def json_rpc(self, method, params, timeout=10): @@ -400,29 +454,47 @@ def get_status(self): return json.loads(r.content) def stop_network(self): - rc.run(f'gcloud compute firewall-rules create {self.machine.name}-stop --direction=EGRESS --priority=1000 --network=default --action=DENY --rules=all --target-tags={self.machine.name}') + rc.run( + f'gcloud compute firewall-rules create {self.machine.name}-stop --direction=EGRESS --priority=1000 --network=default --action=DENY --rules=all --target-tags={self.machine.name}' + ) def resume_network(self): - rc.run(f'gcloud compute firewall-rules delete {self.machine.name}-stop', input='yes\n') + rc.run(f'gcloud compute firewall-rules delete {self.machine.name}-stop', + input='yes\n') -def spin_up_node(config, near_root, node_dir, ordinal, boot_key, boot_addr, blacklist=[]): +def spin_up_node(config, + near_root, + node_dir, + ordinal, + boot_key, + boot_addr, + blacklist=[]): is_local = config['local'] - print("Starting node %s %s" % (ordinal, ("as BOOT NODE" if boot_addr is None else ( - "with boot=%s@%s:%s" % (boot_key, boot_addr[0], boot_addr[1]))))) + print("Starting node %s %s" % (ordinal, + ("as BOOT NODE" if boot_addr is None else + ("with boot=%s@%s:%s" % + (boot_key, boot_addr[0], boot_addr[1]))))) if is_local: - blacklist = ["127.0.0.1:%s" % (24567 + 10 + bl_ordinal) for bl_ordinal in blacklist] - node = LocalNode(24567 + 10 + ordinal, 3030 + - 10 + ordinal, near_root, node_dir, blacklist, config.get('binary_name', 'near')) + blacklist = [ + "127.0.0.1:%s" % (24567 + 10 + bl_ordinal) + for bl_ordinal in 
blacklist + ] + node = LocalNode(24567 + 10 + ordinal, 3030 + 10 + ordinal, near_root, + node_dir, blacklist, config.get('binary_name', 'near')) else: # TODO: Figure out how to know IP address beforehand for remote deployment. - assert len(blacklist) == 0, "Blacklist is only supported in LOCAL deployment." + assert len( + blacklist) == 0, "Blacklist is only supported in LOCAL deployment." - instance_name = '{}-{}-{}'.format(config['remote'].get('instance_name', 'near-pytest'), ordinal, uuid.uuid4()) + instance_name = '{}-{}-{}'.format( + config['remote'].get('instance_name', 'near-pytest'), ordinal, + uuid.uuid4()) zones = config['remote']['zones'] zone = zones[ordinal % len(zones)] - node = GCloudNode(instance_name, zone, node_dir, config['remote']['binary']) + node = GCloudNode(instance_name, zone, node_dir, + config['remote']['binary']) with remote_nodes_lock: remote_nodes.append(node) print(f"node {ordinal} machine created") @@ -438,18 +510,24 @@ def connect_to_mocknet(config): config = load_config() if 'local' in config: - print("Attempt to launch a mocknet test with a regular config", file=sys.stderr) + print("Attempt to launch a mocknet test with a regular config", + file=sys.stderr) sys.exit(1) - return [RpcNode(node['ip'], node['port']) for node in config['nodes']], [Key(account['account_id'], account['pk'], account['sk']) for account in config['accounts']] + return [RpcNode(node['ip'], node['port']) for node in config['nodes']], [ + Key(account['account_id'], account['pk'], account['sk']) + for account in config['accounts'] + ] -def init_cluster(num_nodes, num_observers, num_shards, config, genesis_config_changes, client_config_changes): +def init_cluster(num_nodes, num_observers, num_shards, config, + genesis_config_changes, client_config_changes): """ Create cluster configuration """ if 'local' not in config and 'nodes' in config: - print("Attempt to launch a regular test with a mocknet config", file=sys.stderr) + print("Attempt to launch a regular test 
with a mocknet config", + file=sys.stderr) sys.exit(1) is_local = config['local'] @@ -458,15 +536,26 @@ def init_cluster(num_nodes, num_observers, num_shards, config, genesis_config_ch print("Creating %s cluster configuration with %s nodes" % ("LOCAL" if is_local else "REMOTE", num_nodes + num_observers)) - - process = subprocess.Popen([os.path.join(near_root, "near"), "testnet", "--v", str(num_nodes), "--shards", str( - num_shards), "--n", str(num_observers), "--prefix", "test"], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen([ + os.path.join(near_root, "near"), "testnet", "--v", + str(num_nodes), "--shards", + str(num_shards), "--n", + str(num_observers), "--prefix", "test" + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) out, err = process.communicate() assert 0 == process.returncode, err - node_dirs = [line.split()[-1] - for line in err.decode('utf8').split('\n') if '/test' in line] - assert len(node_dirs) == num_nodes + num_observers, "node dirs: %s num_nodes: %s num_observers: %s" % (len(node_dirs), num_nodes, num_observers) + node_dirs = [ + line.split()[-1] + for line in err.decode('utf8').split('\n') + if '/test' in line + ] + assert len( + node_dirs + ) == num_nodes + num_observers, "node dirs: %s num_nodes: %s num_observers: %s" % ( + len(node_dirs), num_nodes, num_observers) print("Search for stdout and stderr in %s" % node_dirs) # apply config changes @@ -513,22 +602,28 @@ def apply_config_changes(node_dir, client_config_change): f.write(json.dumps(config_json, indent=2)) -def start_cluster(num_nodes, num_observers, num_shards, config, genesis_config_changes, client_config_changes): +def start_cluster(num_nodes, num_observers, num_shards, config, + genesis_config_changes, client_config_changes): if not config: config = load_config() if not os.path.exists(os.path.expanduser("~/.near/test0")): - near_root, node_dirs = init_cluster( - num_nodes, num_observers, num_shards, config, genesis_config_changes, 
client_config_changes) + near_root, node_dirs = init_cluster(num_nodes, num_observers, + num_shards, config, + genesis_config_changes, + client_config_changes) else: near_root = config['near_root'] - node_dirs = subprocess.check_output("find ~/.near/test* -maxdepth 0", shell=True).decode('utf-8').strip().split('\n') - node_dirs = list(filter(lambda n: not n.endswith('_finished'), node_dirs)) + node_dirs = subprocess.check_output( + "find ~/.near/test* -maxdepth 0", + shell=True).decode('utf-8').strip().split('\n') + node_dirs = list( + filter(lambda n: not n.endswith('_finished'), node_dirs)) ret = [] def spin_up_node_and_push(i, boot_key, boot_addr): - node = spin_up_node(config, near_root, - node_dirs[i], i, boot_key, boot_addr) + node = spin_up_node(config, near_root, node_dirs[i], i, boot_key, + boot_addr) while len(ret) < i: time.sleep(0.01) ret.append(node) @@ -538,8 +633,9 @@ def spin_up_node_and_push(i, boot_key, boot_addr): handles = [] for i in range(1, num_nodes + num_observers): - handle = threading.Thread(target=spin_up_node_and_push, args=( - i, boot_node.node_key.pk, boot_node.addr())) + handle = threading.Thread(target=spin_up_node_and_push, + args=(i, boot_node.node_key.pk, + boot_node.addr())) handle.start() handles.append(handle) @@ -549,7 +645,11 @@ def spin_up_node_and_push(i, boot_key, boot_addr): return ret -DEFAULT_CONFIG = {'local': True, 'near_root': '../target/debug/', 'binary_name': 'neard'} +DEFAULT_CONFIG = { + 'local': True, + 'near_root': '../target/debug/', + 'binary_name': 'neard' +} CONFIG_ENV_VAR = 'NEAR_PYTEST_CONFIG' @@ -567,5 +667,3 @@ def load_config(): else: print(f"Use default config {config}") return config - - diff --git a/pytest/lib/lightclient.py b/pytest/lib/lightclient.py index 937ce11de14..542dbdb688f 100644 --- a/pytest/lib/lightclient.py +++ b/pytest/lib/lightclient.py @@ -1,23 +1,34 @@ from serializer import BinarySerializer import hashlib, base58 + class BlockHeaderInnerLite: pass -inner_lite_schema = 
dict([[BlockHeaderInnerLite, {'kind': 'struct', 'fields': [ - ['height', 'u64'], - ['epoch_id', [32]], - ['next_epoch_id', [32]], - ['prev_state_root', [32]], - ['outcome_root', [32]], - ['timestamp', 'u64'], - ['next_bp_hash', [32]], -]} ], + +inner_lite_schema = dict([ + [ + BlockHeaderInnerLite, { + 'kind': + 'struct', + 'fields': [ + ['height', 'u64'], + ['epoch_id', [32]], + ['next_epoch_id', [32]], + ['prev_state_root', [32]], + ['outcome_root', [32]], + ['timestamp', 'u64'], + ['next_bp_hash', [32]], + ] + } + ], ]) + def combine_hash(hash1, hash2): return hashlib.sha256(hash1 + hash2).digest() + def compute_block_hash(inner_lite_view, inner_rest_hash, prev_hash): inner_rest_hash = base58.b58decode(inner_rest_hash) prev_hash = base58.b58decode(prev_hash) @@ -25,8 +36,10 @@ def compute_block_hash(inner_lite_view, inner_rest_hash, prev_hash): inner_lite = BlockHeaderInnerLite() inner_lite.height = inner_lite_view['height'] inner_lite.epoch_id = base58.b58decode(inner_lite_view['epoch_id']) - inner_lite.next_epoch_id = base58.b58decode(inner_lite_view['next_epoch_id']) - inner_lite.prev_state_root = base58.b58decode(inner_lite_view['prev_state_root']) + inner_lite.next_epoch_id = base58.b58decode( + inner_lite_view['next_epoch_id']) + inner_lite.prev_state_root = base58.b58decode( + inner_lite_view['prev_state_root']) inner_lite.outcome_root = base58.b58decode(inner_lite_view['outcome_root']) inner_lite.timestamp = inner_lite_view['timestamp'] inner_lite.next_bp_hash = base58.b58decode(inner_lite_view['next_bp_hash']) @@ -38,23 +51,36 @@ def compute_block_hash(inner_lite_view, inner_rest_hash, prev_hash): return base58.b58encode(final_hash) -# follows the spec from NEP 25 (https://github.com/nearprotocol/NEPs/pull/25) -def validate_light_client_block(last_known_block, new_block, block_producers_map, panic=False): - new_block_hash = compute_block_hash(new_block['inner_lite'], new_block['inner_rest_hash'], new_block['prev_hash']) - if 
new_block['inner_lite']['epoch_id'] not in [last_known_block['inner_lite']['epoch_id'], last_known_block['inner_lite']['next_epoch_id']]: - if panic: assert False +# follows the spec from NEP 25 (https://github.com/nearprotocol/NEPs/pull/25) +def validate_light_client_block(last_known_block, + new_block, + block_producers_map, + panic=False): + new_block_hash = compute_block_hash(new_block['inner_lite'], + new_block['inner_rest_hash'], + new_block['prev_hash']) + + if new_block['inner_lite']['epoch_id'] not in [ + last_known_block['inner_lite']['epoch_id'], + last_known_block['inner_lite']['next_epoch_id'] + ]: + if panic: + assert False return False block_producers = block_producers_map[new_block['inner_lite']['epoch_id']] - if len(new_block['qv_approvals']) != len(block_producers) or len(new_block['qc_approvals']) != len(block_producers): - if panic: assert False + if len(new_block['qv_approvals']) != len(block_producers) or len( + new_block['qc_approvals']) != len(block_producers): + if panic: + assert False return False if len(new_block['future_inner_hashes']) > 50: - if panic: assert False + if panic: + assert False return False - + qv_blocks = set() qc_blocks = set() @@ -72,7 +98,8 @@ def validate_light_client_block(last_known_block, new_block, block_producers_map prev_hash = cur_hash if not passed_qv: - if panic: assert False + if panic: + assert False return False qv_blocks = [base58.b58encode(x).decode('ascii') for x in qv_blocks] @@ -82,14 +109,19 @@ def validate_light_client_block(last_known_block, new_block, block_producers_map qv_stake = 0 qc_stake = 0 - for qv_approval, qc_approval, stake in zip(new_block['qv_approvals'], new_block['qc_approvals'], block_producers): + for qv_approval, qc_approval, stake in zip(new_block['qv_approvals'], + new_block['qc_approvals'], + block_producers): if qv_approval is not None: qv_stake += int(stake['stake']) - if qv_approval['parent_hash'] not in qv_blocks and qv_approval['parent_hash'] != 
new_block_hash.decode('ascii'): - if panic: assert False + if qv_approval['parent_hash'] not in qv_blocks and qv_approval[ + 'parent_hash'] != new_block_hash.decode('ascii'): + if panic: + assert False return False if qv_approval['reference_hash'] in qv_blocks: - if panic: assert False + if panic: + assert False return False #if not validate_signature(qv_approval.signature, hash(qv_approval), stake.public_key): # if panic: assert False @@ -98,10 +130,13 @@ def validate_light_client_block(last_known_block, new_block, block_producers_map if qc_approval is not None: qc_stake += int(stake['stake']) if qc_approval['parent_hash'] not in qc_blocks: - if panic: assert False + if panic: + assert False return False - if qc_approval['reference_hash'] in qc_blocks or qc_approval['reference_hash'] in qv_blocks: - if panic: assert False + if qc_approval['reference_hash'] in qc_blocks or qc_approval[ + 'reference_hash'] in qv_blocks: + if panic: + assert False return False #if not validate_signature(qc_approval.signature, hash(qc_approval), stake.public_key): # return false @@ -110,18 +145,22 @@ def validate_light_client_block(last_known_block, new_block, block_producers_map threshold = total_stake * 2 // 3 if qv_stake <= threshold: - if panic: assert False + if panic: + assert False return False if qc_stake <= threshold: - if panic: assert False + if panic: + assert False return False - if new_block['inner_lite']['epoch_id'] == last_known_block['inner_lite']['next_epoch_id']: + if new_block['inner_lite']['epoch_id'] == last_known_block['inner_lite'][ + 'next_epoch_id']: if new_block['next_bps'] is None: - if panic: assert False + if panic: + assert False return False - - # TODO: MOO check hash - block_producers_map[new_block['inner_lite']['next_epoch_id']] = new_block['next_bps'] + # TODO: MOO check hash + block_producers_map[new_block['inner_lite'] + ['next_epoch_id']] = new_block['next_bps'] diff --git a/pytest/lib/network.py b/pytest/lib/network.py index 
cc0ef833d25..1f3d7373c40 100644 --- a/pytest/lib/network.py +++ b/pytest/lib/network.py @@ -1,10 +1,14 @@ import subprocess, sys + def _run_process(cmd): - process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + process = subprocess.Popen(cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) out, err = process.communicate() return (process.returncode, out, err) + def init_network_pillager(): _run_process(["mkdir", "-p", "/sys/fs/cgroup/net_cls/block"]) try: @@ -12,18 +16,29 @@ def init_network_pillager(): f.write("42") except IOError as e: if e[0] == 13: - print("Failed to modify `/sys/fs/cgroup/net_cls/block/net_cls.classid`.") - print("Make sure the current user has access to it, e.g. by changing the owner:") + print( + "Failed to modify `/sys/fs/cgroup/net_cls/block/net_cls.classid`." + ) + print( + "Make sure the current user has access to it, e.g. by changing the owner:" + ) print("") - print(" chown . /sys/fs/cgroup/net_cls/block/net_cls.classid") + print( + " chown . 
/sys/fs/cgroup/net_cls/block/net_cls.classid" + ) print("") sys.exit(1) - _run_process(["iptables", "-A", "OUTPUT", "-m", "cgroup", "--cgroup", "42", "-j", "DROP"]) + _run_process([ + "iptables", "-A", "OUTPUT", "-m", "cgroup", "--cgroup", "42", "-j", + "DROP" + ]) + def stop_network(pid): with open('/sys/fs/cgroup/net_cls/block/tasks', 'w') as f: f.write(str(pid)) + def resume_network(pid): try: with open('/sys/fs/cgroup/net_cls/tasks', 'w') as f: @@ -32,10 +47,14 @@ def resume_network(pid): # the process was killed in the meantime pass + if __name__ == "__main__": import time init_network_pillager() - handle = subprocess.Popen(["ping", "8.8.8.8"], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + handle = subprocess.Popen(["ping", "8.8.8.8"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) print(handle.pid) time.sleep(3) stop_network(handle.pid) @@ -48,4 +67,3 @@ def resume_network(pid): print(out) print("STDERR (expect ~3 entries if all goes well):") print(err) - diff --git a/pytest/lib/retrying.py b/pytest/lib/retrying.py index 7a01bda9aea..b5186d62785 100644 --- a/pytest/lib/retrying.py +++ b/pytest/lib/retrying.py @@ -1,5 +1,6 @@ import time + def retry(fn, timeout): started = time.time() delay = 0.05 @@ -11,4 +12,3 @@ def retry(fn, timeout): raise time.sleep(delay) delay *= 1.2 - diff --git a/pytest/lib/serializer.py b/pytest/lib/serializer.py index 3c446d455dd..04d9b97f97c 100644 --- a/pytest/lib/serializer.py +++ b/pytest/lib/serializer.py @@ -1,4 +1,5 @@ class BinarySerializer: + def __init__(self, schema): self.array = bytearray() self.schema = schema @@ -24,7 +25,8 @@ def serialize_field(self, value, fieldType): assert len(fieldType) == 1 if type(fieldType[0]) == int: assert type(value) == bytes - assert len(value) == fieldType[0], "len(%s) = %s != %s" % (value, len(value), fieldType[0]) + assert len(value) == fieldType[0], "len(%s) = %s != %s" % ( + value, len(value), fieldType[0]) self.array += 
bytearray(value) else: self.serialize_num(len(value), 4) @@ -38,10 +40,11 @@ def serialize_field(self, value, fieldType): self.serialize_num(1, 1) self.serialize_field(value, fieldType['type']) elif type(fieldType) == type: - assert type(value) == fieldType, "%s != type(%s)" % (fieldType, value) + assert type(value) == fieldType, "%s != type(%s)" % (fieldType, + value) self.serialize_struct(value) else: - assert False, type(fieldType) + assert False, type(fieldType) def serialize_struct(self, obj): structSchema = self.schema[type(obj)] @@ -50,7 +53,8 @@ def serialize_struct(self, obj): self.serialize_field(getattr(obj, fieldName), fieldType) elif structSchema['kind'] == 'enum': name = getattr(obj, structSchema['field']) - for idx, (fieldName, fieldType) in enumerate(structSchema['values']): + for idx, (fieldName, + fieldType) in enumerate(structSchema['values']): if fieldName == name: self.serialize_num(idx, 1) self.serialize_field(getattr(obj, fieldName), fieldType) diff --git a/pytest/lib/transaction.py b/pytest/lib/transaction.py index 9dbc376d5d0..b42a568ee96 100644 --- a/pytest/lib/transaction.py +++ b/pytest/lib/transaction.py @@ -6,128 +6,203 @@ class Signature: pass + class SignedTransaction: pass + class Transaction: pass + class PublicKey: pass + class AccessKey: pass + class AccessKeyPermission: pass + class FunctionCallPermission: pass + class FullAccessPermission: pass + class Action: pass + class CreateAccount: pass + class DeployContract: pass + class FunctionCall: pass + class Transfer: pass + class Stake: pass + class AddKey: pass + class DeleteKey: pass + class DeleteAccount: pass -tx_schema = dict([[Signature, { 'kind': 'struct', 'fields': [ - ['keyType', 'u8'], - ['data', [64]] - ] }], -[SignedTransaction, { 'kind': 'struct', 'fields': [ - ['transaction', Transaction], - ['signature', Signature] - ] }], -[Transaction, { 'kind': 'struct', 'fields': [ - ['signerId', 'string'], - ['publicKey', PublicKey], - ['nonce', 'u64'], - ['receiverId', 
'string'], - ['blockHash', [32]], - ['actions', [Action]] - ] }], -[PublicKey, { 'kind': 'struct', 'fields': [ - ['keyType', 'u8'], - ['data', [32]] - ] }], -[AccessKey, { 'kind': 'struct', 'fields': [ - ['nonce', 'u64'], - ['permission', AccessKeyPermission], - ] }], -[AccessKeyPermission, { 'kind': 'enum', 'field': 'enum', 'values': [ - ['functionCall', FunctionCallPermission], - ['fullAccess', FullAccessPermission], - ] }], -[FunctionCallPermission, { 'kind': 'struct', 'fields': [ - ['allowance', { 'kind': 'option', type: 'u128' }], - ['receiverId', 'string'], - ['methodNames', ['string']], - ] }], -[FullAccessPermission, { 'kind': 'struct', 'fields': [] }], -[Action, { 'kind': 'enum', 'field': 'enum', 'values': [ - ['createAccount', CreateAccount], - ['deployContract', DeployContract], - ['functionCall', FunctionCall], - ['transfer', Transfer], - ['stake', Stake], - ['addKey', AddKey], - ['deleteKey', DeleteKey], - ['deleteAccount', DeleteAccount], - ] }], -[CreateAccount, { 'kind': 'struct', 'fields': [] }], -[DeployContract, { 'kind': 'struct', 'fields': [ - ['code', ['u8']] - ] }], -[FunctionCall, { 'kind': 'struct', 'fields': [ - ['methodName', 'string'], - ['args', ['u8']], - ['gas', 'u64'], - ['deposit', 'u128'] - ] }], -[Transfer, { 'kind': 'struct', 'fields': [ - ['deposit', 'u128'] - ] }], -[Stake, { 'kind': 'struct', 'fields': [ - ['stake', 'u128'], - ['publicKey', PublicKey] - ] }], -[AddKey, { 'kind': 'struct', 'fields': [ - ['publicKey', PublicKey], - ['accessKey', AccessKey] - ] }], -[DeleteKey, { 'kind': 'struct', 'fields': [ - ['publicKey', PublicKey] - ] }], -[DeleteAccount, { 'kind': 'struct', 'fields': [ - ['beneficiaryId', 'string'] - ] }], + +tx_schema = dict([ + [ + Signature, { + 'kind': 'struct', + 'fields': [['keyType', 'u8'], ['data', [64]]] + } + ], + [ + SignedTransaction, { + 'kind': 'struct', + 'fields': [['transaction', Transaction], ['signature', Signature]] + } + ], + [ + Transaction, { + 'kind': + 'struct', + 'fields': 
[['signerId', 'string'], ['publicKey', PublicKey], + ['nonce', 'u64'], ['receiverId', 'string'], + ['blockHash', [32]], ['actions', [Action]]] + } + ], + [ + PublicKey, { + 'kind': 'struct', + 'fields': [['keyType', 'u8'], ['data', [32]]] + } + ], + [ + AccessKey, { + 'kind': 'struct', + 'fields': [ + ['nonce', 'u64'], + ['permission', AccessKeyPermission], + ] + } + ], + [ + AccessKeyPermission, { + 'kind': + 'enum', + 'field': + 'enum', + 'values': [ + ['functionCall', FunctionCallPermission], + ['fullAccess', FullAccessPermission], + ] + } + ], + [ + FunctionCallPermission, { + 'kind': + 'struct', + 'fields': [ + ['allowance', { + 'kind': 'option', + type: 'u128' + }], + ['receiverId', 'string'], + ['methodNames', ['string']], + ] + } + ], + [FullAccessPermission, { + 'kind': 'struct', + 'fields': [] + }], + [ + Action, { + 'kind': + 'enum', + 'field': + 'enum', + 'values': [ + ['createAccount', CreateAccount], + ['deployContract', DeployContract], + ['functionCall', FunctionCall], + ['transfer', Transfer], + ['stake', Stake], + ['addKey', AddKey], + ['deleteKey', DeleteKey], + ['deleteAccount', DeleteAccount], + ] + } + ], + [CreateAccount, { + 'kind': 'struct', + 'fields': [] + }], + [DeployContract, { + 'kind': 'struct', + 'fields': [['code', ['u8']]] + }], + [ + FunctionCall, { + 'kind': + 'struct', + 'fields': [['methodName', 'string'], ['args', ['u8']], + ['gas', 'u64'], ['deposit', 'u128']] + } + ], + [Transfer, { + 'kind': 'struct', + 'fields': [['deposit', 'u128']] + }], + [ + Stake, { + 'kind': 'struct', + 'fields': [['stake', 'u128'], ['publicKey', PublicKey]] + } + ], + [ + AddKey, { + 'kind': 'struct', + 'fields': [['publicKey', PublicKey], ['accessKey', AccessKey]] + } + ], + [DeleteKey, { + 'kind': 'struct', + 'fields': [['publicKey', PublicKey]] + }], + [ + DeleteAccount, { + 'kind': 'struct', + 'fields': [['beneficiaryId', 'string']] + } + ], ]) -def sign_and_serialize_transaction(receiverId, nonce, actions, blockHash, accountId, pk, sk): + +def 
sign_and_serialize_transaction(receiverId, nonce, actions, blockHash, + accountId, pk, sk): tx = Transaction() tx.signerId = accountId tx.publicKey = PublicKey() @@ -151,6 +226,7 @@ def sign_and_serialize_transaction(receiverId, nonce, actions, blockHash, accoun return BinarySerializer(tx_schema).serialize(signedTx) + def create_create_account_action(): createAccount = CreateAccount() action = Action() @@ -158,6 +234,7 @@ def create_create_account_action(): action.createAccount = createAccount return action + def create_full_access_key_action(pk): permission = AccessKeyPermission() permission.enum = 'fullAccess' @@ -176,6 +253,7 @@ def create_full_access_key_action(pk): action.addKey = addKey return action + def create_delete_access_key_action(pk): publicKey = PublicKey() publicKey.keyType = 0 @@ -187,6 +265,7 @@ def create_delete_access_key_action(pk): action.deleteKey = deleteKey return action + def create_payment_action(amount): transfer = Transfer() transfer.deposit = amount @@ -195,6 +274,7 @@ def create_payment_action(amount): action.transfer = transfer return action + def create_staking_action(amount, pk): stake = Stake() stake.stake = amount @@ -206,6 +286,7 @@ def create_staking_action(amount, pk): action.stake = stake return action + def create_deploy_contract_action(code): deployContract = DeployContract() deployContract.code = code @@ -214,6 +295,7 @@ def create_deploy_contract_action(code): action.deployContract = deployContract return action + def create_function_call_action(methodName, args, gas, deposit): functionCall = FunctionCall() functionCall.methodName = methodName @@ -225,33 +307,65 @@ def create_function_call_action(methodName, args, gas, deposit): action.functionCall = functionCall return action + def sign_create_account_tx(creator_key, new_account_id, nonce, block_hash): action = create_create_account_action() - return sign_and_serialize_transaction(new_account_id, nonce, [action], block_hash, creator_key.account_id, 
creator_key.decoded_pk(), creator_key.decoded_sk()) + return sign_and_serialize_transaction(new_account_id, nonce, [action], + block_hash, creator_key.account_id, + creator_key.decoded_pk(), + creator_key.decoded_sk()) -def sign_create_account_with_full_access_key_and_balance_tx(creator_key, new_account_id, new_key, balance, nonce, block_hash): + +def sign_create_account_with_full_access_key_and_balance_tx( + creator_key, new_account_id, new_key, balance, nonce, block_hash): create_account_action = create_create_account_action() full_access_key_action = create_full_access_key_action(new_key.decoded_pk()) payment_action = create_payment_action(balance) actions = [create_account_action, full_access_key_action, payment_action] - return sign_and_serialize_transaction(new_account_id, nonce, actions, block_hash, creator_key.account_id, creator_key.decoded_pk(), creator_key.decoded_sk()) + return sign_and_serialize_transaction(new_account_id, nonce, actions, + block_hash, creator_key.account_id, + creator_key.decoded_pk(), + creator_key.decoded_sk()) + -def sign_delete_access_key_tx(signer_key, target_account_id, key_for_deletion, nonce, block_hash): +def sign_delete_access_key_tx(signer_key, target_account_id, key_for_deletion, + nonce, block_hash): action = create_delete_access_key_action(key_for_deletion.decoded_pk()) - return sign_and_serialize_transaction(target_account_id, nonce, [action], block_hash, signer_key.account_id, signer_key.decoded_pk(), signer_key.decoded_sk()) + return sign_and_serialize_transaction(target_account_id, nonce, [action], + block_hash, signer_key.account_id, + signer_key.decoded_pk(), + signer_key.decoded_sk()) + def sign_payment_tx(key, to, amount, nonce, blockHash): action = create_payment_action(amount) - return sign_and_serialize_transaction(to, nonce, [action], blockHash, key.account_id, key.decoded_pk(), key.decoded_sk()) + return sign_and_serialize_transaction(to, nonce, [action], blockHash, + key.account_id, key.decoded_pk(), + 
key.decoded_sk()) + def sign_staking_tx(signer_key, validator_key, amount, nonce, blockHash): action = create_staking_action(amount, validator_key.decoded_pk()) - return sign_and_serialize_transaction(signer_key.account_id, nonce, [action], blockHash, signer_key.account_id, signer_key.decoded_pk(), signer_key.decoded_sk()) + return sign_and_serialize_transaction(signer_key.account_id, nonce, + [action], blockHash, + signer_key.account_id, + signer_key.decoded_pk(), + signer_key.decoded_sk()) + def sign_deploy_contract_tx(signer_key, code, nonce, blockHash): action = create_deploy_contract_action(code) - return sign_and_serialize_transaction(signer_key.account_id, nonce, [action], blockHash, signer_key.account_id, signer_key.decoded_pk(), signer_key.decoded_sk()) + return sign_and_serialize_transaction(signer_key.account_id, nonce, + [action], blockHash, + signer_key.account_id, + signer_key.decoded_pk(), + signer_key.decoded_sk()) + -def sign_function_call_tx(signer_key, contract_id, methodName, args, gas, deposit, nonce, blockHash): +def sign_function_call_tx(signer_key, contract_id, methodName, args, gas, + deposit, nonce, blockHash): action = create_function_call_action(methodName, args, gas, deposit) - return sign_and_serialize_transaction(contract_id, nonce, [action], blockHash, signer_key.account_id, signer_key.decoded_pk(), signer_key.decoded_sk()) + return sign_and_serialize_transaction(contract_id, nonce, [action], + blockHash, signer_key.account_id, + signer_key.decoded_pk(), + signer_key.decoded_sk()) diff --git a/pytest/lib/utils.py b/pytest/lib/utils.py index fdc157f0cba..c1d9c6cb1aa 100644 --- a/pytest/lib/utils.py +++ b/pytest/lib/utils.py @@ -9,7 +9,9 @@ import json from pprint import pprint + class TxContext: + def __init__(self, act_to_val, nodes): self.next_nonce = 2 self.num_nodes = len(nodes) @@ -26,10 +28,7 @@ def get_balance(self, whose): return int(r['result']['amount']) + int(r['result']['locked']) def get_balances(self): - return [ - 
self.get_balance(i) - for i in range(self.num_nodes) - ] + return [self.get_balance(i) for i in range(self.num_nodes)] def send_moar_txs(self, last_block_hash, num, use_routing): last_balances = [x for x in self.expected_balances] @@ -43,8 +42,11 @@ def send_moar_txs(self, last_block_hash, num, use_routing): to += 1 amt = random.randint(0, 500) if self.expected_balances[from_] >= amt: - print("Sending a tx from %s to %s for %s" % (from_, to, amt)); - tx = sign_payment_tx(self.nodes[from_].signer_key, 'test%s' % to, amt, self.next_nonce, base58.b58decode(last_block_hash.encode('utf8'))) + print("Sending a tx from %s to %s for %s" % (from_, to, amt)) + tx = sign_payment_tx( + self.nodes[from_].signer_key, 'test%s' % to, amt, + self.next_nonce, + base58.b58decode(last_block_hash.encode('utf8'))) if use_routing: self.nodes[0].send_tx(tx) else: @@ -58,6 +60,7 @@ def send_moar_txs(self, last_block_hash, num, use_routing): # a particular line appeared (or didn't) between the last time it was # checked and now class LogTracker: + def __init__(self, node): self.node = node if type(node) is LocalNode: @@ -66,7 +69,9 @@ def __init__(self, node): f.seek(0, 2) self.offset = f.tell() elif type(node) is GCloudNode: - self.offset = int(node.machine.run("python3", input=''' + self.offset = int( + node.machine.run("python3", + input=''' with open('/tmp/python-rc.log') as f: f.seek(0, 2) print(f.tell()) @@ -84,7 +89,10 @@ def check(self, pattern): self.offset = f.tell() return ret elif type(self.node) is GCloudNode: - ret, offset = map(int, node.machine.run("python3", input=f''' + ret, offset = map( + int, + node.machine.run("python3", + input=f''' pattern={pattern} with open('/tmp/python-rc.log') as f: f.seek({self.offset}) @@ -108,7 +116,8 @@ def count(self, pattern): self.offset = f.tell() return ret elif type(self.node) == GCloudNode: - ret, offset = node.machine.run("python3", input=f''' + ret, offset = node.machine.run("python3", + input=f''' with open('/tmp/python-rc.log') as 
f: f.seek({self.offset}) print(f.read().count({pattern}) @@ -121,7 +130,6 @@ def count(self, pattern): raise NotImplementedError() - def chain_query(node, block_handler, *, block_hash=None, max_blocks=-1): """ Query chain block approvals and chunks preceding of block of block_hash. @@ -138,7 +146,9 @@ def chain_query(node, block_handler, *, block_hash=None, max_blocks=-1): while True: validators = node.validators() if validators != initial_validators: - print(f'Fatal: validator set of node {node} changes, from {initial_validators} to {validators}') + print( + f'Fatal: validator set of node {node} changes, from {initial_validators} to {validators}' + ) sys.exit(1) block = node.get_block(block_hash)['result'] block_handler(block) @@ -150,7 +160,9 @@ def chain_query(node, block_handler, *, block_hash=None, max_blocks=-1): for _ in range(max_blocks): validators = node.validators() if validators != initial_validators: - print(f'Fatal: validator set of node {node} changes, from {initial_validators} to {validators}') + print( + f'Fatal: validator set of node {node} changes, from {initial_validators} to {validators}' + ) sys.exit(1) block = node.get_block(block_hash)['result'] block_handler(block) @@ -159,12 +171,15 @@ def chain_query(node, block_handler, *, block_hash=None, max_blocks=-1): if block_height == 0: break + def load_binary_file(filepath): with open(filepath, "rb") as binaryfile: return bytearray(binaryfile.read()) + def compile_rust_contract(content): - empty_contract_rs = os.path.join(os.path.dirname(__file__), '../empty-contract-rs') + empty_contract_rs = os.path.join(os.path.dirname(__file__), + '../empty-contract-rs') run('mkdir -p /tmp/near') tmp_contract = tempfile.TemporaryDirectory(dir='/tmp/near').name p = run(f'cp -r {empty_contract_rs} {tmp_contract}') @@ -189,21 +204,26 @@ def user_name(): username = gcloud.list()[0].username.replace('_nearprotocol_com', '') return username + # from 
https://stackoverflow.com/questions/107705/disable-output-buffering # this class allows making print always flush by executing # # sys.stdout = Unbuffered(sys.stdout) class Unbuffered(object): - def __init__(self, stream): - self.stream = stream - def write(self, data): - self.stream.write(data) - self.stream.flush() - def writelines(self, datas): - self.stream.writelines(datas) - self.stream.flush() - def __getattr__(self, attr): - return getattr(self.stream, attr) + + def __init__(self, stream): + self.stream = stream + + def write(self, data): + self.stream.write(data) + self.stream.flush() + + def writelines(self, datas): + self.stream.writelines(datas) + self.stream.flush() + + def __getattr__(self, attr): + return getattr(self.stream, attr) def collect_gcloud_config(num_nodes): @@ -213,18 +233,37 @@ def collect_gcloud_config(num_nodes): if not os.path.exists(f'/tmp/near/node{i}'): # TODO: avoid hardcoding the username print(f'downloading node{i} config from gcloud') - pathlib.Path(f'/tmp/near/node{i}').mkdir(parents=True, exist_ok=True) - gcloud.get(f'pytest-node-{user_name()}-{i}').download('/home/bowen_nearprotocol_com/.near/config.json', f'/tmp/near/node{i}/') - gcloud.get(f'pytest-node-{user_name()}-{i}').download('/home/bowen_nearprotocol_com/.near/signer0_key.json', f'/tmp/near/node{i}/') - gcloud.get(f'pytest-node-{user_name()}-{i}').download('/home/bowen_nearprotocol_com/.near/validator_key.json', f'/tmp/near/node{i}/') - gcloud.get(f'pytest-node-{user_name()}-{i}').download('/home/bowen_nearprotocol_com/.near/node_key.json', f'/tmp/near/node{i}/') + pathlib.Path(f'/tmp/near/node{i}').mkdir(parents=True, + exist_ok=True) + gcloud.get(f'pytest-node-{user_name()}-{i}').download( + '/home/bowen_nearprotocol_com/.near/config.json', + f'/tmp/near/node{i}/') + gcloud.get(f'pytest-node-{user_name()}-{i}').download( + '/home/bowen_nearprotocol_com/.near/signer0_key.json', + f'/tmp/near/node{i}/') + gcloud.get(f'pytest-node-{user_name()}-{i}').download( + 
'/home/bowen_nearprotocol_com/.near/validator_key.json', + f'/tmp/near/node{i}/') + gcloud.get(f'pytest-node-{user_name()}-{i}').download( + '/home/bowen_nearprotocol_com/.near/node_key.json', + f'/tmp/near/node{i}/') with open(f'/tmp/near/node{i}/signer0_key.json') as f: key = json.load(f) keys.append(key) with open('/tmp/near/node0/config.json') as f: config = json.load(f) - ip_addresses = map(lambda x: x.split('@')[-1], config['network']['boot_nodes'].split(',')) - res = {'nodes': list(map(lambda x: {'ip': x.split(':')[0], 'port': 3030}, ip_addresses)), 'accounts': keys} + ip_addresses = map(lambda x: x.split('@')[-1], + config['network']['boot_nodes'].split(',')) + res = { + 'nodes': + list( + map(lambda x: { + 'ip': x.split(':')[0], + 'port': 3030 + }, ip_addresses)), + 'accounts': + keys + } outfile = '/tmp/near/gcloud_config.json' with open(outfile, 'w+') as f: json.dump(res, f) diff --git a/pytest/tests/adversarial/fork_sync.py b/pytest/tests/adversarial/fork_sync.py index a06cd1a2a56..758b407b363 100644 --- a/pytest/tests/adversarial/fork_sync.py +++ b/pytest/tests/adversarial/fork_sync.py @@ -8,7 +8,6 @@ sys.path.append('lib') - from cluster import start_cluster TIMEOUT = 120 @@ -16,7 +15,9 @@ SECOND_STEP_WAIT = 30 FINAL_HEIGHT_THRESHOLD = 80 -nodes = start_cluster(4, 0, 4, None, [["epoch_length", 200], ["block_producer_kickout_threshold", 10]], {}) +nodes = start_cluster( + 4, 0, 4, None, + [["epoch_length", 200], ["block_producer_kickout_threshold", 10]], {}) time.sleep(3) cur_height = 0 fork1_height = 0 @@ -74,7 +75,8 @@ statuses = [] for i, node in enumerate(nodes): cur_status = node.get_status() - statuses.append((i, cur_status['sync_info']['latest_block_height'], cur_status['sync_info']['latest_block_hash'])) + statuses.append((i, cur_status['sync_info']['latest_block_height'], + cur_status['sync_info']['latest_block_hash'])) statuses.sort(key=lambda x: x[1]) last = statuses[-1] cur_height = last[1] @@ -93,6 +95,3 @@ time.sleep(0.5) assert False, 
"timed out waiting for forks to resolve" - - - diff --git a/pytest/tests/adversarial/gc_rollback.py b/pytest/tests/adversarial/gc_rollback.py index 09820b39c7d..37f931a26a2 100644 --- a/pytest/tests/adversarial/gc_rollback.py +++ b/pytest/tests/adversarial/gc_rollback.py @@ -1,6 +1,6 @@ # Builds the following graph: # ------- -# \ +# \ # ------ # \ # -------- @@ -19,7 +19,8 @@ FORK_EACH_BLOCKS = 10 consensus_config = {"consensus": {"min_num_peers": 0}} -nodes = start_cluster(2, 0, 1, None, [["epoch_length", EPOCH_LENGTH]], {0: consensus_config}) +nodes = start_cluster(2, 0, 1, None, [["epoch_length", EPOCH_LENGTH]], + {0: consensus_config}) time.sleep(2) res = nodes[0].json_rpc('adv_disable_doomslug', []) diff --git a/pytest/tests/adversarial/malicious_chain.py b/pytest/tests/adversarial/malicious_chain.py index 437c3063b63..a3772cde71e 100644 --- a/pytest/tests/adversarial/malicious_chain.py +++ b/pytest/tests/adversarial/malicious_chain.py @@ -4,15 +4,17 @@ from cluster import start_cluster -valid_blocks_only = False # creating invalid blocks, should be banned instantly +valid_blocks_only = False # creating invalid blocks, should be banned instantly if "valid_blocks_only" in sys.argv: - valid_blocks_only = True # creating valid blocks, should be fixed by doom slug + valid_blocks_only = True # creating valid blocks, should be fixed by doom slug TIMEOUT = 300 BLOCKS = 25 MALICIOUS_BLOCKS = 50 -nodes = start_cluster(2, 1, 2, None, [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 2, 1, 2, None, + [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {}) started = time.time() @@ -32,7 +34,8 @@ status = nodes[1].get_status() print(status) -res = nodes[1].json_rpc('adv_produce_blocks', [MALICIOUS_BLOCKS, valid_blocks_only]) +res = nodes[1].json_rpc('adv_produce_blocks', + [MALICIOUS_BLOCKS, valid_blocks_only]) assert 'result' in res, res print("Generated %s malicious blocks" % MALICIOUS_BLOCKS) diff --git 
a/pytest/tests/adversarial/start_from_genesis.py b/pytest/tests/adversarial/start_from_genesis.py index f931d2a294e..140189d1a3f 100644 --- a/pytest/tests/adversarial/start_from_genesis.py +++ b/pytest/tests/adversarial/start_from_genesis.py @@ -4,18 +4,20 @@ from cluster import start_cluster -overtake = False # create a new chain which is shorter than current one +overtake = False # create a new chain which is shorter than current one if "overtake" in sys.argv: - overtake = True # create a new chain which is longer than current one + overtake = True # create a new chain which is longer than current one doomslug = True if "doomslug_off" in sys.argv: - doomslug = False # turn off doomslug + doomslug = False # turn off doomslug TIMEOUT = 300 BLOCKS = 30 -nodes = start_cluster(2, 1, 2, None, [["epoch_length", 100], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 2, 1, 2, None, + [["epoch_length", 100], ["block_producer_kickout_threshold", 80]], {}) started = time.time() @@ -33,7 +35,7 @@ print("Got to %s blocks, getting to fun stuff" % BLOCKS) -nodes[0].kill() # to disallow syncing +nodes[0].kill() # to disallow syncing nodes[1].kill() nodes[1].reset_data() diff --git a/pytest/tests/adversarial/wrong_sync_info.py b/pytest/tests/adversarial/wrong_sync_info.py index 4c0e1f3130d..1e7a2135b4b 100644 --- a/pytest/tests/adversarial/wrong_sync_info.py +++ b/pytest/tests/adversarial/wrong_sync_info.py @@ -13,7 +13,9 @@ TIMEOUT = 300 BLOCKS = 30 -nodes = start_cluster(2, 1, 2, None, [["epoch_length", 7], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 2, 1, 2, None, + [["epoch_length", 7], ["block_producer_kickout_threshold", 80]], {}) started = time.time() diff --git a/pytest/tests/contracts/deploy_call_smart_contract.py b/pytest/tests/contracts/deploy_call_smart_contract.py index 138756ba988..ac35fb90973 100644 --- a/pytest/tests/contracts/deploy_call_smart_contract.py +++ 
b/pytest/tests/contracts/deploy_call_smart_contract.py @@ -9,12 +9,17 @@ from transaction import sign_deploy_contract_tx, sign_function_call_tx from utils import load_binary_file, compile_rust_contract -nodes = start_cluster(4, 0, 4, None, [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 4, 0, 4, None, + [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) status = nodes[0].get_status() hash_ = status['sync_info']['latest_block_hash'] hash_ = base58.b58decode(hash_.encode('utf8')) -tx = sign_deploy_contract_tx(nodes[0].signer_key, load_binary_file('../runtime/near-vm-runner/tests/res/test_contract_rs.wasm'), 10, hash_) +tx = sign_deploy_contract_tx( + nodes[0].signer_key, + load_binary_file( + '../runtime/near-vm-runner/tests/res/test_contract_rs.wasm'), 10, hash_) nodes[0].send_tx(tx) time.sleep(3) @@ -22,7 +27,9 @@ status2 = nodes[1].get_status() hash_2 = status2['sync_info']['latest_block_hash'] hash_2 = base58.b58decode(hash_2.encode('utf8')) -tx2 = sign_function_call_tx(nodes[0].signer_key, nodes[0].signer_key.account_id, 'log_something', [], 100000000000, 100000000000, 20, hash_2) +tx2 = sign_function_call_tx(nodes[0].signer_key, nodes[0].signer_key.account_id, + 'log_something', [], 100000000000, 100000000000, 20, + hash_2) res = nodes[1].send_tx_and_wait(tx2, 10) assert res['result']['receipts_outcome'][0]['outcome']['logs'][0] == 'hello' @@ -44,13 +51,15 @@ status3 = nodes[2].get_status() hash_3 = status3['sync_info']['latest_block_hash'] hash_3 = base58.b58decode(hash_3.encode('utf8')) -tx3 = sign_deploy_contract_tx(nodes[2].signer_key, load_binary_file(wasm_file), 10, hash_3) +tx3 = sign_deploy_contract_tx(nodes[2].signer_key, load_binary_file(wasm_file), + 10, hash_3) res = nodes[3].send_tx(tx3) time.sleep(3) status4 = nodes[3].get_status() hash_4 = status4['sync_info']['latest_block_hash'] hash_4 = base58.b58decode(hash_4.encode('utf8')) -tx4 = sign_function_call_tx(nodes[2].signer_key, 
nodes[2].signer_key.account_id, 'log_world', [], 100000000000, 0, 20, hash_4) +tx4 = sign_function_call_tx(nodes[2].signer_key, nodes[2].signer_key.account_id, + 'log_world', [], 100000000000, 0, 20, hash_4) res = nodes[3].send_tx_and_wait(tx4, 10) assert res['result']['receipts_outcome'][0]['outcome']['logs'][0] == 'world' diff --git a/pytest/tests/contracts/gibberish.py b/pytest/tests/contracts/gibberish.py index 3221195e74b..f89896c670e 100644 --- a/pytest/tests/contracts/gibberish.py +++ b/pytest/tests/contracts/gibberish.py @@ -10,9 +10,12 @@ from transaction import sign_deploy_contract_tx, sign_function_call_tx from utils import load_binary_file -nodes = start_cluster(3, 0, 4, None, [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 3, 0, 4, None, + [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {}) -wasm_blob_1 = load_binary_file('../runtime/near-vm-runner/tests/res/test_contract_rs.wasm') +wasm_blob_1 = load_binary_file( + '../runtime/near-vm-runner/tests/res/test_contract_rs.wasm') status = nodes[0].get_status() hash_ = status['sync_info']['latest_block_hash'] @@ -20,8 +23,10 @@ for iter_ in range(10): print("Deploying garbage contract #%s" % iter_) - wasm_blob = bytes([random.randint(0, 255) for _ in range(random.randint(200, 500))]) - tx = sign_deploy_contract_tx(nodes[0].signer_key, wasm_blob, 10 + iter_, hash_) + wasm_blob = bytes( + [random.randint(0, 255) for _ in range(random.randint(200, 500))]) + tx = sign_deploy_contract_tx(nodes[0].signer_key, wasm_blob, 10 + iter_, + hash_) nodes[0].send_tx_and_wait(tx, 5) for iter_ in range(10): @@ -30,19 +35,26 @@ new_name = '%s_mething' % iter_ new_output = '%s_llo' % iter_ - wasm_blob = wasm_blob_1.replace(bytes('something', 'utf8'), bytes(new_name, 'utf8')).replace(bytes('hello', 'utf8'), bytes(new_output, 'utf8')) + wasm_blob = wasm_blob_1.replace(bytes('something', 'utf8'), + bytes(new_name, 'utf8')).replace( + bytes('hello', 'utf8'), + 
bytes(new_output, 'utf8')) assert len(wasm_blob) == len(wasm_blob_1) pos = random.randint(0, len(wasm_blob_1) - 1) val = random.randint(0, 255) wasm_blob = wasm_blob[:pos] + bytes([val]) + wasm_blob[pos + 1:] - tx = sign_deploy_contract_tx(nodes[0].signer_key, wasm_blob, 20 + iter_ * 2, hash_) + tx = sign_deploy_contract_tx(nodes[0].signer_key, wasm_blob, 20 + iter_ * 2, + hash_) res = nodes[0].send_tx_and_wait(tx, 10) print(res) print("Invoking perturbed contract #%s" % iter_) - tx2 = sign_function_call_tx(nodes[0].signer_key, nodes[0].signer_key.account_id, new_name, [], 100000000000, 100000000000, 20 + iter_ * 2 + 1, hash_) + tx2 = sign_function_call_tx(nodes[0].signer_key, + nodes[0].signer_key.account_id, new_name, [], + 100000000000, 100000000000, 20 + iter_ * 2 + 1, + hash_) # don't have any particular expectation for the call result res = nodes[1].send_tx_and_wait(tx2, 10) @@ -59,7 +71,9 @@ status2 = nodes[1].get_status() hash_2 = status2['sync_info']['latest_block_hash'] hash_2 = base58.b58decode(hash_2.encode('utf8')) -tx2 = sign_function_call_tx(nodes[0].signer_key, nodes[0].signer_key.account_id, 'log_something', [], 100000000000, 100000000000, 62, hash_2) +tx2 = sign_function_call_tx(nodes[0].signer_key, nodes[0].signer_key.account_id, + 'log_something', [], 100000000000, 100000000000, 62, + hash_2) res = nodes[1].send_tx_and_wait(tx2, 10) print(res) assert res['result']['receipts_outcome'][0]['outcome']['logs'][0] == 'hello' diff --git a/pytest/tests/mocknet/sanity.py b/pytest/tests/mocknet/sanity.py index aa48cf07e57..28c49c76e36 100644 --- a/pytest/tests/mocknet/sanity.py +++ b/pytest/tests/mocknet/sanity.py @@ -15,8 +15,14 @@ print() # Test balance transfers -initial_balances = [int(nodes[0].get_account(account.account_id)['result']['amount']) for account in accounts] -nonces = [nodes[0].get_nonce_for_pk(account.account_id, account.pk) for account in accounts] +initial_balances = [ + int(nodes[0].get_account(account.account_id)['result']['amount']) 
+ for account in accounts +] +nonces = [ + nodes[0].get_nonce_for_pk(account.account_id, account.pk) + for account in accounts +] print("INITIAL BALANCES", initial_balances) print("NONCES", nonces) @@ -24,10 +30,14 @@ last_block_hash = nodes[0].get_status()['sync_info']['latest_block_hash'] last_block_hash_decoded = base58.b58decode(last_block_hash.encode('utf8')) -tx = sign_payment_tx(accounts[0], accounts[1].account_id, 100, nonces[0] + 1, last_block_hash_decoded) +tx = sign_payment_tx(accounts[0], accounts[1].account_id, 100, nonces[0] + 1, + last_block_hash_decoded) nodes[0].send_tx_and_wait(tx, timeout=10) -new_balances = [int(nodes[0].get_account(account.account_id)['result']['amount']) for account in accounts] +new_balances = [ + int(nodes[0].get_account(account.account_id)['result']['amount']) + for account in accounts +] print("NEW BALANCES", new_balances) @@ -36,9 +46,15 @@ # Test contract deployment -tx = sign_deploy_contract_tx(accounts[2], load_binary_file('../runtime/near-vm-runner/tests/res/test_contract_rs.wasm'), nonces[2] + 1, last_block_hash_decoded) +tx = sign_deploy_contract_tx( + accounts[2], + load_binary_file( + '../runtime/near-vm-runner/tests/res/test_contract_rs.wasm'), + nonces[2] + 1, last_block_hash_decoded) nodes[0].send_tx_and_wait(tx, timeout=20) -tx2 = sign_function_call_tx(accounts[2], accounts[2].account_id, 'log_something', [], 100000000000, 100000000000, nonces[2] + 2, last_block_hash_decoded) +tx2 = sign_function_call_tx(accounts[2], accounts[2].account_id, + 'log_something', [], 100000000000, 100000000000, + nonces[2] + 2, last_block_hash_decoded) res = nodes[1].send_tx_and_wait(tx2, 10) assert res['result']['receipts_outcome'][0]['outcome']['logs'][0] == 'hello' diff --git a/pytest/tests/sanity/backward_compatible.py b/pytest/tests/sanity/backward_compatible.py index 44dbd4b1217..a717d73ac8f 100755 --- a/pytest/tests/sanity/backward_compatible.py +++ b/pytest/tests/sanity/backward_compatible.py @@ -1,5 +1,4 @@ #!/usr/bin/env 
python - """ This script runs node from stable branch and from current branch and makes sure they are backward compatible. @@ -23,16 +22,29 @@ def main(): shutil.rmtree(node_root) subprocess.check_output('mkdir -p /tmp/near', shell=True) - near_root, (stable_branch, current_branch) = branches.prepare_ab_test("beta") + near_root, (stable_branch, + current_branch) = branches.prepare_ab_test("beta") # Setup local network. - subprocess.call(["%snear-%s" % (near_root, stable_branch), "--home=%s" % node_root, "testnet", "--v", "2", "--prefix", "test"]) + subprocess.call([ + "%snear-%s" % (near_root, stable_branch), + "--home=%s" % node_root, "testnet", "--v", "2", "--prefix", "test" + ]) # Run both binaries at the same time. - config = {"local": True, 'near_root': near_root, 'binary_name': "near-%s" % stable_branch } - stable_node = cluster.spin_up_node(config, near_root, os.path.join(node_root, "test0"), 0, None, None) + config = { + "local": True, + 'near_root': near_root, + 'binary_name': "near-%s" % stable_branch + } + stable_node = cluster.spin_up_node(config, near_root, + os.path.join(node_root, "test0"), 0, + None, None) config["binary_name"] = "near-%s" % current_branch - current_node = cluster.spin_up_node(config, near_root, os.path.join(node_root, "test1"), 1, stable_node.node_key.pk, stable_node.addr()) + current_node = cluster.spin_up_node(config, near_root, + os.path.join(node_root, "test1"), 1, + stable_node.node_key.pk, + stable_node.addr()) # Check it all works. # TODO: we should run for at least 2 epochs. 
diff --git a/pytest/tests/sanity/block_production.py b/pytest/tests/sanity/block_production.py index d18fb613904..ecf5a8ca8d7 100644 --- a/pytest/tests/sanity/block_production.py +++ b/pytest/tests/sanity/block_production.py @@ -13,13 +13,14 @@ sys.path.append('lib') - from cluster import start_cluster TIMEOUT = 150 BLOCKS = 50 -nodes = start_cluster(4, 0, 4, None, [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 4, 0, 4, None, + [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) started = time.time() @@ -30,11 +31,16 @@ height_to_hash = {} -def min_common(): return min([min(x) for x in last_common]) + +def min_common(): + return min([min(x) for x in last_common]) + + def heights_report(): for i, sh in enumerate(seen_heights): print("Node %s: %s" % (i, sorted(list(sh)))) + while max_height < BLOCKS: assert time.time() - started < TIMEOUT for i, node in enumerate(nodes): @@ -45,12 +51,15 @@ def heights_report(): if height > max_height: max_height = height if height % 10 == 0: - print("Reached height %s, min common: %s" % (height, min_common())) + print("Reached height %s, min common: %s" % + (height, min_common())) if height not in height_to_hash: height_to_hash[height] = hash_ else: - assert height_to_hash[height] == hash_, "height: %s, h1: %s, h2: %s" % (height, hash_, height_to_hash[height]) + assert height_to_hash[ + height] == hash_, "height: %s, h1: %s, h2: %s" % ( + height, hash_, height_to_hash[height]) last_heights[i] = height seen_heights[i].add(height) @@ -69,7 +78,7 @@ def heights_report(): assert min_common() + 2 >= BLOCKS, heights_report() doomslug_final_block = nodes[0].json_rpc('block', {'finality': 'near-final'}) -assert(doomslug_final_block['result']['header']['height'] >= BLOCKS - 10) +assert (doomslug_final_block['result']['header']['height'] >= BLOCKS - 10) nfg_final_block = nodes[0].json_rpc('block', {'finality': 'final'}) -assert(nfg_final_block['result']['header']['height'] >= 
BLOCKS - 10) +assert (nfg_final_block['result']['header']['height'] >= BLOCKS - 10) diff --git a/pytest/tests/sanity/block_sync.py b/pytest/tests/sanity/block_sync.py index ca70898afd9..06e0bb05287 100644 --- a/pytest/tests/sanity/block_sync.py +++ b/pytest/tests/sanity/block_sync.py @@ -6,24 +6,46 @@ sys.path.append('lib') - from cluster import start_cluster BLOCKS = 10 TIMEOUT = 25 -consensus_config0 = {"consensus": {"block_fetch_horizon": 30, "block_header_fetch_horizon": 30}} -consensus_config1 = {"consensus": {"min_block_production_delay": {"secs": 100, "nanos": 0}, "max_block_production_delay": {"secs": 200, "nanos": 0}, "max_block_wait_delay": {"secs": 1000, "nanos": 0}}} +consensus_config0 = { + "consensus": { + "block_fetch_horizon": 30, + "block_header_fetch_horizon": 30 + } +} +consensus_config1 = { + "consensus": { + "min_block_production_delay": { + "secs": 100, + "nanos": 0 + }, + "max_block_production_delay": { + "secs": 200, + "nanos": 0 + }, + "max_block_wait_delay": { + "secs": 1000, + "nanos": 0 + } + } +} # give more stake to the bootnode so that it can produce the blocks alone nodes = start_cluster( 2, 0, 4, None, - [ - ["epoch_length", 100], ["num_block_producer_seats", 100], ["num_block_producer_seats_per_shard", [25, 25, 25, 25]], - ["validators", 0, "amount", "110000000000000000000000000000000"], ["records", 0, "Account", "account", "locked", "110000000000000000000000000000000"], - ["total_supply", "3060000000000000000000000000000000"] - ], - {0: consensus_config0, 1: consensus_config1} -) + [["epoch_length", 100], ["num_block_producer_seats", 100], + ["num_block_producer_seats_per_shard", [25, 25, 25, 25]], + ["validators", 0, "amount", "110000000000000000000000000000000"], + [ + "records", 0, "Account", "account", "locked", + "110000000000000000000000000000000" + ], ["total_supply", "3060000000000000000000000000000000"]], { + 0: consensus_config0, + 1: consensus_config1 + }) time.sleep(3) node0_height = 0 @@ -52,4 +74,3 @@ if cur_height 
>= node1_height: break time.sleep(1) - diff --git a/pytest/tests/sanity/epoch_switches.py b/pytest/tests/sanity/epoch_switches.py index fb62147db5a..eb43992c0d4 100644 --- a/pytest/tests/sanity/epoch_switches.py +++ b/pytest/tests/sanity/epoch_switches.py @@ -13,15 +13,26 @@ EPOCH_LENGTH = 20 config = None -nodes = start_cluster(2, 2, 1, config, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40]], {2: {"tracked_shards": [0]}}) +nodes = start_cluster( + 2, 2, 1, config, + [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40]], + {2: { + "tracked_shards": [0] + }}) started = time.time() + def get_validators(): return set([x['account_id'] for x in nodes[0].get_status()['validators']]) + def get_stakes(): - return [int(nodes[2].get_account("test%s" % i)['result']['locked']) for i in range(3)] + return [ + int(nodes[2].get_account("test%s" % i)['result']['locked']) + for i in range(3) + ] + status = nodes[0].get_status() prev_hash = status['sync_info']['latest_block_hash'] @@ -48,7 +59,8 @@ def get_stakes(): height = block['result']['header']['height'] # we expect no skipped heights - height_to_num_approvals[height] = len(block['result']['header']['approvals']) + height_to_num_approvals[height] = len( + block['result']['header']['approvals']) if height > largest_height: print("... 
%s" % height) @@ -60,13 +72,18 @@ def get_stakes(): if height > epoch_switch_height + 2: for val_ord in next_vals: - tx = sign_staking_tx(nodes[val_ord].signer_key, nodes[val_ord].validator_key, 0, next_nonce, base58.b58decode(prev_hash.encode('utf8'))) + tx = sign_staking_tx(nodes[val_ord].signer_key, + nodes[val_ord].validator_key, 0, next_nonce, + base58.b58decode(prev_hash.encode('utf8'))) for target in range(0, 4): nodes[target].send_tx(tx) next_nonce += 1 for val_ord in cur_vals: - tx = sign_staking_tx(nodes[val_ord].signer_key, nodes[val_ord].validator_key, 50000000000000000000000000000000, next_nonce, base58.b58decode(prev_hash.encode('utf8'))) + tx = sign_staking_tx(nodes[val_ord].signer_key, + nodes[val_ord].validator_key, + 50000000000000000000000000000000, next_nonce, + base58.b58decode(prev_hash.encode('utf8'))) for target in range(0, 4): nodes[target].send_tx(tx) next_nonce += 1 @@ -76,7 +93,7 @@ def get_stakes(): print("EPOCH %s, VALS %s" % (epoch_id, get_validators())) - if len(seen_epochs) > 2: # the first two epochs share the validator set + if len(seen_epochs) > 2: # the first two epochs share the validator set assert height_to_num_approvals[height] == 2 has_prev = height - 1 in height_to_num_approvals @@ -96,7 +113,6 @@ def get_stakes(): if i in height_to_num_approvals: assert height_to_num_approvals[i] == 2 - cur_vals, next_vals = next_vals, cur_vals epoch_switch_height = height diff --git a/pytest/tests/sanity/garbage_collection.py b/pytest/tests/sanity/garbage_collection.py index 0f98db88e95..d32d797d202 100644 --- a/pytest/tests/sanity/garbage_collection.py +++ b/pytest/tests/sanity/garbage_collection.py @@ -11,16 +11,35 @@ TARGET_HEIGHT = 60 TIMEOUT = 30 -consensus_config = {"consensus": {"min_block_production_delay": {"secs": 0, "nanos": 100000000}, "max_block_production_delay": {"secs": 0, "nanos": 400000000}, "max_block_wait_delay": {"secs": 0, "nanos": 400000000}}} +consensus_config = { + "consensus": { + "min_block_production_delay": { 
+ "secs": 0, + "nanos": 100000000 + }, + "max_block_production_delay": { + "secs": 0, + "nanos": 400000000 + }, + "max_block_wait_delay": { + "secs": 0, + "nanos": 400000000 + } + } +} nodes = start_cluster( 2, 0, 1, None, - [ - ["epoch_length", 10], ["num_block_producer_seats", 5], ["num_block_producer_seats_per_shard", [5]], ["validators", 0, "amount", "110000000000000000000000000000000"], ["records", 0, "Account", "account", "locked", "110000000000000000000000000000000"], - ["total_supply", "3060000000000000000000000000000000"] - ], - {0: consensus_config, 1: consensus_config} -) + [["epoch_length", 10], ["num_block_producer_seats", 5], + ["num_block_producer_seats_per_shard", [5]], + ["validators", 0, "amount", "110000000000000000000000000000000"], + [ + "records", 0, "Account", "account", "locked", + "110000000000000000000000000000000" + ], ["total_supply", "3060000000000000000000000000000000"]], { + 0: consensus_config, + 1: consensus_config + }) print('Kill node 1') nodes[1].kill() diff --git a/pytest/tests/sanity/garbage_collection1.py b/pytest/tests/sanity/garbage_collection1.py index 53d95fa4efe..f5dffa051ac 100644 --- a/pytest/tests/sanity/garbage_collection1.py +++ b/pytest/tests/sanity/garbage_collection1.py @@ -13,16 +13,37 @@ TARGET_HEIGHT = 60 TIMEOUT = 30 -consensus_config = {"consensus": {"min_block_production_delay": {"secs": 0, "nanos": 100000000}, "max_block_production_delay": {"secs": 0, "nanos": 400000000}, "max_block_wait_delay": {"secs": 0, "nanos": 400000000}}} +consensus_config = { + "consensus": { + "min_block_production_delay": { + "secs": 0, + "nanos": 100000000 + }, + "max_block_production_delay": { + "secs": 0, + "nanos": 400000000 + }, + "max_block_wait_delay": { + "secs": 0, + "nanos": 400000000 + } + } +} nodes = start_cluster( 3, 0, 1, None, - [ - ["epoch_length", 10], ["num_block_producer_seats", 5], ["num_block_producer_seats_per_shard", [5]], - ["total_supply", "4210000000000000000000000000000000"], ["validators", 0, "amount", 
"260000000000000000000000000000000"], ["records", 0, "Account", "account", "locked", "260000000000000000000000000000000"] - ], - {0: consensus_config, 1: consensus_config, 2: consensus_config} -) + [["epoch_length", 10], ["num_block_producer_seats", 5], + ["num_block_producer_seats_per_shard", [5]], + ["total_supply", "4210000000000000000000000000000000"], + ["validators", 0, "amount", "260000000000000000000000000000000"], + [ + "records", 0, "Account", "account", "locked", + "260000000000000000000000000000000" + ]], { + 0: consensus_config, + 1: consensus_config, + 2: consensus_config + }) print('kill node1 and node2') nodes[1].kill() diff --git a/pytest/tests/sanity/gc_after_sync.py b/pytest/tests/sanity/gc_after_sync.py index d04739b7e70..8edd50ede7a 100644 --- a/pytest/tests/sanity/gc_after_sync.py +++ b/pytest/tests/sanity/gc_after_sync.py @@ -12,19 +12,35 @@ AFTER_SYNC_HEIGHT = 150 TIMEOUT = 300 -consensus_config = {"consensus": {"min_block_production_delay": {"secs": 0, "nanos": 100000000}, "max_block_production_delay": {"secs": 0, "nanos": 400000000}, "max_block_wait_delay": {"secs": 0, "nanos": 400000000}}} +consensus_config = { + "consensus": { + "min_block_production_delay": { + "secs": 0, + "nanos": 100000000 + }, + "max_block_production_delay": { + "secs": 0, + "nanos": 400000000 + }, + "max_block_wait_delay": { + "secs": 0, + "nanos": 400000000 + } + } +} nodes = start_cluster( 4, 0, 1, None, - [ - ["epoch_length", 10], - ["num_block_producer_seats_per_shard", [5]], - ["validators", 0, "amount", "60000000000000000000000000000000"], - ["records", 0, "Account", "account", "locked", "60000000000000000000000000000000"], - ["total_supply", "5010000000000000000000000000000000"] - ], - {0: consensus_config, 1: consensus_config, 2: consensus_config} -) + [["epoch_length", 10], ["num_block_producer_seats_per_shard", [5]], + ["validators", 0, "amount", "60000000000000000000000000000000"], + [ + "records", 0, "Account", "account", "locked", + 
"60000000000000000000000000000000" + ], ["total_supply", "5010000000000000000000000000000000"]], { + 0: consensus_config, + 1: consensus_config, + 2: consensus_config + }) node0_height = 0 while node0_height < TARGET_HEIGHT: diff --git a/pytest/tests/sanity/gc_sync_after_sync.py b/pytest/tests/sanity/gc_sync_after_sync.py index 1c167429ec9..e1e602345d7 100644 --- a/pytest/tests/sanity/gc_sync_after_sync.py +++ b/pytest/tests/sanity/gc_sync_after_sync.py @@ -8,7 +8,7 @@ swap_nodes = False if "swap_nodes" in sys.argv: - swap_nodes = True # swap nodes 0 and 1 after first sync + swap_nodes = True # swap nodes 0 and 1 after first sync from cluster import start_cluster @@ -17,21 +17,27 @@ TARGET_HEIGHT_3 = 250 TIMEOUT = 300 -consensus_config = {"consensus": {"block_fetch_horizon": 20, "block_header_fetch_horizon": 20}} +consensus_config = { + "consensus": { + "block_fetch_horizon": 20, + "block_header_fetch_horizon": 20 + } +} nodes = start_cluster( 4, 0, 1, None, - [ - ["epoch_length", 10], - ["validators", 0, "amount", "12500000000000000000000000000000"], - ["records", 0, "Account", "account", "locked", "12500000000000000000000000000000"], - ["validators", 1, "amount", "12500000000000000000000000000000"], - ["records", 2, "Account", "account", "locked", "12500000000000000000000000000000"], - ['total_supply', "4925000000000000000000000000000000"], - ["num_block_producer_seats", 10], ["num_block_producer_seats_per_shard", [10]] - ], - {1: consensus_config} -) + [["epoch_length", 10], + ["validators", 0, "amount", "12500000000000000000000000000000"], + [ + "records", 0, "Account", "account", "locked", + "12500000000000000000000000000000" + ], ["validators", 1, "amount", "12500000000000000000000000000000"], + [ + "records", 2, "Account", "account", "locked", + "12500000000000000000000000000000" + ], ['total_supply', "4925000000000000000000000000000000"], + ["num_block_producer_seats", 10], + ["num_block_producer_seats_per_shard", [10]]], {1: consensus_config}) print('Kill 
node 1') nodes[1].kill() diff --git a/pytest/tests/sanity/lightclnt.py b/pytest/tests/sanity/lightclnt.py index ca55d2af4b9..c75d3a4505c 100644 --- a/pytest/tests/sanity/lightclnt.py +++ b/pytest/tests/sanity/lightclnt.py @@ -14,26 +14,29 @@ client_config_changes = {} if not config['local']: client_config_changes = { - "consensus": { - "min_block_production_delay": { - "secs": 4, - "nanos": 0, - }, - "max_block_production_delay": { - "secs": 8, - "nanos": 0, - }, - "max_block_wait_delay": { - "secs": 24, - "nanos": 0, - }, - } + "consensus": { + "min_block_production_delay": { + "secs": 4, + "nanos": 0, + }, + "max_block_production_delay": { + "secs": 8, + "nanos": 0, + }, + "max_block_wait_delay": { + "secs": 24, + "nanos": 0, + }, + } } TIMEOUT = 600 client_config_changes['archive'] = True -nodes = start_cluster(4, 0, 4, None, [["epoch_length", 6], ["block_producer_kickout_threshold", 80]], client_config_changes) +nodes = start_cluster( + 4, 0, 4, None, + [["epoch_length", 6], ["block_producer_kickout_threshold", 80]], + client_config_changes) started = time.time() @@ -44,14 +47,20 @@ epochs = [] block_producers_map = {} + + def get_light_client_block(hash_, last_known_block): global block_producers_map ret = nodes[0].json_rpc('next_light_client_block', [hash_]) if ret['result'] is not None and last_known_block is not None: - validate_light_client_block(last_known_block, ret['result'], block_producers_map, panic=True) + validate_light_client_block(last_known_block, + ret['result'], + block_producers_map, + panic=True) return ret + def get_up_to(from_, to): global hash_to_height, hash_to_epoch, hash_to_next_epoch, height_to_hash, epochs @@ -70,7 +79,6 @@ def get_up_to(from_, to): hash_to_epoch[hash_] = block['result']['header']['epoch_id'] hash_to_next_epoch[hash_] = block['result']['header']['next_epoch_id'] - if height >= to: break @@ -81,6 +89,7 @@ def get_up_to(from_, to): if len(epochs) == 0 or epochs[-1] != hash_to_epoch[hash_]: 
epochs.append(hash_to_epoch[hash_]) + # don't start from 1, sicne couple heights get produced while the nodes spin up get_up_to(4, 29) @@ -105,15 +114,22 @@ def get_up_to(from_, to): assert res['result']['inner_lite']['epoch_id'] == epochs[iter_] print(iter_, heights[iter_]) - assert res['result']['inner_lite']['height'] == heights[iter_], res['result']['inner_lite'] + assert res['result']['inner_lite']['height'] == heights[iter_], res[ + 'result']['inner_lite'] - last_known_block_hash = compute_block_hash(res['result']['inner_lite'], res['result']['inner_rest_hash'], res['result']['prev_hash']).decode('ascii') - assert last_known_block_hash == height_to_hash[res['result']['inner_lite']['height']], "%s != %s" % (last_known_block_hash, height_to_hash[res['result']['inner_lite']['height']]) + last_known_block_hash = compute_block_hash( + res['result']['inner_lite'], res['result']['inner_rest_hash'], + res['result']['prev_hash']).decode('ascii') + assert last_known_block_hash == height_to_hash[ + res['result']['inner_lite']['height']], "%s != %s" % ( + last_known_block_hash, + height_to_hash[res['result']['inner_lite']['height']]) if last_known_block is None: - block_producers_map[res['result']['inner_lite']['next_epoch_id']] = res['result']['next_bps'] + block_producers_map[res['result']['inner_lite'] + ['next_epoch_id']] = res['result']['next_bps'] last_known_block = res['result'] - + iter_ += 1 res = get_light_client_block(height_to_hash[26], last_known_block) @@ -135,4 +151,3 @@ def get_up_to(from_, to): res = get_light_client_block(height_to_hash[28], last_known_block) assert res['result']['inner_lite']['height'] == 31 - diff --git a/pytest/tests/sanity/one_val.py b/pytest/tests/sanity/one_val.py index d8e8eedbee3..dcc44da0528 100644 --- a/pytest/tests/sanity/one_val.py +++ b/pytest/tests/sanity/one_val.py @@ -7,25 +7,24 @@ sys.path.append('lib') - from cluster import start_cluster from utils import TxContext from transaction import sign_payment_tx TIMEOUT = 240 
- # give more stake to the bootnode so that it can produce the blocks alone nodes = start_cluster( 2, 1, 8, None, - [ - ["num_block_producer_seats", 199], ["num_block_producer_seats_per_shard", [24, 25, 25, 25, 25, 25, 25, 25]], - ["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], ["block_producer_kickout_threshold", 70], - ["validators", 0, "amount", "110000000000000000000000000000000"], ["records", 0, "Account", "account", "locked", "110000000000000000000000000000000"], - ["total_supply", "4060000000000000000000000000000000"] - ], - {} -) + [["num_block_producer_seats", 199], + ["num_block_producer_seats_per_shard", [24, 25, 25, 25, 25, 25, 25, 25]], + ["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], + ["block_producer_kickout_threshold", 70], + ["validators", 0, "amount", "110000000000000000000000000000000"], + [ + "records", 0, "Account", "account", "locked", + "110000000000000000000000000000000" + ], ["total_supply", "4060000000000000000000000000000000"]], {}) time.sleep(3) nodes[1].kill() @@ -52,16 +51,17 @@ max_height = height if ctx.get_balances() == ctx.expected_balances: - print("Balances caught up, took %s blocks, moving on" % (height - sent_height)); + print("Balances caught up, took %s blocks, moving on" % + (height - sent_height)) ctx.send_moar_txs(hash_, 10, use_routing=True) sent_height = height caught_up_times += 1 else: if height > sent_height + 30: - assert False, "Balances before: %s\nExpected balances: %s\nCurrent balances: %s\nSent at height: %s\n" % (last_balances, ctx.expected_balances, ctx.get_balances(), sent_height) + assert False, "Balances before: %s\nExpected balances: %s\nCurrent balances: %s\nSent at height: %s\n" % ( + last_balances, ctx.expected_balances, ctx.get_balances(), + sent_height) time.sleep(0.2) if caught_up_times == 3: break - - diff --git a/pytest/tests/sanity/restaked.py b/pytest/tests/sanity/restaked.py index 95f98f51672..3d40c8ae3a4 100644 --- 
a/pytest/tests/sanity/restaked.py +++ b/pytest/tests/sanity/restaked.py @@ -11,13 +11,13 @@ sys.path.append('lib') - from cluster import start_cluster, load_config TIMEOUT = 150 BLOCKS = 50 EPOCH_LENGTH = 10 + def atexit_stop_restaked(pid): print("Cleaning up restaked on script exit") os.kill(pid, signal.SIGKILL) @@ -28,15 +28,20 @@ def start_restaked(node_dir, rpc_port, config): config = load_config() near_root = config['near_root'] command = [ - near_root + 'restaked', '--home=%s' % node_dir, - '--rpc-url=127.0.0.1:%d' % rpc_port, '--wait-period=1'] + near_root + 'restaked', + '--home=%s' % node_dir, + '--rpc-url=127.0.0.1:%d' % rpc_port, '--wait-period=1' + ] pid = subprocess.Popen(command).pid print("Starting restaked for %s, rpc = 0.0.0.0:%d" % (node_dir, rpc_port)) atexit.register(atexit_stop_restaked, pid) # Local: -nodes = start_cluster(4, 0, 1, None, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 4, 0, 1, None, + [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 80]], + {}) # Remote: # NEAR_PYTEST_CONFIG=remote.json python tests/sanity/block_production.py diff --git a/pytest/tests/sanity/restart.py b/pytest/tests/sanity/restart.py index e871c4eb9bd..2fe228d07fb 100644 --- a/pytest/tests/sanity/restart.py +++ b/pytest/tests/sanity/restart.py @@ -6,14 +6,15 @@ sys.path.append('lib') - from cluster import start_cluster TIMEOUT = 150 BLOCKS1 = 20 BLOCKS2 = 40 -nodes = start_cluster(2, 0, 2, None, [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 2, 0, 2, None, + [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) started = time.time() @@ -24,11 +25,16 @@ height_to_hash = {} -def min_common(): return min([min(x) for x in last_common]) + +def min_common(): + return min([min(x) for x in last_common]) + + def heights_report(): for i, sh in enumerate(seen_heights): print("Node %s: %s" % (i, sorted(list(sh)))) + while 
max_height < BLOCKS1: assert time.time() - started < TIMEOUT for i, node in enumerate(nodes): @@ -39,12 +45,15 @@ def heights_report(): if height > max_height: max_height = height if height % 10 == 0: - print("Reached height %s, min common: %s" % (height, min_common())) + print("Reached height %s, min common: %s" % + (height, min_common())) if height not in height_to_hash: height_to_hash[height] = hash_ else: - assert height_to_hash[height] == hash_, "height: %s, h1: %s, h2: %s" % (height, hash_, height_to_hash[height]) + assert height_to_hash[ + height] == hash_, "height: %s, h1: %s, h2: %s" % ( + height, hash_, height_to_hash[height]) last_heights[i] = height seen_heights[i].add(height) @@ -73,12 +82,15 @@ def heights_report(): if height > max_height: max_height = height if height % 10 == 0: - print("Reached height %s, min common: %s" % (height, min_common())) + print("Reached height %s, min common: %s" % + (height, min_common())) if height not in height_to_hash: height_to_hash[height] = hash_ else: - assert height_to_hash[height] == hash_, "height: %s, h1: %s, h2: %s" % (height, hash_, height_to_hash[height]) + assert height_to_hash[ + height] == hash_, "height: %s, h1: %s, h2: %s" % ( + height, hash_, height_to_hash[height]) last_heights[i] = height seen_heights[i].add(height) @@ -90,4 +102,3 @@ def heights_report(): assert min_common() + 2 >= height, heights_report() assert min_common() + 2 >= BLOCKS2, heights_report() - diff --git a/pytest/tests/sanity/rpc_finality.py b/pytest/tests/sanity/rpc_finality.py index b84ae002c44..c9f7aa243f6 100644 --- a/pytest/tests/sanity/rpc_finality.py +++ b/pytest/tests/sanity/rpc_finality.py @@ -6,12 +6,12 @@ sys.path.append('lib') - from cluster import start_cluster from utils import TxContext from transaction import sign_payment_tx -nodes = start_cluster(3, 1, 1, None, [["min_gas_price", 0], ["epoch_length", 100]], {}) +nodes = start_cluster(3, 1, 1, None, + [["min_gas_price", 0], ["epoch_length", 100]], {}) time.sleep(3) # 
kill one validating node so that no block can be finalized @@ -24,7 +24,8 @@ token_transfer = 10 status = nodes[0].get_status() latest_block_hash = status['sync_info']['latest_block_hash'] -tx = sign_payment_tx(nodes[0].signer_key, 'test1', token_transfer, 1, base58.b58decode(latest_block_hash.encode('utf8'))) +tx = sign_payment_tx(nodes[0].signer_key, 'test1', token_transfer, 1, + base58.b58decode(latest_block_hash.encode('utf8'))) print(nodes[0].send_tx_and_wait(tx, timeout=20)) # wait for doomslug finality @@ -35,10 +36,14 @@ acc_doomslug_finality = nodes[0].get_account(acc_id, "near-final") acc_nfg_finality = nodes[0].get_account(acc_id, "final") if i == 0: - assert int(acc_no_finality['result']['amount']) == acc0_balance - token_transfer - assert int(acc_doomslug_finality['result']['amount']) == acc0_balance - token_transfer + assert int(acc_no_finality['result'] + ['amount']) == acc0_balance - token_transfer + assert int(acc_doomslug_finality['result'] + ['amount']) == acc0_balance - token_transfer assert int(acc_nfg_finality['result']['amount']) == acc0_balance else: - assert int(acc_no_finality['result']['amount']) == acc1_balance + token_transfer - assert int(acc_doomslug_finality['result']['amount']) == acc1_balance + token_transfer + assert int(acc_no_finality['result'] + ['amount']) == acc1_balance + token_transfer + assert int(acc_doomslug_finality['result'] + ['amount']) == acc1_balance + token_transfer assert int(acc_nfg_finality['result']['amount']) == acc1_balance diff --git a/pytest/tests/sanity/rpc_query.py b/pytest/tests/sanity/rpc_query.py index 057a28bc726..25ee14b8a8e 100644 --- a/pytest/tests/sanity/rpc_query.py +++ b/pytest/tests/sanity/rpc_query.py @@ -7,12 +7,16 @@ sys.path.append('lib') - from cluster import start_cluster from utils import TxContext from transaction import sign_payment_tx -nodes = start_cluster(2, 2, 4, None, [["min_gas_price", 0], ["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {2: {"tracked_shards": 
[0, 1, 2, 3]}}) +nodes = start_cluster(2, 2, 4, None, + [["min_gas_price", 0], ["epoch_length", 10], + ["block_producer_kickout_threshold", 70]], + {2: { + "tracked_shards": [0, 1, 2, 3] + }}) time.sleep(3) @@ -22,7 +26,9 @@ latest_block_hash = status['sync_info']['latest_block_hash'] for j in range(4): if i != j: - tx = sign_payment_tx(nodes[i].signer_key, 'test%s' % j, 100, nonce, base58.b58decode(latest_block_hash.encode('utf8'))) + tx = sign_payment_tx( + nodes[i].signer_key, 'test%s' % j, 100, nonce, + base58.b58decode(latest_block_hash.encode('utf8'))) nonce += 1 print("sending transaction from test%d to test%d" % (i, j)) result = nodes[-1].send_tx_and_wait(tx, timeout=15) @@ -31,12 +37,15 @@ time.sleep(2) for i in range(4): + def fix_result(result): result["result"]["block_hash"] = None result["result"]["block_height"] = None return result + query_result1 = fix_result(nodes[-2].get_account("test%s" % i)) query_result2 = fix_result(nodes[-1].get_account("test%s" % i)) if query_result1 != query_result2: - print("query same account suspicious %s, %s", query_result1, query_result2) + print("query same account suspicious %s, %s", query_result1, + query_result2) assert query_result1 == query_result2, "query same account gives different result" diff --git a/pytest/tests/sanity/rpc_state_changes.py b/pytest/tests/sanity/rpc_state_changes.py index 040d0885663..856abf67e83 100644 --- a/pytest/tests/sanity/rpc_state_changes.py +++ b/pytest/tests/sanity/rpc_state_changes.py @@ -2,7 +2,6 @@ # and call various scenarios to trigger store changes. # Check that the key changes are observable via `changes` RPC call. 
- import sys import base58, base64 import json @@ -15,7 +14,10 @@ from utils import load_binary_file import transaction -nodes = start_cluster(4, 0, 1, None, [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 4, 0, 1, None, + [["epoch_length", 1000], ["block_producer_kickout_threshold", 80]], {}) + def assert_changes_in_block_response(request, expected_response): for node_index, node in enumerate(nodes): @@ -64,28 +66,24 @@ def test_changes_with_new_account_with_access_key(): new_key=new_key, balance=10**24, nonce=7, - block_hash=base58.b58decode(latest_block_hash.encode('utf8')) - ) + block_hash=base58.b58decode(latest_block_hash.encode('utf8'))) new_account_response = nodes[0].send_tx_and_wait(create_account_tx, 10) # Step 2 - block_hash = new_account_response['result']['receipts_outcome'][0]['block_hash'] - assert_changes_in_block_response( - request={"block_id": block_hash}, - expected_response={ - "block_hash": block_hash, - "changes": [ - { - "type": "account_touched", - "account_id": new_key.account_id, - }, - { - "type": "access_key_touched", - "account_id": new_key.account_id, - } - ] - } - ) + block_hash = new_account_response['result']['receipts_outcome'][0][ + 'block_hash'] + assert_changes_in_block_response(request={"block_id": block_hash}, + expected_response={ + "block_hash": + block_hash, + "changes": [{ + "type": "account_touched", + "account_id": new_key.account_id, + }, { + "type": "access_key_touched", + "account_id": new_key.account_id, + }] + }) base_request = { "block_id": block_hash, @@ -93,45 +91,65 @@ def test_changes_with_new_account_with_access_key(): } for request in [ # Test empty account_ids - {**base_request, "account_ids": []}, + { + **base_request, "account_ids": [] + }, # Test an account_id that is a prefix of the original account_id. 
- {**base_request, "account_ids": [new_key.account_id[:-1]]}, + { + **base_request, "account_ids": [new_key.account_id[:-1]] + }, # Test an account_id that has the original account_id as a prefix. - {**base_request, "account_ids": [new_key.account_id + '_extra']}, - ]: - assert_changes_response(request=request, expected_response={"block_hash": block_hash, "changes": []}) + { + **base_request, "account_ids": [new_key.account_id + '_extra'] + }, + ]: + assert_changes_response(request=request, + expected_response={ + "block_hash": block_hash, + "changes": [] + }) # Test happy-path expected_response = { - "block_hash": block_hash, - "changes": [ - { - "cause": { - "type": "receipt_processing", - "receipt_hash": new_account_response["result"]["receipts_outcome"][0]["id"], + "block_hash": + block_hash, + "changes": [{ + "cause": { + "type": + "receipt_processing", + "receipt_hash": + new_account_response["result"]["receipts_outcome"][0]["id"], + }, + "type": "access_key_update", + "change": { + "account_id": new_key.account_id, + "public_key": new_key.pk, + "access_key": { + "nonce": 0, + "permission": "FullAccess" }, - "type": "access_key_update", - "change": { - "account_id": new_key.account_id, - "public_key": new_key.pk, - "access_key": {"nonce": 0, "permission": "FullAccess"}, - } } - ] + }] } for request in [ - { - "block_id": block_hash, - "changes_type": "all_access_key_changes", - "account_ids": [new_key.account_id], - }, - { - "block_id": block_hash, - "changes_type": "all_access_key_changes", - "account_ids": [new_key.account_id + '_non_existing1', new_key.account_id, new_key.account_id + '_non_existing2'], - }, - ]: - assert_changes_response(request=request, expected_response=expected_response) + { + "block_id": block_hash, + "changes_type": "all_access_key_changes", + "account_ids": [new_key.account_id], + }, + { + "block_id": + block_hash, + "changes_type": + "all_access_key_changes", + "account_ids": [ + new_key.account_id + '_non_existing1', 
new_key.account_id, + new_key.account_id + '_non_existing2' + ], + }, + ]: + assert_changes_response(request=request, + expected_response=expected_response) # Step 3 status = nodes[0].get_status() @@ -141,28 +159,25 @@ def test_changes_with_new_account_with_access_key(): target_account_id=new_key.account_id, key_for_deletion=new_key, nonce=8, - block_hash=base58.b58decode(latest_block_hash.encode('utf8')) - ) - delete_access_key_response = nodes[1].send_tx_and_wait(delete_access_key_tx, 10) + block_hash=base58.b58decode(latest_block_hash.encode('utf8'))) + delete_access_key_response = nodes[1].send_tx_and_wait( + delete_access_key_tx, 10) # Step 4 - block_hash = delete_access_key_response['result']['receipts_outcome'][0]['block_hash'] - assert_changes_in_block_response( - request={"block_id": block_hash}, - expected_response={ - "block_hash": block_hash, - "changes": [ - { - "type": "account_touched", - "account_id": new_key.account_id, - }, - { - "type": "access_key_touched", - "account_id": new_key.account_id, - } - ] - } - ) + block_hash = delete_access_key_response['result']['receipts_outcome'][0][ + 'block_hash'] + assert_changes_in_block_response(request={"block_id": block_hash}, + expected_response={ + "block_hash": + block_hash, + "changes": [{ + "type": "account_touched", + "account_id": new_key.account_id, + }, { + "type": "access_key_touched", + "account_id": new_key.account_id, + }] + }) base_request = { "block_id": block_hash, @@ -170,60 +185,86 @@ def test_changes_with_new_account_with_access_key(): } for request in [ # Test empty account_ids - {**base_request, "account_ids": []}, + { + **base_request, "account_ids": [] + }, # Test an account_id that is a prefix of the original account_id - {**base_request, "account_ids": [new_key.account_id[:-1]]}, + { + **base_request, "account_ids": [new_key.account_id[:-1]] + }, # Test an account_id that has the original account_id as a prefix - {**base_request, "account_ids": [new_key.account_id + '_extra']}, + { 
+ **base_request, "account_ids": [new_key.account_id + '_extra'] + }, # Test empty keys in single_access_key_changes request - {"block_id": block_hash, "changes_type": "single_access_key_changes", "keys": []}, + { + "block_id": block_hash, + "changes_type": "single_access_key_changes", + "keys": [] + }, # Test non-existing account_id - { - "block_id": block_hash, - "changes_type": "single_access_key_changes", - "keys": [ - {"account_id": new_key.account_id + '_non_existing1', "public_key": new_key.pk}, - ], - }, + { + "block_id": + block_hash, + "changes_type": + "single_access_key_changes", + "keys": [{ + "account_id": new_key.account_id + '_non_existing1', + "public_key": new_key.pk + },], + }, # Test non-existing public_key for an existing account_id - { - "block_id": block_hash, - "changes_type": "single_access_key_changes", - "keys": [ - {"account_id": new_key.account_id, "public_key": new_key.pk[:-3] + 'aaa'}, - ], - }, - ]: - assert_changes_response(request=request, expected_response={"block_hash": block_hash, "changes": []}) + { + "block_id": + block_hash, + "changes_type": + "single_access_key_changes", + "keys": [{ + "account_id": new_key.account_id, + "public_key": new_key.pk[:-3] + 'aaa' + },], + }, + ]: + assert_changes_response(request=request, + expected_response={ + "block_hash": block_hash, + "changes": [] + }) # Test happy-path expected_response = { - "block_hash": block_hash, - "changes": [ - { - "cause": { - 'type': 'transaction_processing', - 'tx_hash': delete_access_key_response['result']['transaction']['hash'], - }, - "type": "access_key_update", - "change": { - "account_id": new_key.account_id, - "public_key": new_key.pk, - "access_key": {"nonce": 8, "permission": "FullAccess"}, - } + "block_hash": + block_hash, + "changes": [{ + "cause": { + 'type': + 'transaction_processing', + 'tx_hash': + delete_access_key_response['result']['transaction']['hash'], }, - { - "cause": { - "type": "receipt_processing", - "receipt_hash": 
delete_access_key_response["result"]["receipts_outcome"][0]["id"] + "type": "access_key_update", + "change": { + "account_id": new_key.account_id, + "public_key": new_key.pk, + "access_key": { + "nonce": 8, + "permission": "FullAccess" }, - "type": "access_key_deletion", - "change": { - "account_id": new_key.account_id, - "public_key": new_key.pk, - } } - ] + }, { + "cause": { + "type": + "receipt_processing", + "receipt_hash": + delete_access_key_response["result"]["receipts_outcome"][0] + ["id"] + }, + "type": "access_key_deletion", + "change": { + "account_id": new_key.account_id, + "public_key": new_key.pk, + } + }] } for request in [ @@ -233,25 +274,44 @@ def test_changes_with_new_account_with_access_key(): "account_ids": [new_key.account_id], }, { - "block_id": block_hash, - "changes_type": "all_access_key_changes", - "account_ids": [new_key.account_id + '_non_existing1', new_key.account_id, new_key.account_id + '_non_existing2'], + "block_id": + block_hash, + "changes_type": + "all_access_key_changes", + "account_ids": [ + new_key.account_id + '_non_existing1', new_key.account_id, + new_key.account_id + '_non_existing2' + ], }, { - "block_id": block_hash, - "changes_type": "single_access_key_changes", - "keys": [{"account_id": new_key.account_id, "public_key": new_key.pk}], + "block_id": + block_hash, + "changes_type": + "single_access_key_changes", + "keys": [{ + "account_id": new_key.account_id, + "public_key": new_key.pk + }], }, { - "block_id": block_hash, - "changes_type": "single_access_key_changes", + "block_id": + block_hash, + "changes_type": + "single_access_key_changes", "keys": [ - {"account_id": new_key.account_id + '_non_existing1', "public_key": new_key.pk}, - {"account_id": new_key.account_id, "public_key": new_key.pk}, + { + "account_id": new_key.account_id + '_non_existing1', + "public_key": new_key.pk + }, + { + "account_id": new_key.account_id, + "public_key": new_key.pk + }, ], }, ]: - assert_changes_response(request=request, 
expected_response=expected_response) + assert_changes_response(request=request, + expected_response=expected_response) def test_key_value_changes(): @@ -271,36 +331,29 @@ def test_key_value_changes(): status = nodes[0].get_status() latest_block_hash = status['sync_info']['latest_block_hash'] deploy_contract_tx = transaction.sign_deploy_contract_tx( - contract_key, - hello_smart_contract, - 10, - base58.b58decode(latest_block_hash.encode('utf8')) - ) + contract_key, hello_smart_contract, 10, + base58.b58decode(latest_block_hash.encode('utf8'))) deploy_contract_response = nodes[0].send_tx_and_wait(deploy_contract_tx, 10) # Step 2 - block_hash = deploy_contract_response['result']['transaction_outcome']['block_hash'] + block_hash = deploy_contract_response['result']['transaction_outcome'][ + 'block_hash'] assert_changes_in_block_response( request={"block_id": block_hash}, expected_response={ - "block_hash": block_hash, - "changes": [ - { - "type": "account_touched", - "account_id": contract_key.account_id, - }, - { - "type": "contract_code_touched", - "account_id": contract_key.account_id, - }, - { - "type": "access_key_touched", - "account_id": contract_key.account_id, - } - - ] - } - ) + "block_hash": + block_hash, + "changes": [{ + "type": "account_touched", + "account_id": contract_key.account_id, + }, { + "type": "contract_code_touched", + "account_id": contract_key.account_id, + }, { + "type": "access_key_touched", + "account_id": contract_key.account_id, + }] + }) base_request = { "block_id": block_hash, @@ -308,40 +361,63 @@ def test_key_value_changes(): } for request in [ # Test empty account_ids - {**base_request, "account_ids": []}, + { + **base_request, "account_ids": [] + }, # Test an account_id that is a prefix of the original account_id - {**base_request, "account_ids": [contract_key.account_id[:-1]]}, + { + **base_request, "account_ids": [contract_key.account_id[:-1]] + }, # Test an account_id that has the original account_id as a prefix - 
{**base_request, "account_ids": [contract_key.account_id + '_extra']}, - ]: - assert_changes_response(request=request, expected_response={"block_hash": block_hash, "changes": []}) + { + **base_request, "account_ids": [contract_key.account_id + '_extra'] + }, + ]: + assert_changes_response(request=request, + expected_response={ + "block_hash": block_hash, + "changes": [] + }) # Test happy-path expected_response = { - "block_hash": block_hash, - "changes": [ - { - "cause": { - "type": "receipt_processing", - "receipt_hash": deploy_contract_response["result"]["receipts_outcome"][0]["id"], - }, - "type": "contract_code_update", - "change": { - "account_id": contract_key.account_id, - "code_base64": base64.b64encode(hello_smart_contract).decode('utf-8'), - } + "block_hash": + block_hash, + "changes": [{ + "cause": { + "type": + "receipt_processing", + "receipt_hash": + deploy_contract_response["result"]["receipts_outcome"][0] + ["id"], }, - ] + "type": "contract_code_update", + "change": { + "account_id": + contract_key.account_id, + "code_base64": + base64.b64encode(hello_smart_contract).decode('utf-8'), + } + },] } base_request = { "block_id": block_hash, "changes_type": "contract_code_changes", } for request in [ - {**base_request, "account_ids": [contract_key.account_id]}, - {**base_request, "account_ids": [contract_key.account_id + '_non_existing1', contract_key.account_id, contract_key.account_id + '_non_existing2']}, - ]: - assert_changes_response(request=request, expected_response=expected_response) + { + **base_request, "account_ids": [contract_key.account_id] + }, + { + **base_request, "account_ids": [ + contract_key.account_id + '_non_existing1', + contract_key.account_id, + contract_key.account_id + '_non_existing2' + ] + }, + ]: + assert_changes_response(request=request, + expected_response=expected_response) # Step 3 status = nodes[1].get_status() @@ -350,41 +426,38 @@ def test_key_value_changes(): def set_value_1(): function_call_1_tx = 
transaction.sign_function_call_tx( - function_caller_key, - contract_key.account_id, - 'setKeyValue', - json.dumps({"key": "my_key", "value": "my_value_1"}).encode('utf-8'), - 10000000000000000, - 100000000000, - 20, - base58.b58decode(latest_block_hash.encode('utf8')) - ) + function_caller_key, contract_key.account_id, 'setKeyValue', + json.dumps({ + "key": "my_key", + "value": "my_value_1" + }).encode('utf-8'), 10000000000000000, 100000000000, 20, + base58.b58decode(latest_block_hash.encode('utf8'))) nodes[1].send_tx_and_wait(function_call_1_tx, 10) + function_call_1_thread = threading.Thread(target=set_value_1) function_call_1_thread.start() function_call_2_tx = transaction.sign_function_call_tx( - function_caller_key, - contract_key.account_id, - 'setKeyValue', - json.dumps({"key": "my_key", "value": "my_value_2"}).encode('utf-8'), - 10000000000000000, - 100000000000, - 30, - base58.b58decode(latest_block_hash.encode('utf8')) - ) + function_caller_key, contract_key.account_id, 'setKeyValue', + json.dumps({ + "key": "my_key", + "value": "my_value_2" + }).encode('utf-8'), 10000000000000000, 100000000000, 30, + base58.b58decode(latest_block_hash.encode('utf8'))) function_call_2_response = nodes[1].send_tx_and_wait(function_call_2_tx, 10) assert function_call_2_response['result']['receipts_outcome'][0]['outcome']['status'] == {'SuccessValue': ''}, \ "Expected successful execution, but the output was: %s" % function_call_2_response function_call_1_thread.join() - tx_block_hash = function_call_2_response['result']['transaction_outcome']['block_hash'] + tx_block_hash = function_call_2_response['result']['transaction_outcome'][ + 'block_hash'] # Step 4 assert_changes_in_block_response( request={"block_id": tx_block_hash}, expected_response={ - "block_hash": tx_block_hash, + "block_hash": + tx_block_hash, "changes": [ { "type": "account_touched", @@ -399,8 +472,7 @@ def set_value_1(): "account_id": contract_key.account_id, }, ] - } - ) + }) base_request = { "block_id": 
block_hash, @@ -409,48 +481,60 @@ def set_value_1(): } for request in [ # Test empty account_ids - {**base_request, "account_ids": []}, + { + **base_request, "account_ids": [] + }, # Test an account_id that is a prefix of the original account_id - {**base_request, "account_ids": [contract_key.account_id[:-1]]}, + { + **base_request, "account_ids": [contract_key.account_id[:-1]] + }, # Test an account_id that has the original account_id as a prefix - {**base_request, "account_ids": [contract_key.account_id + '_extra']}, + { + **base_request, "account_ids": [contract_key.account_id + '_extra'] + }, # Test non-existing key prefix - { - **base_request, - "account_ids": [contract_key.account_id], - "key_prefix_base64": base64.b64encode(b"my_key_with_extra").decode('utf-8'), - }, - ]: - assert_changes_response(request=request, expected_response={"block_hash": block_hash, "changes": []}) + { + **base_request, + "account_ids": [contract_key.account_id], + "key_prefix_base64": + base64.b64encode(b"my_key_with_extra").decode('utf-8'), + }, + ]: + assert_changes_response(request=request, + expected_response={ + "block_hash": block_hash, + "changes": [] + }) # Test happy-path expected_response = { - "block_hash": tx_block_hash, - "changes": [ - { - "cause": { - "type": "receipt_processing", - }, - "type": "data_update", - "change": { - "account_id": contract_key.account_id, - "key_base64": base64.b64encode(b"my_key").decode('utf-8'), - "value_base64": base64.b64encode(b"my_value_1").decode('utf-8'), - } + "block_hash": + tx_block_hash, + "changes": [{ + "cause": { + "type": "receipt_processing", }, - { - "cause": { - "type": "receipt_processing", - "receipt_hash": function_call_2_response["result"]["receipts_outcome"][0]["id"], - }, - "type": "data_update", - "change": { - "account_id": contract_key.account_id, - "key_base64": base64.b64encode(b"my_key").decode('utf-8'), - "value_base64": base64.b64encode(b"my_value_2").decode('utf-8'), - } + "type": "data_update", + "change": 
{ + "account_id": contract_key.account_id, + "key_base64": base64.b64encode(b"my_key").decode('utf-8'), + "value_base64": base64.b64encode(b"my_value_1").decode('utf-8'), } - ] + }, { + "cause": { + "type": + "receipt_processing", + "receipt_hash": + function_call_2_response["result"]["receipts_outcome"][0] + ["id"], + }, + "type": "data_update", + "change": { + "account_id": contract_key.account_id, + "key_base64": base64.b64encode(b"my_key").decode('utf-8'), + "value_base64": base64.b64encode(b"my_value_2").decode('utf-8'), + } + }] } base_request = { @@ -459,19 +543,27 @@ def set_value_1(): "key_prefix_base64": base64.b64encode(b"my_key").decode('utf-8'), } for request in [ - {**base_request, "account_ids": [contract_key.account_id]}, - {**base_request, "account_ids": [contract_key.account_id + '_non_existing1', contract_key.account_id, contract_key.account_id + '_non_existing2']}, - { - **base_request, - "account_ids": [contract_key.account_id], - "key_prefix_base64": base64.b64encode(b"").decode('utf-8'), - }, - { - **base_request, - "account_ids": [contract_key.account_id], - "key_prefix_base64": base64.b64encode(b"my_ke").decode('utf-8'), - }, - ]: + { + **base_request, "account_ids": [contract_key.account_id] + }, + { + **base_request, "account_ids": [ + contract_key.account_id + '_non_existing1', + contract_key.account_id, + contract_key.account_id + '_non_existing2' + ] + }, + { + **base_request, + "account_ids": [contract_key.account_id], + "key_prefix_base64": base64.b64encode(b"").decode('utf-8'), + }, + { + **base_request, + "account_ids": [contract_key.account_id], + "key_prefix_base64": base64.b64encode(b"my_ke").decode('utf-8'), + }, + ]: assert_changes_response( request=request, expected_response=expected_response, diff --git a/pytest/tests/sanity/rpc_tx_forwarding.py b/pytest/tests/sanity/rpc_tx_forwarding.py index 98efb06c294..46abe1bcb9c 100644 --- a/pytest/tests/sanity/rpc_tx_forwarding.py +++ b/pytest/tests/sanity/rpc_tx_forwarding.py @@ 
-7,17 +7,24 @@ sys.path.append('lib') - from cluster import start_cluster from utils import TxContext from transaction import sign_payment_tx -nodes = start_cluster(2, 2, 4, None, [["min_gas_price", 0], ["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {3: {"tracked_shards": [0, 1, 2, 3]}}) +nodes = start_cluster(2, 2, 4, None, + [["min_gas_price", 0], ["epoch_length", 10], + ["block_producer_kickout_threshold", 70]], + {3: { + "tracked_shards": [0, 1, 2, 3] + }}) time.sleep(3) started = time.time() -old_balances = [int(nodes[-1].get_account("test%s" % x)['result']['amount']) for x in [0, 1, 2]] +old_balances = [ + int(nodes[-1].get_account("test%s" % x)['result']['amount']) + for x in [0, 1, 2] +] print("BALANCES BEFORE", old_balances) status = nodes[1].get_status() @@ -25,13 +32,16 @@ time.sleep(5) -tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1, base58.b58decode(hash_.encode('utf8'))) +tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1, + base58.b58decode(hash_.encode('utf8'))) print(nodes[-2].send_tx_and_wait(tx, timeout=20)) -new_balances = [int(nodes[-1].get_account("test%s" % x)['result']['amount']) for x in [0, 1, 2]] +new_balances = [ + int(nodes[-1].get_account("test%s" % x)['result']['amount']) + for x in [0, 1, 2] +] print("BALANCES AFTER", new_balances) old_balances[0] -= 100 old_balances[1] += 100 assert old_balances == new_balances - diff --git a/pytest/tests/sanity/rpc_tx_submission.py b/pytest/tests/sanity/rpc_tx_submission.py index b8c886eb0a8..cfb71dbf2c1 100644 --- a/pytest/tests/sanity/rpc_tx_submission.py +++ b/pytest/tests/sanity/rpc_tx_submission.py @@ -8,39 +8,50 @@ from utils import TxContext from transaction import sign_payment_tx -nodes = start_cluster(2, 0, 1, None, [["min_gas_price", 0], ['max_inflation_rate', [0, 1]], ["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {}) +nodes = start_cluster( + 2, 0, 1, None, + [["min_gas_price", 0], ['max_inflation_rate', [0, 1]], ["epoch_length", 
10], + ["block_producer_kickout_threshold", 70]], {}) time.sleep(3) started = time.time() -old_balances = [int(nodes[0].get_account("test%s" % x)['result']['amount']) for x in [0, 1]] +old_balances = [ + int(nodes[0].get_account("test%s" % x)['result']['amount']) for x in [0, 1] +] print("BALANCES BEFORE", old_balances) status = nodes[0].get_status() hash_ = status['sync_info']['latest_block_hash'] for i in range(3): - tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100 + i, i + 1, base58.b58decode(hash_.encode('utf8'))) + tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100 + i, i + 1, + base58.b58decode(hash_.encode('utf8'))) if i == 0: res = nodes[0].send_tx_and_wait(tx, timeout=20) if 'error' in res: assert False, res else: method_name = 'broadcast_tx_async' if i == 1 else 'EXPERIMENTAL_broadcast_tx_sync' - res = nodes[0].json_rpc(method_name, [base64.b64encode(tx).decode('utf8')]) + res = nodes[0].json_rpc(method_name, + [base64.b64encode(tx).decode('utf8')]) assert 'error' not in res, res time.sleep(5) tx_query_res = nodes[0].json_rpc('tx', [res['result'], 'test0']) assert 'error' not in tx_query_res, tx_query_res time.sleep(1) -new_balances = [int(nodes[0].get_account("test%s" % x)['result']['amount']) for x in [0, 1]] +new_balances = [ + int(nodes[0].get_account("test%s" % x)['result']['amount']) for x in [0, 1] +] print("BALANCES AFTER", new_balances) assert new_balances[0] == old_balances[0] - 303 assert new_balances[1] == old_balances[1] + 303 status = nodes[0].get_status() hash_ = status['sync_info']['latest_block_hash'] -tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1, base58.b58decode(hash_.encode('utf8'))) -res = nodes[0].json_rpc('EXPERIMENTAL_check_tx', [base64.b64encode(tx).decode('utf8')]) +tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1, + base58.b58decode(hash_.encode('utf8'))) +res = nodes[0].json_rpc('EXPERIMENTAL_check_tx', + [base64.b64encode(tx).decode('utf8')]) assert 'TxExecutionError' in res['error']['data'], 
res diff --git a/pytest/tests/sanity/skip_epoch.py b/pytest/tests/sanity/skip_epoch.py index 06c6914934f..99dfa0a84de 100644 --- a/pytest/tests/sanity/skip_epoch.py +++ b/pytest/tests/sanity/skip_epoch.py @@ -17,20 +17,30 @@ config = load_config() # give more stake to the bootnode so that it can produce the blocks alone -near_root, node_dirs = init_cluster(4, 1, 4, config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 7], ["block_producer_kickout_threshold", 40]], {4: {"tracked_shards": [0, 1, 2, 3]}}) +near_root, node_dirs = init_cluster( + 4, 1, 4, config, + [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 7], + ["block_producer_kickout_threshold", 40]], + {4: { + "tracked_shards": [0, 1, 2, 3] + }}) started = time.time() boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None) -node3 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk, boot_node.addr()) -node4 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk, boot_node.addr()) -observer = spin_up_node(config, near_root, node_dirs[4], 4, boot_node.node_key.pk, boot_node.addr()) +node3 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk, + boot_node.addr()) +node4 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk, + boot_node.addr()) +observer = spin_up_node(config, near_root, node_dirs[4], 4, + boot_node.node_key.pk, boot_node.addr()) ctx = TxContext([0, 0, 0, 0, 0], [boot_node, None, node3, node4, observer]) initial_balances = ctx.get_balances() total_supply = sum(initial_balances) -print("Initial balances: %s\nTotal supply: %s" % (initial_balances, total_supply)) +print("Initial balances: %s\nTotal supply: %s" % + (initial_balances, total_supply)) seen_boot_heights = set() sent_txs = False @@ -59,7 +69,8 @@ time.sleep(0.1) # 2. 
Spin up the second node and make sure it gets to 25 as well, and doesn't diverge -node2 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr()) +node2 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, + boot_node.addr()) status = boot_node.get_status() new_height = status['sync_info']['latest_block_height'] @@ -80,7 +91,8 @@ print(new_height) if node2_height > TWENTY_FIVE: - assert node2_height in seen_boot_heights, "%s not in %s" % (node2_height, seen_boot_heights) + assert node2_height in seen_boot_heights, "%s not in %s" % ( + node2_height, seen_boot_heights) break time.sleep(0.1) @@ -95,25 +107,29 @@ balances = ctx.get_balances() print("New balances: %s\nNew total supply: %s" % (balances, sum(balances))) -assert(balances != initial_balances) -assert(sum(balances) == total_supply) +assert (balances != initial_balances) +assert (sum(balances) == total_supply) initial_balances = balances # 4. Stake for the second node to bring it back up as a validator and wait until it actually # becomes one + def get_validators(): return set([x['account_id'] for x in boot_node.get_status()['validators']]) + print(get_validators()) # The stake for node2 must be higher than that of boot_node, so that it can produce blocks # after the boot_node is brought down -tx = sign_staking_tx(node2.signer_key, node2.validator_key, 50000000000000000000000000000000, 20, base58.b58decode(hash_.encode('utf8'))) +tx = sign_staking_tx(node2.signer_key, node2.validator_key, + 50000000000000000000000000000000, 20, + base58.b58decode(hash_.encode('utf8'))) boot_node.send_tx(tx) -assert(get_validators() == set(["test0", "test2", "test3"])), get_validators() +assert (get_validators() == set(["test0", "test2", "test3"])), get_validators() while True: if time.time() - started > TIMEOUT: @@ -125,7 +141,6 @@ def get_validators(): time.sleep(1) - ctx.next_nonce = 100 # 5. 
Record the latest height and bring down the first node, wait for couple epochs to pass status = node2.get_status() @@ -163,6 +178,5 @@ def get_validators(): ctx.nodes = [observer, node2] print("Observer sees: %s" % ctx.get_balances()) -assert(balances != initial_balances) -assert(sum(balances) == total_supply) - +assert (balances != initial_balances) +assert (sum(balances) == total_supply) diff --git a/pytest/tests/sanity/staking1.py b/pytest/tests/sanity/staking1.py index 21c1118aa6a..d9c85b083c4 100644 --- a/pytest/tests/sanity/staking1.py +++ b/pytest/tests/sanity/staking1.py @@ -12,20 +12,33 @@ TIMEOUT = 150 config = None -nodes = start_cluster(2, 1, 1, config, [["epoch_length", 10], ["block_producer_kickout_threshold", 40]], {2: {"tracked_shards": [0]}}) +nodes = start_cluster( + 2, 1, 1, config, + [["epoch_length", 10], ["block_producer_kickout_threshold", 40]], + {2: { + "tracked_shards": [0] + }}) started = time.time() + def get_validators(): return set([x['account_id'] for x in nodes[0].get_status()['validators']]) + def get_stakes(): - return [int(nodes[2].get_account("test%s" % i)['result']['locked']) for i in range(3)] + return [ + int(nodes[2].get_account("test%s" % i)['result']['locked']) + for i in range(3) + ] + status = nodes[2].get_status() hash_ = status['sync_info']['latest_block_hash'] -tx = sign_staking_tx(nodes[2].signer_key, nodes[2].validator_key, 100000000000000000000000000000000, 2, base58.b58decode(hash_.encode('utf8'))) +tx = sign_staking_tx(nodes[2].signer_key, nodes[2].validator_key, + 100000000000000000000000000000000, 2, + base58.b58decode(hash_.encode('utf8'))) nodes[0].send_tx(tx) max_height = 0 @@ -48,7 +61,8 @@ def get_stakes(): print("..Reached height %s, no luck yet" % height) time.sleep(0.1) -tx = sign_staking_tx(nodes[2].signer_key, nodes[2].validator_key, 0, 3, base58.b58decode(hash_.encode('utf8'))) +tx = sign_staking_tx(nodes[2].signer_key, nodes[2].validator_key, 0, 3, + base58.b58decode(hash_.encode('utf8'))) 
nodes[2].send_tx(tx) while True: diff --git a/pytest/tests/sanity/staking2.py b/pytest/tests/sanity/staking2.py index 498997bff85..d1214cd4277 100644 --- a/pytest/tests/sanity/staking2.py +++ b/pytest/tests/sanity/staking2.py @@ -24,16 +24,23 @@ # random. See `staking_repro1.py` for an example sequence = [] + def get_validators(): return set([x['account_id'] for x in nodes[0].get_status()['validators']]) + def get_stakes(): - return [int(nodes[2].get_account("test%s" % i)['result']['locked']) for i in range(3)] + return [ + int(nodes[2].get_account("test%s" % i)['result']['locked']) + for i in range(3) + ] + def get_expected_stakes(): global all_stakes return [max([x[i] for x in all_stakes[-3:]]) for i in range(3)] + def do_moar_stakes(last_block_hash, update_expected): global next_nonce, all_stakes, sequence @@ -41,8 +48,10 @@ def do_moar_stakes(last_block_hash, update_expected): stakes = [0, 0, 0] # have 1-2 validators with stake, and the remaining without # make numbers dibisable by 1M so that we can easily distinguish a situation when the current locked amt has some reward added to it (not divisable by 1M) vs not (divisable by 1M) - stakes[random.randint(0, 2)] = random.randint(70000000000000000000000000, 100000000000000000000000000) * 1000000 - stakes[random.randint(0, 2)] = random.randint(70000000000000000000000000, 100000000000000000000000000) * 1000000 + stakes[random.randint(0, 2)] = random.randint( + 70000000000000000000000000, 100000000000000000000000000) * 1000000 + stakes[random.randint(0, 2)] = random.randint( + 70000000000000000000000000, 100000000000000000000000000) * 1000000 else: stakes = sequence[0] sequence = sequence[1:] @@ -50,22 +59,30 @@ def do_moar_stakes(last_block_hash, update_expected): vals = get_validators() val_id = int(list(vals)[0][4:]) for i in range(3): - tx = sign_staking_tx(nodes[i].signer_key, nodes[i].validator_key, stakes[i], next_nonce, base58.b58decode(last_block_hash.encode('utf8'))) + tx = 
sign_staking_tx(nodes[i].signer_key, nodes[i].validator_key, + stakes[i], next_nonce, + base58.b58decode(last_block_hash.encode('utf8'))) nodes[val_id].send_tx(tx) next_nonce += 1 if update_expected: all_stakes.append(stakes) print("") - print("Sent %s staking txs: %s" % ("REAL" if update_expected else "fake", stakes)) + print("Sent %s staking txs: %s" % + ("REAL" if update_expected else "fake", stakes)) -def doit(seq = []): +def doit(seq=[]): global nodes, all_stakes, sequence sequence = seq config = None - nodes = start_cluster(2, 1, 1, config, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40]], {2: {"tracked_shards": [0]}}) + nodes = start_cluster(2, 1, 1, config, + [["epoch_length", EPOCH_LENGTH], + ["block_producer_kickout_threshold", 40]], + {2: { + "tracked_shards": [0] + }}) started = time.time() last_iter = started @@ -91,7 +108,9 @@ def doit(seq = []): height = status['sync_info']['latest_block_height'] hash_ = status['sync_info']['latest_block_hash'] - if (height + EPOCH_LENGTH - FAKE_OFFSET) // EPOCH_LENGTH > (last_fake_stakes_height + EPOCH_LENGTH - FAKE_OFFSET) // EPOCH_LENGTH: + if (height + EPOCH_LENGTH - FAKE_OFFSET) // EPOCH_LENGTH > ( + last_fake_stakes_height + EPOCH_LENGTH - + FAKE_OFFSET) // EPOCH_LENGTH: last_iter = time.time() cur_stakes = get_stakes() print("Current stakes: %s" % cur_stakes) @@ -108,10 +127,12 @@ def doit(seq = []): do_moar_stakes(hash_, False) last_fake_stakes_height = height - if (height + EPOCH_LENGTH - REAL_OFFSET) // EPOCH_LENGTH > (last_staked_height + EPOCH_LENGTH - REAL_OFFSET) // EPOCH_LENGTH: + if (height + EPOCH_LENGTH - REAL_OFFSET) // EPOCH_LENGTH > ( + last_staked_height + EPOCH_LENGTH - + REAL_OFFSET) // EPOCH_LENGTH: do_moar_stakes(hash_, True) last_staked_height = height + if __name__ == "__main__": doit() - diff --git a/pytest/tests/sanity/staking_repro1.py b/pytest/tests/sanity/staking_repro1.py index 0e01ec4e501..3706d783954 100644 --- a/pytest/tests/sanity/staking_repro1.py +++ 
b/pytest/tests/sanity/staking_repro1.py @@ -9,28 +9,28 @@ [0, 95711839000000000000000000000000, 75774073000000000000000000000000], [0, 84382393000000000000000000000000, 98749818000000000000000000000000], [0, 84382393000000000000000000000000, 98749818000000000000000000000000], - [0, 0, 90224511000000000000000000000000] , - [0, 0, 90224511000000000000000000000000] , + [0, 0, 90224511000000000000000000000000], + [0, 0, 90224511000000000000000000000000], [86432337000000000000000000000000, 76422536000000000000000000000000, 0], [86432337000000000000000000000000, 76422536000000000000000000000000, 0], [75063341000000000000000000000000, 0, 80786582000000000000000000000000], [75063341000000000000000000000000, 0, 80786582000000000000000000000000], [86164122000000000000000000000000, 0, 73964803000000000000000000000000], [86164122000000000000000000000000, 0, 73964803000000000000000000000000], - [70076530000000000000000000000000, 0, 0] , - [70076530000000000000000000000000, 0, 0] , - [0, 78553537000000000000000000000000, 0] , - [0, 78553537000000000000000000000000, 0] , + [70076530000000000000000000000000, 0, 0], + [70076530000000000000000000000000, 0, 0], + [0, 78553537000000000000000000000000, 0], + [0, 78553537000000000000000000000000, 0], [85364825000000000000000000000000, 0, 81322978000000000000000000000000], [85364825000000000000000000000000, 0, 81322978000000000000000000000000], - [98064272000000000000000000000000, 0, 0] , - [98064272000000000000000000000000, 0, 0] , - [0, 0, 78616653000000000000000000000000] , - [0, 0, 78616653000000000000000000000000] , + [98064272000000000000000000000000, 0, 0], + [98064272000000000000000000000000, 0, 0], + [0, 0, 78616653000000000000000000000000], + [0, 0, 78616653000000000000000000000000], [99017960000000000000000000000000, 78066788000000000000000000000000, 0], [99017960000000000000000000000000, 78066788000000000000000000000000, 0], - [83587257000000000000000000000000, 0, 0] , - [83587257000000000000000000000000, 0, 0] , + 
[83587257000000000000000000000000, 0, 0], + [83587257000000000000000000000000, 0, 0], [81625375000000000000000000000000, 0, 90569985000000000000000000000000], [81625375000000000000000000000000, 0, 90569985000000000000000000000000], [76171853000000000000000000000000, 70811780000000000000000000000000, 0], @@ -38,4 +38,3 @@ ] doit(sequence) - diff --git a/pytest/tests/sanity/staking_repro2.py b/pytest/tests/sanity/staking_repro2.py index f4ae2f84a74..9172397263f 100644 --- a/pytest/tests/sanity/staking_repro2.py +++ b/pytest/tests/sanity/staking_repro2.py @@ -11,4 +11,3 @@ ] doit(sequence) - diff --git a/pytest/tests/sanity/state_migration.py b/pytest/tests/sanity/state_migration.py index 46d84fbcb94..ed15c974e72 100755 --- a/pytest/tests/sanity/state_migration.py +++ b/pytest/tests/sanity/state_migration.py @@ -1,5 +1,4 @@ #!/usr/bin/env python - """ Spins up stable node, runs it for a few blocks and stops it. Dump state via the stable state-viewer. @@ -37,13 +36,24 @@ def main(): shutil.rmtree(node_root) subprocess.check_output('mkdir -p /tmp/near', shell=True) - near_root, (stable_branch, current_branch) = branches.prepare_ab_test("beta") + near_root, (stable_branch, + current_branch) = branches.prepare_ab_test("beta") # Run stable node for few blocks. 
- subprocess.call(["%snear-%s" % (near_root, stable_branch), "--home=%s/test0" % node_root, "init", "--fast"]) - stable_protocol_version = json.load(open('%s/test0/genesis.json' % node_root))['protocol_version'] - config = {"local": True, 'near_root': near_root, 'binary_name': "near-%s" % stable_branch } - stable_node = cluster.spin_up_node(config, near_root, os.path.join(node_root, "test0"), 0, None, None) + subprocess.call([ + "%snear-%s" % (near_root, stable_branch), + "--home=%s/test0" % node_root, "init", "--fast" + ]) + stable_protocol_version = json.load( + open('%s/test0/genesis.json' % node_root))['protocol_version'] + config = { + "local": True, + 'near_root': near_root, + 'binary_name': "near-%s" % stable_branch + } + stable_node = cluster.spin_up_node(config, near_root, + os.path.join(node_root, "test0"), 0, + None, None) wait_for_blocks_or_timeout(stable_node, 20, 100) # TODO: we should make state more interesting to migrate by sending some tx / contracts. @@ -51,26 +61,41 @@ def main(): os.mkdir('%s/test0' % node_root) # Dump state. - subprocess.call(["%sstate-viewer-%s" % (near_root, stable_branch), "--home", '%s/test0_finished' % node_root, "dump_state"]) + subprocess.call([ + "%sstate-viewer-%s" % (near_root, stable_branch), "--home", + '%s/test0_finished' % node_root, "dump_state" + ]) # Migrate. 
migrations_home = '../scripts/migrations' - all_migrations = sorted(os.listdir(migrations_home), key=lambda x: int(x.split('-')[0])) + all_migrations = sorted(os.listdir(migrations_home), + key=lambda x: int(x.split('-')[0])) for fname in all_migrations: m = re.match('([0-9]+)\-.*', fname) if m: version = int(m.groups()[0]) if version > stable_protocol_version: - exitcode = subprocess.call(['python', os.path.join(migrations_home, fname), '%s/test0_finished' % node_root, '%s/test0_finished' % node_root]) + exitcode = subprocess.call([ + 'python', + os.path.join(migrations_home, fname), + '%s/test0_finished' % node_root, + '%s/test0_finished' % node_root + ]) assert exitcode == 0, "Failed to run migration %d" % version - os.rename(os.path.join(node_root, 'test0_finished/output.json'), os.path.join(node_root, 'test0/genesis.json')) - shutil.copy(os.path.join(node_root, 'test0_finished/config.json'), os.path.join(node_root, 'test0/')) - shutil.copy(os.path.join(node_root, 'test0_finished/validator_key.json'), os.path.join(node_root, 'test0/')) - shutil.copy(os.path.join(node_root, 'test0_finished/node_key.json'), os.path.join(node_root, 'test0/')) + os.rename(os.path.join(node_root, 'test0_finished/output.json'), + os.path.join(node_root, 'test0/genesis.json')) + shutil.copy(os.path.join(node_root, 'test0_finished/config.json'), + os.path.join(node_root, 'test0/')) + shutil.copy(os.path.join(node_root, 'test0_finished/validator_key.json'), + os.path.join(node_root, 'test0/')) + shutil.copy(os.path.join(node_root, 'test0_finished/node_key.json'), + os.path.join(node_root, 'test0/')) # Run new node and verify it runs for a few more blocks. 
config["binary_name"] = "near-%s" % current_branch - current_node = cluster.spin_up_node(config, near_root, os.path.join(node_root, "test0"), 0, None, None) + current_node = cluster.spin_up_node(config, near_root, + os.path.join(node_root, "test0"), 0, + None, None) wait_for_blocks_or_timeout(current_node, 20, 100) diff --git a/pytest/tests/sanity/state_sync.py b/pytest/tests/sanity/state_sync.py index 4cd42f5c0eb..3e91c68e44e 100644 --- a/pytest/tests/sanity/state_sync.py +++ b/pytest/tests/sanity/state_sync.py @@ -27,12 +27,18 @@ TIMEOUT = 150 + START_AT_BLOCK * 10 config = load_config() -near_root, node_dirs = init_cluster(2, 1, 1, config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {2: {"tracked_shards": [0]}}) +near_root, node_dirs = init_cluster( + 2, 1, 1, config, + [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], + ["block_producer_kickout_threshold", 80]], {2: { + "tracked_shards": [0] + }}) started = time.time() boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None) -node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr()) +node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, + boot_node.addr()) ctx = TxContext([0, 0], [boot_node, node1]) @@ -46,7 +52,7 @@ hash_ = status['sync_info']['latest_block_hash'] if new_height > observed_height: observed_height = new_height - print("Boot node got to height %s" % new_height); + print("Boot node got to height %s" % new_height) if mode == 'onetx' and not sent_txs: ctx.send_moar_txs(hash_, 3, False) @@ -61,7 +67,8 @@ if mode == 'onetx': assert ctx.get_balances() == ctx.expected_balances -node2 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk, boot_node.addr()) +node2 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk, + boot_node.addr()) tracker = LogTracker(node2) time.sleep(3) @@ 
-72,7 +79,7 @@ new_height = status['sync_info']['latest_block_height'] if new_height > catch_up_height: catch_up_height = new_height - print("Second node got to height %s" % new_height); + print("Second node got to height %s" % new_height) status = boot_node.get_status() boot_height = status['sync_info']['latest_block_height'] @@ -85,9 +92,11 @@ boot_heights = boot_node.get_all_heights() -assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, boot_heights) +assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, + boot_heights) -tracker.reset() # the transition might have happened before we initialized the tracker +tracker.reset( +) # the transition might have happened before we initialized the tracker if catch_up_height >= 100: assert tracker.check("transition to State Sync") elif catch_up_height <= 30: @@ -96,7 +105,9 @@ if mode == 'manytx': while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT - print("Waiting for the old node to catch up. Current balances: %s; Expected balances: %s" % (ctx.get_balances(), ctx.expected_balances)) + print( + "Waiting for the old node to catch up. Current balances: %s; Expected balances: %s" + % (ctx.get_balances(), ctx.expected_balances)) time.sleep(1) # requery the balances from the newly started node @@ -105,5 +116,7 @@ while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT - print("Waiting for the new node to catch up. Current balances: %s; Expected balances: %s" % (ctx.get_balances(), ctx.expected_balances)) + print( + "Waiting for the new node to catch up. 
Current balances: %s; Expected balances: %s" + % (ctx.get_balances(), ctx.expected_balances)) time.sleep(1) diff --git a/pytest/tests/sanity/state_sync1.py b/pytest/tests/sanity/state_sync1.py index 82286d91d82..67b692a92ab 100644 --- a/pytest/tests/sanity/state_sync1.py +++ b/pytest/tests/sanity/state_sync1.py @@ -6,14 +6,24 @@ sys.path.append('lib') - from cluster import start_cluster BLOCK_WAIT = 40 EPOCH_LENGTH = 80 -consensus_config = {"consensus": {"block_fetch_horizon": 10, "block_header_fetch_horizon": 10}} -nodes = start_cluster(4, 0, 1, None, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10], ["chunk_producer_kickout_threshold", 10]], {0: consensus_config, 1: consensus_config}) +consensus_config = { + "consensus": { + "block_fetch_horizon": 10, + "block_header_fetch_horizon": 10 + } +} +nodes = start_cluster( + 4, 0, 1, None, + [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10], + ["chunk_producer_kickout_threshold", 10]], { + 0: consensus_config, + 1: consensus_config + }) time.sleep(2) nodes[1].kill() @@ -59,7 +69,8 @@ assert False, "Nodes are not synced" status = nodes[0].get_status() -validator_info = nodes[0].json_rpc('validators', [status['sync_info']['latest_block_hash']]) +validator_info = nodes[0].json_rpc('validators', + [status['sync_info']['latest_block_hash']]) if len(validator_info['result']['next_validators']) < 2: assert False, "Node 1 did not produce enough blocks" diff --git a/pytest/tests/sanity/state_sync2.py b/pytest/tests/sanity/state_sync2.py index 3da47189ef7..2568c2ce2b2 100644 --- a/pytest/tests/sanity/state_sync2.py +++ b/pytest/tests/sanity/state_sync2.py @@ -11,14 +11,18 @@ from cluster import start_cluster from utils import LogTracker -fcntl.fcntl(1, fcntl.F_SETFL, 0) # no cache when execute from nightly runner +fcntl.fcntl(1, fcntl.F_SETFL, 0) # no cache when execute from nightly runner print('start state sync2') print('start state sync2', file=sys.stderr) TIMEOUT = 600 -BLOCKS 
= 105 # should be enough to trigger state sync for node 1 later, see comments there +BLOCKS = 105 # should be enough to trigger state sync for node 1 later, see comments there -nodes = start_cluster(2, 0, 2, None, [["num_block_producer_seats", 199], ["num_block_producer_seats_per_shard", [99, 100]], ["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {}) +nodes = start_cluster( + 2, 0, 2, None, + [["num_block_producer_seats", 199], + ["num_block_producer_seats_per_shard", [99, 100]], ["epoch_length", 10], + ["block_producer_kickout_threshold", 80]], {}) print('cluster started') started = time.time() @@ -51,4 +55,3 @@ # make sure `nodes[0]` actually state synced assert tracker.check("transition to State Sync") - diff --git a/pytest/tests/sanity/state_sync3.py b/pytest/tests/sanity/state_sync3.py index a62f6c380f7..246386eb88a 100644 --- a/pytest/tests/sanity/state_sync3.py +++ b/pytest/tests/sanity/state_sync3.py @@ -11,9 +11,30 @@ EPOCH_LENGTH = 1000 MAX_SYNC_WAIT = 120 -consensus_config0 = {"consensus": {"min_block_production_delay": {"secs": 0, "nanos": 100000000}}} -consensus_config1 = {"consensus": {"sync_step_period": {"secs": 0, "nanos": 1000}}, "tracked_shards": [0]} -nodes = start_cluster(1, 1, 1, None, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10], ["chunk_producer_kickout_threshold", 10]], {0: consensus_config0, 1: consensus_config1}) +consensus_config0 = { + "consensus": { + "min_block_production_delay": { + "secs": 0, + "nanos": 100000000 + } + } +} +consensus_config1 = { + "consensus": { + "sync_step_period": { + "secs": 0, + "nanos": 1000 + } + }, + "tracked_shards": [0] +} +nodes = start_cluster( + 1, 1, 1, None, + [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10], + ["chunk_producer_kickout_threshold", 10]], { + 0: consensus_config0, + 1: consensus_config1 + }) time.sleep(2) nodes[1].kill() @@ -45,4 +66,3 @@ elif time.time() - state_sync_done_time > 8: assert node1_height > 
state_sync_done_height, "No progress after state sync is done" time.sleep(2) - diff --git a/pytest/tests/sanity/state_sync_late.py b/pytest/tests/sanity/state_sync_late.py index 261294b49cf..9cb51bf5d37 100644 --- a/pytest/tests/sanity/state_sync_late.py +++ b/pytest/tests/sanity/state_sync_late.py @@ -27,12 +27,18 @@ TIMEOUT = 150 + START_AT_BLOCK * 10 config = load_config() -near_root, node_dirs = init_cluster(2, 1, 1, config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {2: {"tracked_shards": [0]}}) +near_root, node_dirs = init_cluster( + 2, 1, 1, config, + [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], + ["block_producer_kickout_threshold", 80]], {2: { + "tracked_shards": [0] + }}) started = time.time() boot_node = spin_up_node(config, near_root, node_dirs[0], 0, None, None) -node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr()) +node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, + boot_node.addr()) ctx = TxContext([0, 0], [boot_node, node1]) @@ -46,7 +52,7 @@ hash_ = status['sync_info']['latest_block_hash'] if new_height > observed_height: observed_height = new_height - print("Boot node got to height %s" % new_height); + print("Boot node got to height %s" % new_height) if mode == 'onetx' and not sent_txs: ctx.send_moar_txs(hash_, 3, False) @@ -61,7 +67,8 @@ if mode == 'onetx': assert ctx.get_balances() == ctx.expected_balances -node2 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk, boot_node.addr()) +node2 = spin_up_node(config, near_root, node_dirs[2], 2, boot_node.node_key.pk, + boot_node.addr()) tracker = LogTracker(node2) time.sleep(3) @@ -72,7 +79,7 @@ new_height = status['sync_info']['latest_block_height'] if new_height > catch_up_height: catch_up_height = new_height - print("Second node got to height %s" % new_height); + print("Second node got to height 
%s" % new_height) status = boot_node.get_status() boot_height = status['sync_info']['latest_block_height'] @@ -85,9 +92,10 @@ boot_heights = boot_node.get_all_heights() -assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, boot_heights) +assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, + boot_heights) -tracker.offset = 0 # the transition might have happened before we initialized the tracker +tracker.offset = 0 # the transition might have happened before we initialized the tracker if catch_up_height >= 100: assert tracker.check("transition to State Sync") elif catch_up_height <= 30: @@ -96,7 +104,9 @@ if mode == 'manytx': while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT - print("Waiting for the old node to catch up. Current balances: %s; Expected balances: %s" % (ctx.get_balances(), ctx.expected_balances)) + print( + "Waiting for the old node to catch up. Current balances: %s; Expected balances: %s" + % (ctx.get_balances(), ctx.expected_balances)) time.sleep(1) # requery the balances from the newly started node @@ -105,5 +115,7 @@ while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT - print("Waiting for the new node to catch up. Current balances: %s; Expected balances: %s" % (ctx.get_balances(), ctx.expected_balances)) + print( + "Waiting for the new node to catch up. 
Current balances: %s; Expected balances: %s" + % (ctx.get_balances(), ctx.expected_balances)) time.sleep(1) diff --git a/pytest/tests/sanity/state_sync_routed.py b/pytest/tests/sanity/state_sync_routed.py index 2fafea3d769..eadfb2e946b 100644 --- a/pytest/tests/sanity/state_sync_routed.py +++ b/pytest/tests/sanity/state_sync_routed.py @@ -31,7 +31,12 @@ config = load_config() -near_root, node_dirs = init_cluster(2, 3, 1, config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {4: {"tracked_shards": [0]}}) +near_root, node_dirs = init_cluster( + 2, 3, 1, config, + [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], + ["block_producer_kickout_threshold", 80]], {4: { + "tracked_shards": [0] + }}) started = time.time() @@ -41,11 +46,14 @@ boot_node = node2 # Second observer -node3 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk, boot_node.addr()) +node3 = spin_up_node(config, near_root, node_dirs[3], 3, boot_node.node_key.pk, + boot_node.addr()) # Spin up validators -node0 = spin_up_node(config, near_root, node_dirs[0], 0, boot_node.node_key.pk, boot_node.addr(), [4]) -node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, boot_node.addr(), [4]) +node0 = spin_up_node(config, near_root, node_dirs[0], 0, boot_node.node_key.pk, + boot_node.addr(), [4]) +node1 = spin_up_node(config, near_root, node_dirs[1], 1, boot_node.node_key.pk, + boot_node.addr(), [4]) ctx = TxContext([0, 0], [node0, node1]) @@ -59,7 +67,7 @@ hash_ = status['sync_info']['latest_block_hash'] if new_height > observed_height: observed_height = new_height - print("Boot node got to height %s" % new_height); + print("Boot node got to height %s" % new_height) if mode == 'onetx' and not sent_txs: ctx.send_moar_txs(hash_, 3, False) @@ -74,7 +82,8 @@ if mode == 'onetx': assert ctx.get_balances() == ctx.expected_balances -node4 = spin_up_node(config, near_root, 
node_dirs[4], 4, boot_node.node_key.pk, boot_node.addr(), [0, 1]) +node4 = spin_up_node(config, near_root, node_dirs[4], 4, boot_node.node_key.pk, + boot_node.addr(), [0, 1]) tracker4 = LogTracker(node4) time.sleep(3) @@ -86,7 +95,7 @@ print("Latest block at:", new_height) if new_height > catch_up_height: catch_up_height = new_height - print("Last observer got to height %s" % new_height); + print("Last observer got to height %s" % new_height) status = boot_node.get_status() boot_height = status['sync_info']['latest_block_height'] @@ -99,16 +108,19 @@ boot_heights = boot_node.get_all_heights() -assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, boot_heights) +assert catch_up_height in boot_heights, "%s not in %s" % (catch_up_height, + boot_heights) -tracker4.reset() # the transition might have happened before we initialized the tracker +tracker4.reset( +) # the transition might have happened before we initialized the tracker if catch_up_height >= 100: assert tracker4.check("transition to State Sync") elif catch_up_height <= 30: assert not tracker4.check("transition to State Sync") while True: - assert time.time() - started < TIMEOUT, "Waiting for node 4 to connect to two peers" + assert time.time( + ) - started < TIMEOUT, "Waiting for node 4 to connect to two peers" tracker4.reset() if tracker4.count("Consolidated connection with FullPeerInfo") == 2: break @@ -121,7 +133,9 @@ if mode == 'manytx': while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT - print("Waiting for the old node to catch up. Current balances: %s; Expected balances: %s" % (ctx.get_balances(), ctx.expected_balances)) + print( + "Waiting for the old node to catch up. 
Current balances: %s; Expected balances: %s" + % (ctx.get_balances(), ctx.expected_balances)) time.sleep(1) # requery the balances from the newly started node @@ -130,5 +144,7 @@ while ctx.get_balances() != ctx.expected_balances: assert time.time() - started < TIMEOUT - print("Waiting for the new node to catch up. Current balances: %s; Expected balances: %s" % (ctx.get_balances(), ctx.expected_balances)) + print( + "Waiting for the new node to catch up. Current balances: %s; Expected balances: %s" + % (ctx.get_balances(), ctx.expected_balances)) time.sleep(1) diff --git a/pytest/tests/sanity/transactions.py b/pytest/tests/sanity/transactions.py index a5f737669c2..cf1477f9d5d 100644 --- a/pytest/tests/sanity/transactions.py +++ b/pytest/tests/sanity/transactions.py @@ -8,14 +8,16 @@ sys.path.append('lib') - from cluster import start_cluster from utils import TxContext from transaction import sign_payment_tx TIMEOUT = 240 -nodes = start_cluster(4, 0, 4, None, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {}) +nodes = start_cluster( + 4, 0, 4, None, + [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", 10], + ["block_producer_kickout_threshold", 70]], {}) started = time.time() @@ -38,7 +40,8 @@ if step == 0: print(f'step {step}') if height >= 1: - tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1, base58.b58decode(hash_.encode('utf8'))) + tx = sign_payment_tx(nodes[0].signer_key, 'test1', 100, 1, + base58.b58decode(hash_.encode('utf8'))) nodes[3].send_tx(tx) ctx.expected_balances[0] -= 100 ctx.expected_balances[1] += 100 @@ -51,21 +54,24 @@ if height == sent_height + 6: cur_balances = ctx.get_balances() - assert cur_balances == ctx.expected_balances, "%s != %s" % (cur_balances, ctx.expected_balances) + assert cur_balances == ctx.expected_balances, "%s != %s" % ( + cur_balances, ctx.expected_balances) step = 2 else: # we are done with the sanity test, now let's stress 
it if ctx.get_balances() == ctx.expected_balances: - print("Balances caught up, took %s blocks, moving on" % (height - sent_height)); + print("Balances caught up, took %s blocks, moving on" % + (height - sent_height)) last_balances = [x for x in ctx.expected_balances] ctx.send_moar_txs(hash_, 10, use_routing=True) sent_height = height else: if height > sent_height + 10: - assert False, "Balances before: %s\nExpected balances: %s\nCurrent balances: %s\nSent at height: %s, cur height: %s\n" % (last_balances, ctx.expected_balances, ctx.get_balances(), sent_height, height) + assert False, "Balances before: %s\nExpected balances: %s\nCurrent balances: %s\nSent at height: %s, cur height: %s\n" % ( + last_balances, ctx.expected_balances, ctx.get_balances(), + sent_height, height) time.sleep(0.2) if height >= 100: break - diff --git a/pytest/tests/sanity/validator_switch.py b/pytest/tests/sanity/validator_switch.py index 9b2234d5dcb..b194a59717b 100644 --- a/pytest/tests/sanity/validator_switch.py +++ b/pytest/tests/sanity/validator_switch.py @@ -13,7 +13,14 @@ EPOCH_LENGTH = 20 tracked_shards = {"tracked_shards": [0, 1, 2, 3]} -nodes = start_cluster(3, 1, 4, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10], ["chunk_producer_kickout_threshold", 10]], {0: tracked_shards, 1: tracked_shards}) +nodes = start_cluster(3, 1, 4, { + 'local': True, + 'near_root': '../target/debug/' +}, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 10], + ["chunk_producer_kickout_threshold", 10]], { + 0: tracked_shards, + 1: tracked_shards + }) time.sleep(3) @@ -22,7 +29,8 @@ for i in range(4): stake = 100000000000000000000000000 if i == 3 else 0 - tx = sign_staking_tx(nodes[i].signer_key, nodes[i].validator_key, stake, 1, base58.b58decode(hash_.encode('utf8'))) + tx = sign_staking_tx(nodes[i].signer_key, nodes[i].validator_key, stake, 1, + base58.b58decode(hash_.encode('utf8'))) nodes[0].send_tx(tx) 
print("test%s stakes %d" % (i, stake)) @@ -32,9 +40,13 @@ cur_height = status['sync_info']['latest_block_height'] if cur_height > EPOCH_LENGTH + 1: status = nodes[0].get_status() - validator_info = nodes[0].json_rpc('validators', [status['sync_info']['latest_block_hash']]) - assert len(validator_info['result']['next_validators']) == 1, "Number of validators do not match" - assert validator_info['result']['next_validators'][0]['account_id'] == "test3" + validator_info = nodes[0].json_rpc( + 'validators', [status['sync_info']['latest_block_hash']]) + assert len( + validator_info['result'] + ['next_validators']) == 1, "Number of validators do not match" + assert validator_info['result']['next_validators'][0][ + 'account_id'] == "test3" time.sleep(1) synced = False @@ -42,7 +54,8 @@ statuses = [] for i, node in enumerate(nodes): cur_status = node.get_status() - statuses.append((i, cur_status['sync_info']['latest_block_height'], cur_status['sync_info']['latest_block_hash'])) + statuses.append((i, cur_status['sync_info']['latest_block_height'], + cur_status['sync_info']['latest_block_hash'])) statuses.sort(key=lambda x: x[1]) last = statuses[-1] cur_height = last[1] @@ -60,4 +73,3 @@ if not synced: assert False, "Nodes are not synced" - diff --git a/pytest/tests/stress/network_stress.py b/pytest/tests/stress/network_stress.py index 73f917b8124..888e274f99c 100644 --- a/pytest/tests/stress/network_stress.py +++ b/pytest/tests/stress/network_stress.py @@ -7,6 +7,7 @@ TIMEOUT = 300 + @stress_process def monkey_transactions_noval(stopped, error, nodes, nonces): while stopped.value == 0: @@ -22,7 +23,9 @@ def monkey_transactions_noval(stopped, error, nodes, nonces): hash_, _ = get_recent_hash(nodes[-1]) with nonce_lock: - tx = sign_payment_tx(nodes[from_].signer_key, 'test%s' % to, amt, nonce_val.value, base58.b58decode(hash_.encode('utf8'))) + tx = sign_payment_tx(nodes[from_].signer_key, 'test%s' % to, amt, + nonce_val.value, + base58.b58decode(hash_.encode('utf8'))) for 
validator_id in validator_ids: try: tx_hash = nodes[validator_id].send_tx(tx)['result'] @@ -33,6 +36,7 @@ def monkey_transactions_noval(stopped, error, nodes, nonces): time.sleep(0.1) + @stress_process def monkey_network_hammering(stopped, error, nodes, nonces): s = [False for x in nodes] @@ -53,9 +57,10 @@ def monkey_network_hammering(stopped, error, nodes, nonces): pid = nodes[i].pid.value print("Resuming network for process %s" % pid) resume_network(pid) - + expect_network_issues() init_network_pillager() -doit(3, 3, 3, 0, [monkey_network_hammering, monkey_transactions_noval, monkey_staking], TIMEOUT) - +doit(3, 3, 3, 0, + [monkey_network_hammering, monkey_transactions_noval, monkey_staking], + TIMEOUT) diff --git a/pytest/tests/stress/stress.py b/pytest/tests/stress/stress.py index b09839c4e34..8af37b24d49 100644 --- a/pytest/tests/stress/stress.py +++ b/pytest/tests/stress/stress.py @@ -35,13 +35,13 @@ sys.stdout = Unbuffered(sys.stdout) -TIMEOUT = 1500 # after how much time to shut down the test -TIMEOUT_SHUTDOWN = 120 # time to wait after the shutdown was initiated before +TIMEOUT = 1500 # after how much time to shut down the test +TIMEOUT_SHUTDOWN = 120 # time to wait after the shutdown was initiated before MAX_STAKE = int(1e26) EPOCH_LENGTH = 20 -block_timeout = 20 # if two blocks are not produced within that many seconds, the test will fail. The timeout is increased if nodes are restarted or network is being messed up with -balances_timeout = 15 # how long to tolerate for balances to update after txs are sent +block_timeout = 20 # if two blocks are not produced within that many seconds, the test will fail. 
The timeout is increased if nodes are restarted or network is being messed up with +balances_timeout = 15 # how long to tolerate for balances to update after txs are sent tx_tolerance = 0.1 assert balances_timeout * 2 <= TIMEOUT_SHUTDOWN @@ -49,20 +49,25 @@ network_issues_expected = False + def expect_network_issues(): global network_issues_expected network_issues_expected = True + def stress_process(func): + def wrapper(stopped, error, *args): try: func(stopped, error, *args) except: traceback.print_exc() error.value = 1 + wrapper.__name__ = func.__name__ return wrapper + def get_recent_hash(node): # return the parent of the last block known to the observer # don't return the last block itself, since some validators might have not seen it yet @@ -76,10 +81,14 @@ def get_recent_hash(node): def get_validator_ids(nodes): # the [4:] part is a hack to convert test7 => 7 - return set([int(x['account_id'][4:]) for x in nodes[-1].get_status()['validators']]) + return set([ + int(x['account_id'][4:]) for x in nodes[-1].get_status()['validators'] + ]) + @stress_process def monkey_node_set(stopped, error, nodes, nonces): + def get_future_time(): if random.choice([True, False]): return time.time() + random.randint(1, 5) @@ -107,10 +116,12 @@ def get_future_time(): if random.choice([True, False]): wipe = True #node.reset_data() - print("Node set: stopping%s node %s" % (" and wiping" if wipe else "", i)) + print("Node set: stopping%s node %s" % + (" and wiping" if wipe else "", i)) nodes_stopped[i] = not nodes_stopped[i] change_status_at[i] = get_future_time() + @stress_process def monkey_node_restart(stopped, error, nodes, nonces): heights_after_restart = [0 for _ in nodes] @@ -125,7 +136,8 @@ def monkey_node_restart(stopped, error, nodes, nonces): # don't kill the same node too frequently, give it time to reboot and produce something while True: _, h = get_recent_hash(node) - assert h >= heights_after_restart[node_idx], "%s > %s" % (h, heights_after_restart[node_idx]) + 
assert h >= heights_after_restart[node_idx], "%s > %s" % ( + h, heights_after_restart[node_idx]) if h > heights_after_restart[node_idx]: break time.sleep(1) @@ -139,6 +151,7 @@ def monkey_node_restart(stopped, error, nodes, nonces): time.sleep(5) + @stress_process def monkey_local_network(stopped, error, nodes, nonces): while stopped.value == 0: @@ -152,12 +165,15 @@ def monkey_local_network(stopped, error, nodes, nonces): nodes[node_idx].resume_network() time.sleep(5) + @stress_process def monkey_global_network(): pass + @stress_process def monkey_transactions(stopped, error, nodes, nonces): + def get_balances(): acts = [ nodes[-1].get_account("test%s" % i)['result'] @@ -171,7 +187,7 @@ def get_balances(): print("TOTAL SUPPLY: %s" % total_supply) last_iter_switch = time.time() - mode = 0 # 0 = send more tx, 1 = wait for balances + mode = 0 # 0 = send more tx, 1 = wait for balances tx_count = 0 last_tx_set = [] @@ -185,8 +201,11 @@ def get_balances(): print("%s TRANSACTIONS SENT. WAITING FOR BALANCES" % tx_count) mode = 1 else: - print("BALANCES NEVER CAUGHT UP, CHECKING UNFINISHED TRANSACTIONS") + print( + "BALANCES NEVER CAUGHT UP, CHECKING UNFINISHED TRANSACTIONS" + ) snapshot_expected_balances = [x for x in expected_balances] + def revert_txs(): nonlocal expected_balances good = 0 @@ -194,13 +213,18 @@ def revert_txs(): for tx in last_tx_set: tx_happened = True try: - response = nodes[-1].json_rpc('tx', [tx[3], "test%s" % tx[1]], timeout=1) + response = nodes[-1].json_rpc( + 'tx', [tx[3], "test%s" % tx[1]], timeout=1) # due to #2195 if the tx was dropped, the query today times out. 
- if 'error' in response and 'data' in response['error'] and response['error']['data'] == 'Timeout': + if 'error' in response and 'data' in response[ + 'error'] and response['error'][ + 'data'] == 'Timeout': tx_happened = False - elif 'result' in response and 'receipts_outcome' in response['result']: - tx_happened = len(response['result']['receipts_outcome']) > 0 + elif 'result' in response and 'receipts_outcome' in response[ + 'result']: + tx_happened = len( + response['result']['receipts_outcome']) > 0 else: assert False, response # This exception handler is also due to #2195 @@ -216,10 +240,12 @@ def revert_txs(): else: good += 1 return (good, bad) + good, bad = revert_txs() if expected_balances == get_balances(): # reverting helped - print("REVERTING HELPED, TX EXECUTED: %s, TX LOST: %s" % (good, bad)) + print("REVERTING HELPED, TX EXECUTED: %s, TX LOST: %s" % + (good, bad)) bad_ratio = bad / (good + bad) if bad_ratio > rolling_tolerance: rolling_tolerance -= bad_ratio - rolling_tolerance @@ -234,14 +260,21 @@ def revert_txs(): last_tx_set = [] else: # still no match, fail - print("REVERTING DIDN'T HELP, TX EXECUTED: %s, TX LOST: %s" % (good, bad)) - for step in range(10): # trace balances for 20 seconds to see if they are catching up + print( + "REVERTING DIDN'T HELP, TX EXECUTED: %s, TX LOST: %s" % + (good, bad)) + for step in range( + 10 + ): # trace balances for 20 seconds to see if they are catching up print(get_balances()) time.sleep(2) expected_balances = snapshot_expected_balances good, bad = revert_txs() - print("The latest and greatest stats on successful/failed: %s/%s" % (good, bad)) - assert False, "Balances didn't update in time. Expected: %s, received: %s" % (expected_balances, get_balances()) + print( + "The latest and greatest stats on successful/failed: %s/%s" + % (good, bad)) + assert False, "Balances didn't update in time. 
Expected: %s, received: %s" % ( + expected_balances, get_balances()) last_iter_switch = time.time() if mode == 0: @@ -259,12 +292,16 @@ def revert_txs(): hash_, _ = get_recent_hash(nodes[-1]) with nonce_lock: - tx = sign_payment_tx(nodes[from_].signer_key, 'test%s' % to, amt, nonce_val.value, base58.b58decode(hash_.encode('utf8'))) + tx = sign_payment_tx(nodes[from_].signer_key, 'test%s' % to, + amt, nonce_val.value, + base58.b58decode(hash_.encode('utf8'))) for validator_id in validator_ids: try: tx_hash = nodes[validator_id].send_tx(tx)['result'] - except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError): - if not network_issues_expected and not nodes[validator_id].mess_with: + except (requests.exceptions.ReadTimeout, + requests.exceptions.ConnectionError): + if not network_issues_expected and not nodes[ + validator_id].mess_with: raise last_tx_set.append((tx, from_, to, tx_hash, amt)) @@ -285,13 +322,17 @@ def revert_txs(): rolling_tolerance = tx_tolerance last_tx_set = [] - if mode == 1: time.sleep(1) - elif mode == 0: time.sleep(0.1) + if mode == 1: + time.sleep(1) + elif mode == 0: + time.sleep(0.1) + def get_the_guy_to_mess_up_with(nodes): _, height = get_recent_hash(nodes[-1]) return (height // EPOCH_LENGTH) % (len(nodes) - 1) + @stress_process def monkey_staking(stopped, error, nodes, nonces): while stopped.value == 0: @@ -305,17 +346,23 @@ def monkey_staking(stopped, error, nodes, nonces): nonce_val, nonce_lock = nonces[whom] with nonce_lock: - stake = random.randint(0.7 * MAX_STAKE // 1000000, MAX_STAKE // 1000000) * 1000000 + stake = random.randint(0.7 * MAX_STAKE // 1000000, + MAX_STAKE // 1000000) * 1000000 if whom == who_can_unstake: stake = 0 - tx = sign_staking_tx(nodes[whom].signer_key, nodes[whom].validator_key, stake, nonce_val.value, base58.b58decode(hash_.encode('utf8'))) + tx = sign_staking_tx(nodes[whom].signer_key, + nodes[whom].validator_key, stake, + nonce_val.value, + base58.b58decode(hash_.encode('utf8'))) for 
validator_id in validator_ids: try: nodes[validator_id].send_tx(tx) - except (requests.exceptions.ReadTimeout, requests.exceptions.ConnectionError): - if not network_issues_expected and not nodes[validator_id].mess_with: + except (requests.exceptions.ReadTimeout, + requests.exceptions.ConnectionError): + if not network_issues_expected and not nodes[ + validator_id].mess_with: raise nonce_val.value = nonce_val.value + 1 @@ -342,7 +389,9 @@ def blocks_tracker(stopped, error, nodes, nonces): status = nodes[val_id].get_status() if status['validators'] != last_validators and val_id == -1: last_validators = status['validators'] - print("VALIDATORS TRACKER: validators set changed, new set: %s" % [x['account_id'] for x in last_validators]) + print( + "VALIDATORS TRACKER: validators set changed, new set: %s" + % [x['account_id'] for x in last_validators]) hash_ = status['sync_info']['latest_block_hash'] height = status['sync_info']['latest_block_height'] largest_per_node[val_id] = height @@ -354,7 +403,9 @@ def blocks_tracker(stopped, error, nodes, nonces): if largest_height >= 20: if not every_ten: every_ten = True - print("BLOCK TRACKER: switching to tracing every ten blocks to reduce spam") + print( + "BLOCK TRACKER: switching to tracing every ten blocks to reduce spam" + ) largest_height = height last_updated = time.time() @@ -367,7 +418,8 @@ def blocks_tracker(stopped, error, nodes, nonces): assert height == confirm_height prev_hash = block_info['result']['header']['prev_hash'] if height in height_to_hash: - assert False, "Two blocks for the same height: %s and %s" % (height_to_hash[height], hash_) + assert False, "Two blocks for the same height: %s and %s" % ( + height_to_hash[height], hash_) height_to_hash[height] = hash_ mapping[hash_] = (prev_hash, height) @@ -427,7 +479,11 @@ def doit(s, n, N, k, monkeys, timeout): # make all the observers track all the shards local_config_changes[i] = {"tracked_shards": list(range(s))} - near_root, node_dirs = init_cluster(N, k + 
1, s, config, [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], ["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 70]], local_config_changes) + near_root, node_dirs = init_cluster( + N, k + 1, s, config, + [["min_gas_price", 0], ["max_inflation_rate", [0, 1]], + ["epoch_length", EPOCH_LENGTH], + ["block_producer_kickout_threshold", 70]], local_config_changes) started = time.time() @@ -436,7 +492,8 @@ def doit(s, n, N, k, monkeys, timeout): nodes = [boot_node] for i in range(1, N + k + 1): - node = spin_up_node(config, near_root, node_dirs[i], i, boot_node.node_key.pk, boot_node.addr()) + node = spin_up_node(config, near_root, node_dirs[i], i, + boot_node.node_key.pk, boot_node.addr()) nodes.append(node) if i >= n and i < N: node.kill() @@ -447,7 +504,8 @@ def doit(s, n, N, k, monkeys, timeout): monkey_names = [x.__name__ for x in monkeys] print(monkey_names) if 'monkey_local_network' in monkey_names or 'monkey_global_network' in monkey_names: - print("There are monkeys messing up with network, initializing the infra") + print( + "There are monkeys messing up with network, initializing the infra") if config['local']: init_network_pillager() expect_network_issues() @@ -505,16 +563,20 @@ def check_errors(): if time.time() - started_shutdown > TIMEOUT_SHUTDOWN: for (p, _) in ps: p.terminate() - assert False, "The test didn't gracefully shut down in time\nStill running: %s" % (still_running) + assert False, "The test didn't gracefully shut down in time\nStill running: %s" % ( + still_running) check_errors() -MONKEYS = dict([(name[7:], obj) for name, obj in inspect.getmembers(sys.modules[__name__]) if inspect.isfunction(obj) and name.startswith("monkey_")]) +MONKEYS = dict([(name[7:], obj) + for name, obj in inspect.getmembers(sys.modules[__name__]) + if inspect.isfunction(obj) and name.startswith("monkey_")]) if __name__ == "__main__": if len(sys.argv) < 5: - print("Usage:\npython tests/stress/stress.py s n N k monkey1 monkey2 ...") + print( + 
"Usage:\npython tests/stress/stress.py s n N k monkey1 monkey2 ...") sys.exit(1) s = int(sys.argv[1]) @@ -528,4 +590,3 @@ def check_errors(): assert monkey in MONKEYS, "Unknown monkey \"%s\"" % monkey doit(s, n, N, k, [globals()["monkey_%s" % x] for x in monkeys], TIMEOUT) - diff --git a/scripts/create_service.py b/scripts/create_service.py index 3ec9437dd8f..0ef7f699295 100755 --- a/scripts/create_service.py +++ b/scripts/create_service.py @@ -20,19 +20,24 @@ default_near_args = 'run' default_near_user = getpass.getuser() -near_user = raw_input("Enter user to run service, default: '{}': ".format(default_near_user)) or default_near_user -near_path = raw_input("Enter the nearcore binary path, default '{}': ".format(default_near_path if default_near_path else "")) or default_near_path -near_args = raw_input("Enter args, default: '{}': ".format(default_near_args)) or default_near_args +near_user = raw_input("Enter user to run service, default: '{}': ".format( + default_near_user)) or default_near_user +near_path = raw_input("Enter the nearcore binary path, default '{}': ".format( + default_near_path if default_near_path else "")) or default_near_path +near_args = raw_input("Enter args, default: '{}': ".format( + default_near_args)) or default_near_args template = open(join(dirname(__file__), './near.service.template')).read() -service_file = template.format(exec_start=near_path + " " + near_args, user=near_user) +service_file = template.format(exec_start=near_path + " " + near_args, + user=near_user) service_file_path = '/etc/systemd/system/near.service' with open(service_file_path, 'w') as f: f.write(service_file) st = os.stat(service_file_path) -os.chmod(service_file_path, st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH) +os.chmod(service_file_path, + st.st_mode | stat.S_IEXEC | stat.S_IXGRP | stat.S_IXOTH) print(""" Service created at {}. 
To start: sudo systemctl start near diff --git a/scripts/migrations/10-gas-price-fix.py b/scripts/migrations/10-gas-price-fix.py index 6bd7c6ffaa9..e97cc740cb8 100644 --- a/scripts/migrations/10-gas-price-fix.py +++ b/scripts/migrations/10-gas-price-fix.py @@ -11,7 +11,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 9 diff --git a/scripts/migrations/11-runtime-cost-adjustment.py b/scripts/migrations/11-runtime-cost-adjustment.py index c7df7563f7f..dd882d4946f 100644 --- a/scripts/migrations/11-runtime-cost-adjustment.py +++ b/scripts/migrations/11-runtime-cost-adjustment.py @@ -11,7 +11,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 10 @@ -21,138 +22,135 @@ "send_sir": 108059500000, "send_not_sir": 108059500000, "execution": 108059500000 - }, - "data_receipt_creation_config": { + }, + "data_receipt_creation_config": { "base_cost": { - "send_sir": 4697339419375, - "send_not_sir": 4697339419375, - "execution": 4697339419375 + "send_sir": 4697339419375, + "send_not_sir": 4697339419375, + "execution": 4697339419375 }, "cost_per_byte": { - "send_sir": 59357464, - "send_not_sir": 59357464, - "execution": 59357464 + "send_sir": 59357464, + "send_not_sir": 59357464, + "execution": 59357464 } - }, - "action_creation_config": { + }, + "action_creation_config": { "create_account_cost": { - "send_sir": 99607375000, - "send_not_sir": 99607375000, - "execution": 99607375000 + "send_sir": 99607375000, + "send_not_sir": 99607375000, + "execution": 99607375000 }, "deploy_contract_cost": { - "send_sir": 184765750000, - "send_not_sir": 
184765750000, - "execution": 184765750000 + "send_sir": 184765750000, + "send_not_sir": 184765750000, + "execution": 184765750000 }, "deploy_contract_cost_per_byte": { - "send_sir": 6812999, - "send_not_sir": 6812999, - "execution": 6812999 + "send_sir": 6812999, + "send_not_sir": 6812999, + "execution": 6812999 }, "function_call_cost": { - "send_sir": 2319861500000, - "send_not_sir": 2319861500000, - "execution": 2319861500000 + "send_sir": 2319861500000, + "send_not_sir": 2319861500000, + "execution": 2319861500000 }, "function_call_cost_per_byte": { - "send_sir": 2235934, - "send_not_sir": 2235934, - "execution": 2235934 + "send_sir": 2235934, + "send_not_sir": 2235934, + "execution": 2235934 }, "transfer_cost": { - "send_sir": 115123062500, - "send_not_sir": 115123062500, - "execution": 115123062500 + "send_sir": 115123062500, + "send_not_sir": 115123062500, + "execution": 115123062500 }, "stake_cost": { - "send_sir": 141715687500, - "send_not_sir": 141715687500, - "execution": 102217625000 + "send_sir": 141715687500, + "send_not_sir": 141715687500, + "execution": 102217625000 }, "add_key_cost": { - "full_access_cost": { - "send_sir": 101765125000, - "send_not_sir": 101765125000, - "execution": 101765125000 - }, - "function_call_cost": { - "send_sir": 102217625000, - "send_not_sir": 102217625000, - "execution": 102217625000 - }, - "function_call_cost_per_byte": { - "send_sir": 1925331, - "send_not_sir": 1925331, - "execution": 1925331 - } + "full_access_cost": { + "send_sir": 101765125000, + "send_not_sir": 101765125000, + "execution": 101765125000 + }, + "function_call_cost": { + "send_sir": 102217625000, + "send_not_sir": 102217625000, + "execution": 102217625000 + }, + "function_call_cost_per_byte": { + "send_sir": 1925331, + "send_not_sir": 1925331, + "execution": 1925331 + } }, "delete_key_cost": { - "send_sir": 94946625000, - "send_not_sir": 94946625000, - "execution": 94946625000 + "send_sir": 94946625000, + "send_not_sir": 94946625000, + "execution": 
94946625000 }, "delete_account_cost": { - "send_sir": 147489000000, - "send_not_sir": 147489000000, - "execution": 147489000000 + "send_sir": 147489000000, + "send_not_sir": 147489000000, + "execution": 147489000000 } - }, - "storage_usage_config": { + }, + "storage_usage_config": { "num_bytes_account": 100, "num_extra_bytes_record": 40 - }, - "burnt_gas_reward": [ - 3, - 10 - ] + }, + "burnt_gas_reward": [3, 10] } config['runtime_config']['wasm_config']['ext_costs'] = { - "base": 265261758, - "read_memory_base": 2584050225, - "read_memory_byte": 3801396, - "write_memory_base": 2780731725, - "write_memory_byte": 2723859, - "read_register_base": 2493624561, - "read_register_byte": 98622, - "write_register_base": 2840975211, - "write_register_byte": 3801645, - "utf8_decoding_base": 3110963061, - "utf8_decoding_byte": 289342653, - "utf16_decoding_base": 3593689800, - "utf16_decoding_byte": 167519322, - "sha256_base": 4530046500, - "sha256_byte": 24116301, - "keccak256_base": 5867223186, - "keccak256_byte": 21469644, - "keccak512_base": 5798128650, - "keccak512_byte": 36651981, - "log_base": 2408221236, - "log_byte": 15863835, - "storage_write_base": 45187219125, - "storage_write_key_byte": 66445653, - "storage_write_value_byte": 29682120, - "storage_write_evicted_byte": 28939782, - "storage_read_base": 32029296375, - "storage_read_key_byte": 28463997, - "storage_read_value_byte": 3289884, - "storage_remove_base": 35876668875, - "storage_remove_key_byte": 35342424, - "storage_remove_ret_value_byte": 7303842, - "storage_has_key_base": 31315025250, - "storage_has_key_byte": 28376217, - "storage_iter_create_prefix_base": 0, - "storage_iter_create_prefix_byte": 0, - "storage_iter_create_range_base": 0, - "storage_iter_create_from_byte": 0, - "storage_iter_create_to_byte": 0, - "storage_iter_next_base": 0, - "storage_iter_next_key_byte": 0, - "storage_iter_next_value_byte": 0, - "touching_trie_node": 5764118571, - "promise_and_base": 1473816795, - "promise_and_per_promise": 
5613432, - "promise_return": 558292404 -}; + "base": 265261758, + "read_memory_base": 2584050225, + "read_memory_byte": 3801396, + "write_memory_base": 2780731725, + "write_memory_byte": 2723859, + "read_register_base": 2493624561, + "read_register_byte": 98622, + "write_register_base": 2840975211, + "write_register_byte": 3801645, + "utf8_decoding_base": 3110963061, + "utf8_decoding_byte": 289342653, + "utf16_decoding_base": 3593689800, + "utf16_decoding_byte": 167519322, + "sha256_base": 4530046500, + "sha256_byte": 24116301, + "keccak256_base": 5867223186, + "keccak256_byte": 21469644, + "keccak512_base": 5798128650, + "keccak512_byte": 36651981, + "log_base": 2408221236, + "log_byte": 15863835, + "storage_write_base": 45187219125, + "storage_write_key_byte": 66445653, + "storage_write_value_byte": 29682120, + "storage_write_evicted_byte": 28939782, + "storage_read_base": 32029296375, + "storage_read_key_byte": 28463997, + "storage_read_value_byte": 3289884, + "storage_remove_base": 35876668875, + "storage_remove_key_byte": 35342424, + "storage_remove_ret_value_byte": 7303842, + "storage_has_key_base": 31315025250, + "storage_has_key_byte": 28376217, + "storage_iter_create_prefix_base": 0, + "storage_iter_create_prefix_byte": 0, + "storage_iter_create_range_base": 0, + "storage_iter_create_from_byte": 0, + "storage_iter_create_to_byte": 0, + "storage_iter_next_base": 0, + "storage_iter_next_key_byte": 0, + "storage_iter_next_value_byte": 0, + "touching_trie_node": 5764118571, + "promise_and_base": 1473816795, + "promise_and_per_promise": 5613432, + "promise_return": 558292404 +} json.dump(config, open(os.path.join(output_home, 'output.json'), 'w'), indent=2) diff --git a/scripts/migrations/12-fix-inflation.py b/scripts/migrations/12-fix-inflation.py index 3cddc8e1a29..e168c36d9ad 100644 --- a/scripts/migrations/12-fix-inflation.py +++ b/scripts/migrations/12-fix-inflation.py @@ -11,7 +11,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = 
json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 11 diff --git a/scripts/migrations/13-block-merkle-root.py b/scripts/migrations/13-block-merkle-root.py index 0e68ddc3154..204417564ed 100644 --- a/scripts/migrations/13-block-merkle-root.py +++ b/scripts/migrations/13-block-merkle-root.py @@ -13,7 +13,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 12 diff --git a/scripts/migrations/5-preserve-height.py b/scripts/migrations/5-preserve-height.py index 16b1def1c94..83be6dd38a5 100644 --- a/scripts/migrations/5-preserve-height.py +++ b/scripts/migrations/5-preserve-height.py @@ -6,8 +6,12 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output_config.json')), object_pairs_hook=OrderedDict) -records_fname = [filename for filename in os.listdir(home) if filename.startswith('output_records_')] +config = json.load(open(os.path.join(home, 'output_config.json')), + object_pairs_hook=OrderedDict) +records_fname = [ + filename for filename in os.listdir(home) + if filename.startswith('output_records_') +] assert len(records_fname) == 1, "Not found records file or found too many" records = json.load(open(os.path.join(home, records_fname[0]))) @@ -61,4 +65,3 @@ config['records'] = records json.dump(config, open(os.path.join(output_home, 'output.json'), 'w'), indent=2) - diff --git a/scripts/migrations/6-state-stake.py b/scripts/migrations/6-state-stake.py index e115750c01e..3e046319086 100644 --- a/scripts/migrations/6-state-stake.py +++ b/scripts/migrations/6-state-stake.py @@ -20,7 +20,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = 
json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 5 @@ -35,6 +36,7 @@ storage_usage = record["Account"]["account"]["storage_usage"] # If account doesn't have balance to cover storage, we will mint some tokens to cover for it. if storage_usage * 1e20 > int(record["Account"]["account"]["amount"]): - record["Account"]["account"]["amount"] = str(storage_usage) + "0" * 20 + record["Account"]["account"]["amount"] = str( + storage_usage) + "0" * 20 json.dump(config, open(os.path.join(output_home, 'output.json'), 'w'), indent=2) diff --git a/scripts/migrations/7-account-registrar.py b/scripts/migrations/7-account-registrar.py index e1d9973bc8c..5ff7ad53c58 100644 --- a/scripts/migrations/7-account-registrar.py +++ b/scripts/migrations/7-account-registrar.py @@ -17,7 +17,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 6 @@ -33,15 +34,18 @@ # Removing existing `registrar` account. 
for record in config['records']: - if ('Account' in record) and (record['Account']['account_id'] == 'registrar'): + if ('Account' in record) and ( + record['Account']['account_id'] == 'registrar'): continue - if ('AccessKey' in record) and (record['AccessKey']['account_id'] == 'registrar'): + if ('AccessKey' in record) and ( + record['AccessKey']['account_id'] == 'registrar'): continue - if ('AccessKey' in record) and (record['AccessKey']['account_id'] == 'near'): + if ('AccessKey' in record) and ( + record['AccessKey']['account_id'] == 'near'): near_access_key_records.append(record['AccessKey']) records.append(record) -assert(len(near_access_key_records) > 0) +assert (len(near_access_key_records) > 0) records.append({ 'Account': { @@ -65,7 +69,6 @@ } }) - config['records'] = records json.dump(config, open(os.path.join(output_home, 'output.json'), 'w'), indent=2) diff --git a/scripts/migrations/8-fraction.py b/scripts/migrations/8-fraction.py index 957dc44c7c0..801ab671f39 100644 --- a/scripts/migrations/8-fraction.py +++ b/scripts/migrations/8-fraction.py @@ -6,7 +6,6 @@ and `max_inflation_rate` to fractions. 
""" - import sys import os import json @@ -15,7 +14,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 7 diff --git a/scripts/migrations/9-state-record-data.py b/scripts/migrations/9-state-record-data.py index 814a2554f67..7e216516f8a 100644 --- a/scripts/migrations/9-state-record-data.py +++ b/scripts/migrations/9-state-record-data.py @@ -18,7 +18,8 @@ home = sys.argv[1] output_home = sys.argv[2] -config = json.load(open(os.path.join(home, 'output.json')), object_pairs_hook=OrderedDict) +config = json.load(open(os.path.join(home, 'output.json')), + object_pairs_hook=OrderedDict) assert config['protocol_version'] == 8 @@ -32,7 +33,7 @@ key = base64.b64decode(record["Data"].pop("key")) # Splitting key separator_pos = key.find(b',') - assert(separator_pos > 0) + assert (separator_pos > 0) account_id = key[1:separator_pos] data_key = key[separator_pos + 1:] record["Data"]["account_id"] = account_id.decode('utf-8') @@ -44,11 +45,13 @@ for record in config['records']: if "Account" in record: - if record["Account"]["account"]["locked"] != "0" and record["Account"]["account_id"] not in validators: + if record["Account"]["account"]["locked"] != "0" and record["Account"][ + "account_id"] not in validators: a = int(record["Account"]["account"]["amount"]) l = int(record["Account"]["account"]["locked"]) record["Account"]["account"]["locked"] = str(0) - record["Account"]["account"]["amount"] = str(a+l) + record["Account"]["account"]["amount"] = str(a + l) elif record["Account"]["account_id"] in validators: - validators[record["Account"]["account_id"]]["amount"] = record["Account"]["account"]["locked"] + validators[record["Account"]["account_id"]]["amount"] = record[ + "Account"]["account"]["locked"] json.dump(config, open(os.path.join(output_home, 
'output.json'), 'w'), indent=2) diff --git a/scripts/nodelib.py b/scripts/nodelib.py index 971be659429..ceb15cb6635 100755 --- a/scripts/nodelib.py +++ b/scripts/nodelib.py @@ -11,84 +11,108 @@ except NameError: pass - -USER = str(os.getuid())+':'+str(os.getgid()) +USER = str(os.getuid()) + ':' + str(os.getgid()) +"""Installs cargo/Rust.""" -"""Installs cargo/Rust.""" def install_cargo(): try: subprocess.call([os.path.expanduser('~/.cargo/bin/cargo'), '--version']) except OSError: print("Installing Rust...") - subprocess.check_output('curl https://sh.rustup.rs -sSf | sh -s -- -y', shell=True) + subprocess.check_output('curl https://sh.rustup.rs -sSf | sh -s -- -y', + shell=True) """Inits the node configuration using docker.""" + + def docker_init(image, home_dir, init_flags): subprocess.check_output(['mkdir', '-p', home_dir]) - subprocess.check_output(['docker', 'run', '-u', USER, - '-v', '%s:/srv/near' % home_dir, - '-v', os.path.abspath('near/res') + ':/near/res', - image, 'near', '--home=/srv/near', 'init'] + init_flags) + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-v', + '%s:/srv/near' % home_dir, '-v', + os.path.abspath('near/res') + + ':/near/res', image, 'near', '--home=/srv/near', 'init' + ] + init_flags) """Inits the node configuration using local build.""" + + def nodocker_init(home_dir, is_release, init_flags): target = './target/%s/near' % ('release' if is_release else 'debug') - subprocess.call([target, - '--home=%s' % home_dir, 'init'] + init_flags) + subprocess.call([target, '--home=%s' % home_dir, 'init'] + init_flags) """Retrieve requested chain id from the flags.""" + + def get_chain_id_from_flags(flags): chain_id_flags = [flag for flag in flags if flag.startswith('--chain-id=')] if len(chain_id_flags) == 1: return chain_id_flags[0][len('--chain-id='):] return '' + """Compile given package using cargo""" + + def compile_package(package_name, is_release): flags = ['-p', package_name] if is_release: flags = ['--release'] + flags - 
code = subprocess.call( - [os.path.expanduser('cargo'), 'build'] + flags) + code = subprocess.call([os.path.expanduser('cargo'), 'build'] + flags) if code != 0: print("Compilation failed, aborting") exit(code) """Checks if there is already everything setup on this machine, otherwise sets up NEAR node.""" -def check_and_setup(nodocker, is_release, image, home_dir, init_flags, no_gas_price=False): + + +def check_and_setup(nodocker, + is_release, + image, + home_dir, + init_flags, + no_gas_price=False): if nodocker: compile_package('neard', is_release) chain_id = get_chain_id_from_flags(init_flags) if os.path.exists(os.path.join(home_dir, 'config.json')): - genesis_config = json.loads(open(os.path.join(os.path.join(home_dir, 'genesis.json'))).read()) - if chain_id !='' and genesis_config['chain_id'] != chain_id: + genesis_config = json.loads( + open(os.path.join(os.path.join(home_dir, 'genesis.json'))).read()) + if chain_id != '' and genesis_config['chain_id'] != chain_id: if chain_id == 'testnet': - print("Folder %s already has network configuration for %s, which is not the official TestNet.\n" - "Use ./scripts/start_localnet.py instead to keep running with existing configuration.\n" - "If you want to run a different network, either specify different --home or remove %s to start from scratch." % (home_dir, genesis_config['chain_id'], home_dir)) + print( + "Folder %s already has network configuration for %s, which is not the official TestNet.\n" + "Use ./scripts/start_localnet.py instead to keep running with existing configuration.\n" + "If you want to run a different network, either specify different --home or remove %s to start from scratch." 
+ % (home_dir, genesis_config['chain_id'], home_dir)) elif genesis_config['chain_id'] == 'testnet': - print("Folder %s already has network configuration for the official TestNet.\n" - "Use ./scripts/start_testnet.py instead to keep running it.\n" - "If you want to run a different network, either specify different --home or remove %s to start from scratch" % (home_dir, home_dir)) + print( + "Folder %s already has network configuration for the official TestNet.\n" + "Use ./scripts/start_testnet.py instead to keep running it.\n" + "If you want to run a different network, either specify different --home or remove %s to start from scratch" + % (home_dir, home_dir)) elif chain_id != '': - print("Folder %s already has network configuration for %s. Use ./scripts/start_localnet.py to continue running it." % (home_dir, genesis_config['chain_id'])) + print( + "Folder %s already has network configuration for %s. Use ./scripts/start_localnet.py to continue running it." + % (home_dir, genesis_config['chain_id'])) exit(1) - print("Using existing node configuration from %s for %s" % (home_dir, genesis_config['chain_id'])) + print("Using existing node configuration from %s for %s" % + (home_dir, genesis_config['chain_id'])) return print("Setting up network configuration.") if len([x for x in init_flags if x.startswith('--account-id')]) == 0: prompt = "Enter your account ID" if chain_id != '': - prompt += " (leave empty if not going to be a validator): " + prompt += " (leave empty if not going to be a validator): " else: - prompt += ": " + prompt += ": " account_id = input(prompt) init_flags.append('--account-id=%s' % account_id) @@ -99,8 +123,11 @@ def check_and_setup(nodocker, is_release, image, home_dir, init_flags, no_gas_pr print('Downloading testnet genesis records') url = 'https://s3-us-west-1.amazonaws.com/testnet.nearprotocol.com/testnet_genesis_records_%s.json' % testnet_genesis_hash urllib.urlretrieve(url, testnet_genesis_records) - init_flags.extend(['--genesis-config', 
'near/res/testnet_genesis_config.json', '--genesis-records', testnet_genesis_records, - '--genesis-hash', testnet_genesis_hash]) + init_flags.extend([ + '--genesis-config', 'near/res/testnet_genesis_config.json', + '--genesis-records', testnet_genesis_records, '--genesis-hash', + testnet_genesis_hash + ]) if nodocker: nodocker_init(home_dir, is_release, init_flags) @@ -123,55 +150,78 @@ def print_staking_key(home_dir): if not key_file['account_id']: print("Node is not staking. Re-run init to specify staking account.") return - print("Stake for user '%s' with '%s'" % (key_file['account_id'], key_file['public_key'])) + print("Stake for user '%s' with '%s'" % + (key_file['account_id'], key_file['public_key'])) """Stops and removes given docker container.""" + + def docker_stop_if_exists(name): try: - subprocess.Popen(['docker', 'stop', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() + subprocess.Popen(['docker', 'stop', name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() except subprocess.CalledProcessError: pass try: - subprocess.Popen(['docker', 'rm', name], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate() + subprocess.Popen(['docker', 'rm', name], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE).communicate() except subprocess.CalledProcessError: pass + """Checks the ports saved in config.json""" + + def get_port(home_dir, net): config = json.load(open(os.path.join(home_dir, 'config.json'))) p = config[net]['addr'][config[net]['addr'].find(':') + 1:] return p + ":" + p + """Runs NEAR core inside the docker container for isolation and easy update with Watchtower.""" + + def run_docker(image, home_dir, boot_nodes, telemetry_url, verbose): print("Starting NEAR client and Watchtower dockers...") docker_stop_if_exists('watchtower') docker_stop_if_exists('nearcore') # Start nearcore container, mapping home folder and ports. 
- envs = ['-e', 'BOOT_NODES=%s' % boot_nodes, '-e', 'TELEMETRY_URL=%s' % telemetry_url] + envs = [ + '-e', + 'BOOT_NODES=%s' % boot_nodes, '-e', + 'TELEMETRY_URL=%s' % telemetry_url + ] rpc_port = get_port(home_dir, 'rpc') network_port = get_port(home_dir, 'network') if verbose: envs.extend(['-e', 'VERBOSE=1']) subprocess.check_output(['mkdir', '-p', home_dir]) - subprocess.check_output(['docker', 'run', '-u', USER, - '-d', '-p', rpc_port, '-p', network_port, '-v', '%s:/srv/near' % home_dir, - '-v', '/tmp:/tmp', - '--ulimit', 'core=-1', - '--name', 'nearcore', '--restart', 'unless-stopped'] + - envs + [image]) + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-d', '-p', rpc_port, '-p', network_port, + '-v', + '%s:/srv/near' % home_dir, '-v', '/tmp:/tmp', '--ulimit', 'core=-1', + '--name', 'nearcore', '--restart', 'unless-stopped' + ] + envs + [image]) # Start Watchtower that will automatically update the nearcore container when new version appears. - subprocess.check_output(['docker', 'run', '-u', USER, - '-d', '--restart', 'unless-stopped', '--name', 'watchtower', - '-v', '/var/run/docker.sock:/var/run/docker.sock', - 'v2tec/watchtower', image]) - print("Node is running! \nTo check logs call: docker logs --follow nearcore") + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-d', '--restart', 'unless-stopped', + '--name', 'watchtower', '-v', + '/var/run/docker.sock:/var/run/docker.sock', 'v2tec/watchtower', image + ]) + print( + "Node is running! 
\nTo check logs call: docker logs --follow nearcore") + """Runs NEAR core outside of docker.""" + + def run_nodocker(home_dir, is_release, boot_nodes, telemetry_url, verbose): print("Starting NEAR client...") - print("Autoupdate is not supported at the moment for runs outside of docker") + print( + "Autoupdate is not supported at the moment for runs outside of docker") cmd = ['./target/%s/near' % ('release' if is_release else 'debug')] cmd.extend(['--home', home_dir]) if verbose: @@ -186,7 +236,15 @@ def run_nodocker(home_dir, is_release, boot_nodes, telemetry_url, verbose): print("\nStopping NEARCore.") -def setup_and_run(nodocker, is_release, image, home_dir, init_flags, boot_nodes, telemetry_url, verbose=False, no_gas_price=False): +def setup_and_run(nodocker, + is_release, + image, + home_dir, + init_flags, + boot_nodes, + telemetry_url, + verbose=False, + no_gas_price=False): if nodocker: install_cargo() else: @@ -197,7 +255,8 @@ def setup_and_run(nodocker, is_release, image, home_dir, init_flags, boot_nodes, print("Failed to fetch docker containers: %s" % exc) exit(1) - check_and_setup(nodocker, is_release, image, home_dir, init_flags, no_gas_price) + check_and_setup(nodocker, is_release, image, home_dir, init_flags, + no_gas_price) print_staking_key(home_dir) @@ -208,14 +267,20 @@ def setup_and_run(nodocker, is_release, image, home_dir, init_flags, boot_nodes, """Stops docker for Nearcore and watchtower if they are running.""" + + def stop_docker(): docker_stop_if_exists('watchtower') docker_stop_if_exists('nearcore') + def generate_node_key(home, is_release, nodocker, image): print("Generating node key...") if nodocker: - cmd = ['./target/%s/keypair-generator' % ('release' if is_release else 'debug')] + cmd = [ + './target/%s/keypair-generator' % + ('release' if is_release else 'debug') + ] cmd.extend(['--home', home]) cmd.extend(['--generate-config']) cmd.extend(['node-key']) @@ -225,13 +290,21 @@ def generate_node_key(home, is_release, nodocker, image): 
print("\nStopping NEARCore.") else: subprocess.check_output(['mkdir', '-p', home]) - subprocess.check_output(['docker', 'run', '-u', USER, '-v', '%s:/srv/keypair-generator' % home, image, 'keypair-generator', '--home=/srv/keypair-generator', '--generate-config', 'node-key']) + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-v', + '%s:/srv/keypair-generator' % home, image, 'keypair-generator', + '--home=/srv/keypair-generator', '--generate-config', 'node-key' + ]) print("Node key generated") + def generate_validator_key(home, is_release, nodocker, image, account_id): print("Generating validator key...") if nodocker: - cmd = ['./target/%s/keypair-generator' % ('release' if is_release else 'debug')] + cmd = [ + './target/%s/keypair-generator' % + ('release' if is_release else 'debug') + ] cmd.extend(['--home', home]) cmd.extend(['--generate-config']) cmd.extend(['--account-id', account_id]) @@ -242,13 +315,22 @@ def generate_validator_key(home, is_release, nodocker, image, account_id): print("\nStopping NEARCore.") else: subprocess.check_output(['mkdir', '-p', home]) - subprocess.check_output(['docker', 'run', '-u', USER, '-v', '%s:/srv/keypair-generator' % home, image, 'keypair-generator', '--home=/srv/keypair-generator', '--generate-config', '--account-id=%s' % account_id, 'validator-key']) + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-v', + '%s:/srv/keypair-generator' % home, image, 'keypair-generator', + '--home=/srv/keypair-generator', '--generate-config', + '--account-id=%s' % account_id, 'validator-key' + ]) print("Validator key generated") + def generate_signer_key(home, is_release, nodocker, image, account_id): print("Generating signer keys...") if nodocker: - cmd = ['./target/%s/keypair-generator' % ('release' if is_release else 'debug')] + cmd = [ + './target/%s/keypair-generator' % + ('release' if is_release else 'debug') + ] cmd.extend(['--home', home]) cmd.extend(['--generate-config']) cmd.extend(['--account-id', account_id]) @@ 
-259,11 +341,17 @@ def generate_signer_key(home, is_release, nodocker, image, account_id): print("\nStopping NEARCore.") else: subprocess.check_output(['mkdir', '-p', home]) - subprocess.check_output(['docker', 'run', '-u', USER, '-v', '%s:/srv/keypair-generator' % home, image, 'keypair-generator', '--home=/srv/keypair-generator', '--generate-config', '--account-id=%s' % account_id, 'signer-keys']) + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-v', + '%s:/srv/keypair-generator' % home, image, 'keypair-generator', + '--home=/srv/keypair-generator', '--generate-config', + '--account-id=%s' % account_id, 'signer-keys' + ]) print("Signer keys generated") -def initialize_keys(home, is_release, nodocker, image, account_id, generate_signer_keys): +def initialize_keys(home, is_release, nodocker, image, account_id, + generate_signer_keys): if nodocker: install_cargo() compile_package('keypair-generator', is_release) @@ -279,15 +367,20 @@ def initialize_keys(home, is_release, nodocker, image, account_id, generate_sign if account_id: generate_validator_key(home, is_release, nodocker, image, account_id) + def create_genesis(home, is_release, nodocker, image, chain_id, tracked_shards): if os.path.exists(os.path.join(home, 'genesis.json')): print("Genesis already exists") return print("Creating genesis...") if not os.path.exists(os.path.join(home, 'accounts.csv')): - raise Exception("Failed to generate genesis: accounts.csv does not exist") + raise Exception( + "Failed to generate genesis: accounts.csv does not exist") if nodocker: - cmd = ['./target/%s/genesis-csv-to-json' % ('release' if is_release else 'debug')] + cmd = [ + './target/%s/genesis-csv-to-json' % + ('release' if is_release else 'debug') + ] cmd.extend(['--home', home]) cmd.extend(['--chain-id', chain_id]) if len(tracked_shards) > 0: @@ -298,10 +391,18 @@ def create_genesis(home, is_release, nodocker, image, chain_id, tracked_shards): print("\nStopping NEARCore.") else: 
subprocess.check_output(['mkdir', '-p', home]) - subprocess.check_output(['docker', 'run', '-u', USER, '-v', '%s:/srv/genesis-csv-to-json' % home, image, 'genesis-csv-to-json', '--home=/srv/genesis-csv-to-json', '--chain-id=%s' % chain_id, '--tracked-shards=%s' % tracked_shards]) + subprocess.check_output([ + 'docker', 'run', '-u', USER, '-v', + '%s:/srv/genesis-csv-to-json' % home, image, 'genesis-csv-to-json', + '--home=/srv/genesis-csv-to-json', + '--chain-id=%s' % chain_id, + '--tracked-shards=%s' % tracked_shards + ]) print("Genesis created") -def start_stakewars(home, is_release, nodocker, image, telemetry_url, verbose, tracked_shards): + +def start_stakewars(home, is_release, nodocker, image, telemetry_url, verbose, + tracked_shards): if nodocker: install_cargo() compile_package('genesis-csv-to-json', is_release) @@ -312,8 +413,17 @@ def start_stakewars(home, is_release, nodocker, image, telemetry_url, verbose, t except subprocess.CalledProcessError as exc: print("Failed to fetch docker containers: %s" % exc) exit(1) - create_genesis(home, is_release, nodocker, image, 'stakewars', tracked_shards) + create_genesis(home, is_release, nodocker, image, 'stakewars', + tracked_shards) if nodocker: - run_nodocker(home, is_release, boot_nodes='', telemetry_url=telemetry_url, verbose=verbose) + run_nodocker(home, + is_release, + boot_nodes='', + telemetry_url=telemetry_url, + verbose=verbose) else: - run_docker(image, home, boot_nodes='', telemetry_url=telemetry_url, verbose=verbose) + run_docker(image, + home, + boot_nodes='', + telemetry_url=telemetry_url, + verbose=verbose) diff --git a/scripts/parallel_coverage.py b/scripts/parallel_coverage.py index 91ae04e6498..393f9f58a56 100644 --- a/scripts/parallel_coverage.py +++ b/scripts/parallel_coverage.py @@ -23,23 +23,26 @@ def coverage(test_binary): if not os.path.isfile(test_binary): return -1, '', f'{test_binary} does not exist' - - p = subprocess.Popen(['docker', 'run', '--rm', - '--security-opt', 
'seccomp=unconfined', - '-u', f'{os.getuid()}:{os.getgid()}', - '-v', f'{test_binary}:{test_binary}', - '-v', f'{src_dir}:{src_dir}', - '-v', f'{coverage_output}:{coverage_output}', - 'nearprotocol/near-coverage-runtime', - 'bash', '-c', f'/usr/local/bin/kcov --include-pattern=nearcore --exclude-pattern=.so --verify {coverage_output} {test_binary}'], - stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + + p = subprocess.Popen([ + 'docker', 'run', '--rm', '--security-opt', 'seccomp=unconfined', '-u', + f'{os.getuid()}:{os.getgid()}', '-v', f'{test_binary}:{test_binary}', + '-v', f'{src_dir}:{src_dir}', '-v', + f'{coverage_output}:{coverage_output}', + 'nearprotocol/near-coverage-runtime', 'bash', '-c', + f'/usr/local/bin/kcov --include-pattern=nearcore --exclude-pattern=.so --verify {coverage_output} {test_binary}' + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) stdout, stderr = p.communicate() return (p.returncode, stdout, stderr) def clean_coverage(): subprocess.check_output(f'rm -rf {current_path}/../target/cov*', shell=True) - subprocess.check_output(f'rm -rf {current_path}/../target/merged_coverage', shell=True) + subprocess.check_output(f'rm -rf {current_path}/../target/merged_coverage', + shell=True) def coverage_dir(i): @@ -47,7 +50,12 @@ def coverage_dir(i): def merge_coverage(i, to_merge, j): - p = subprocess.Popen(['kcov', '--merge', os.path.join(coverage_dir(i+1), str(j)), *to_merge], stdout=subprocess.PIPE, stderr=subprocess.PIPE) + p = subprocess.Popen([ + 'kcov', '--merge', + os.path.join(coverage_dir(i + 1), str(j)), *to_merge + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) stdout, stderr = p.communicate() return (p.returncode, stdout, stderr) @@ -71,14 +79,18 @@ def merge_coverage(i, to_merge, j): # Run coverage with ThreadPoolExecutor(max_workers=workers()) as executor: - future_to_binary = {executor.submit(coverage, binary): binary for binary in binaries} + future_to_binary = { + 
executor.submit(coverage, binary): binary for binary in binaries + } for future in as_completed(future_to_binary): binary = future_to_binary[future] result = future.result() if result[0] != 0: print(result[2]) errors = True - print(f'========= error: kcov {binary} fail, exit code {result[0]} cause coverage fail') + print( + f'========= error: kcov {binary} fail, exit code {result[0]} cause coverage fail' + ) else: print(f'========= kcov {binary} done') @@ -97,21 +109,23 @@ def merge_coverage(i, to_merge, j): # ensure the last to merge is not only one cov cov_to_merge[-2] += (cov_to_merge[-1][0],) del cov_to_merge[-1] - + futures = [] for cov in cov_to_merge: - j+=1 + j += 1 futures.append(executor.submit(merge_coverage, i, cov, j)) for f in as_completed(futures): pass - i+=1 + i += 1 merged_coverage = os.path.join(coverage_dir(i), str(j)) print(f'========= coverage merged to {merged_coverage}') - subprocess.check_output(['mv', merged_coverage, f'{current_path}/../merged_coverage']) + subprocess.check_output( + ['mv', merged_coverage, f'{current_path}/../merged_coverage']) subprocess.check_output(f'rm -rf {current_path}/../target/cov*', shell=True) if errors: - print(f'========= some errors in running kcov, coverage maybe inaccurate') \ No newline at end of file + print( + f'========= some errors in running kcov, coverage may be inaccurate') diff --git a/scripts/parallel_run_tests.py b/scripts/parallel_run_tests.py index 6d8ad3d3513..6c1b6a4d7eb 100644 --- a/scripts/parallel_run_tests.py +++ b/scripts/parallel_run_tests.py @@ -18,14 +18,17 @@ def show_test_result(binary, result): clean_binary_tests() run_doc_tests() build_tests() - binaries = test_binaries(exclude=[r'test_regression-.*', r'near_rpc_error_macro-.*']) + binaries = test_binaries( + exclude=[r'test_regression-.*', r'near_rpc_error_macro-.*']) print(f'========= collected {len(binaries)} test binaries:') print('\n'.join(binaries)) completed = 0 fails = [] with ThreadPoolExecutor(max_workers=workers()) as 
executor: - future_to_binary = {executor.submit(run_test, binary): binary for binary in binaries} + future_to_binary = { + executor.submit(run_test, binary): binary for binary in binaries + } for future in as_completed(future_to_binary): completed += 1 binary_full_name = future_to_binary[future] @@ -45,7 +48,9 @@ def show_test_result(binary, result): binary_full_name = f[0] result = f[1] binary = os.path.basename(binary_full_name) - print(f'========= test binary {binary} run in parallel failed, exit code {result[0]}, retry run equentially ...') + print( + f'========= test binary {binary} run in parallel failed, exit code {result[0]}, retry run sequentially ...' + ) result = run_test(binary_full_name, isolate=False) if result[0] != 0: new_fails.append((binary_full_name, result)) @@ -58,7 +63,9 @@ def show_test_result(binary, result): result = f[1] binary = os.path.basename(binary_full_name) show_test_result(binary, result) - new_fail_summary.append(f'========= test binary {binary} run sequentially failed, exit code {result[0]}') + new_fail_summary.append( + f'========= test binary {binary} run sequentially failed, exit code {result[0]}' + ) for s in new_fail_summary: print(s) exit(1) @@ -71,7 +78,9 @@ def show_test_result(binary, result): result = f[1] binary = os.path.basename(binary_full_name) show_test_result(binary, result) - print(f'========= test binary {binary} failed, exit code {result[0]}') + print( + f'========= test binary {binary} failed, exit code {result[0]}' + ) exit(1) else: print("========= all tests passed") diff --git a/scripts/start_localnet.py b/scripts/start_localnet.py index 7dedd48eb21..49150d1c599 100755 --- a/scripts/start_localnet.py +++ b/scripts/start_localnet.py @@ -5,27 +5,45 @@ from nodelib import setup_and_run - if __name__ == "__main__": print("****************************************************") print("* Running NEAR validator node for Local TestNet *") print("****************************************************") parser = 
argparse.ArgumentParser() - parser.add_argument('--local', action='store_true', help='deprecated: use --nodocker') - parser.add_argument('--nodocker', action='store_true', help='If set, compiles and runs the node on the machine directly (not inside the docker).') - parser.add_argument('--debug', action='store_true', help='If set, compiles local nearcore in debug mode') - parser.add_argument('--verbose', action='store_true', help='If set, prints verbose logs') - parser.add_argument('--home', default=os.path.expanduser('~/.near/'), help='Home path for storing configs, keys and chain data (Default: ~/.near)') + parser.add_argument('--local', + action='store_true', + help='deprecated: use --nodocker') + parser.add_argument( + '--nodocker', + action='store_true', + help= + 'If set, compiles and runs the node on the machine directly (not inside the docker).' + ) + parser.add_argument('--debug', + action='store_true', + help='If set, compiles local nearcore in debug mode') + parser.add_argument('--verbose', + action='store_true', + help='If set, prints verbose logs') + parser.add_argument( + '--home', + default=os.path.expanduser('~/.near/'), + help= + 'Home path for storing configs, keys and chain data (Default: ~/.near)') parser.add_argument( - '--image', default='nearprotocol/nearcore', + '--image', + default='nearprotocol/nearcore', help='Image to run in docker (default: nearprotocol/nearcore)') args = parser.parse_args() if args.local: print("Flag --local deprecated, please use --nodocker") nodocker = args.nodocker or args.local - setup_and_run(nodocker, not args.debug, args.image, args.home, + setup_and_run(nodocker, + not args.debug, + args.image, + args.home, init_flags=['--chain-id='], boot_nodes='', telemetry_url='', diff --git a/scripts/start_staging_testnet.py b/scripts/start_staging_testnet.py index 58ea3db1c5f..382631da734 100755 --- a/scripts/start_staging_testnet.py +++ b/scripts/start_staging_testnet.py @@ -5,7 +5,6 @@ from nodelib import setup_and_run - 
if __name__ == "__main__": print("****************************************************") print("* Running NEAR validator node for Staging TestNet *") @@ -19,21 +18,39 @@ TELEMETRY_URL = 'https://explorer.staging.nearprotocol.com/api/nodes' parser = argparse.ArgumentParser() - parser.add_argument('--local', action='store_true', help='deprecated: use --nodocker') - parser.add_argument('--nodocker', action='store_true', help='If set, compiles and runs the node on the machine directly (not inside the docker).') - parser.add_argument('--debug', action='store_true', help='If set, compiles local nearcore in debug mode') - parser.add_argument('--verbose', action='store_true', help='If set, prints verbose logs') - parser.add_argument('--home', default=os.path.expanduser('~/.near/'), help='Home path for storing configs, keys and chain data (Default: ~/.near)') + parser.add_argument('--local', + action='store_true', + help='deprecated: use --nodocker') parser.add_argument( - '--image', default='nearprotocol/nearcore:staging', - help='Image to run in docker (default: nearprotocol/nearcore:staging)') + '--nodocker', + action='store_true', + help= + 'If set, compiles and runs the node on the machine directly (not inside the docker).' 
+ ) + parser.add_argument('--debug', + action='store_true', + help='If set, compiles local nearcore in debug mode') + parser.add_argument('--verbose', + action='store_true', + help='If set, prints verbose logs') parser.add_argument( - '--boot-nodes', default=DEFAULT_BOOT_NODE, - help='Specify boot nodes to load from (Default: %s)' % DEFAULT_BOOT_NODE) + '--home', + default=os.path.expanduser('~/.near/'), + help= + 'Home path for storing configs, keys and chain data (Default: ~/.near)') + parser.add_argument( + '--image', + default='nearprotocol/nearcore:staging', + help='Image to run in docker (default: nearprotocol/nearcore:staging)') + parser.add_argument('--boot-nodes', + default=DEFAULT_BOOT_NODE, + help='Specify boot nodes to load from (Default: %s)' % + DEFAULT_BOOT_NODE) args = parser.parse_args() if args.local: print("Flag --local deprecated, please use --nodocker") nodocker = args.nodocker or args.local - setup_and_run(nodocker, not args.debug, args.image, args.home, ['--chain-id=staging'], - args.boot_nodes, TELEMETRY_URL, args.verbose) + setup_and_run(nodocker, not args.debug, args.image, args.home, + ['--chain-id=staging'], args.boot_nodes, TELEMETRY_URL, + args.verbose) diff --git a/scripts/start_stakewars.py b/scripts/start_stakewars.py index 48766ff9ce8..dccc64972ae 100755 --- a/scripts/start_stakewars.py +++ b/scripts/start_stakewars.py @@ -5,22 +5,51 @@ from nodelib import setup_and_run, initialize_keys, start_stakewars - if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--local', action='store_true', help='deprecated: use --nodocker') - parser.add_argument('--nodocker', action='store_true', help='If set, compiles and runs the node on the machine directly (not inside the docker).') - parser.add_argument('--debug', action='store_true', help='If set, compiles local nearcore in debug mode') - parser.add_argument('--verbose', action='store_true', help='If set, prints verbose logs') - parser.add_argument('--home', 
default=os.path.expanduser('~/.near/'), help='Home path for storing configs, keys and chain data (Default: ~/.near)') - parser.add_argument('--init', action='store_true', help='If set, initialize the home dir by generating validator key and node key') - parser.add_argument('--signer-keys', action='store_true', help='If set, generate signer keys for account specified') - parser.add_argument('--account-id', default='', help='If set, the account id will be used for running a validator') + parser.add_argument('--local', + action='store_true', + help='deprecated: use --nodocker') + parser.add_argument( + '--nodocker', + action='store_true', + help= + 'If set, compiles and runs the node on the machine directly (not inside the docker).' + ) + parser.add_argument('--debug', + action='store_true', + help='If set, compiles local nearcore in debug mode') + parser.add_argument('--verbose', + action='store_true', + help='If set, prints verbose logs') + parser.add_argument( + '--home', + default=os.path.expanduser('~/.near/'), + help= + 'Home path for storing configs, keys and chain data (Default: ~/.near)') + parser.add_argument( + '--init', + action='store_true', + help= + 'If set, initialize the home dir by generating validator key and node key' + ) + parser.add_argument( + '--signer-keys', + action='store_true', + help='If set, generate signer keys for account specified') + parser.add_argument( + '--account-id', + default='', + help='If set, the account id will be used for running a validator') parser.add_argument( - '--image', default='nearprotocol/nearcore:stakewars', - help='Image to run in docker (default: nearprotocol/nearcore:stakewars)') - parser.add_argument('--tracked-shards', default='', help='The shards that this node wants to track') + '--image', + default='nearprotocol/nearcore:stakewars', + help='Image to run in docker (default: nearprotocol/nearcore:stakewars)' + ) + parser.add_argument('--tracked-shards', + default='', + help='The shards that this node wants 
to track') args = parser.parse_args() TELEMETRY_URL = 'https://explorer.tatooine.nearprotocol.com/api/nodes' @@ -30,9 +59,16 @@ nodocker = args.nodocker or args.local if args.init: - initialize_keys(args.home, not args.debug, nodocker, args.image, args.account_id, args.signer_keys) + initialize_keys(args.home, not args.debug, nodocker, args.image, + args.account_id, args.signer_keys) else: print("****************************************************") print("* Running NEAR validator node for Stake Wars *") print("****************************************************") - start_stakewars(args.home, not args.debug, nodocker, args.image, telemetry_url=TELEMETRY_URL, verbose=args.verbose, tracked_shards=args.tracked_shards) + start_stakewars(args.home, + not args.debug, + nodocker, + args.image, + telemetry_url=TELEMETRY_URL, + verbose=args.verbose, + tracked_shards=args.tracked_shards) diff --git a/scripts/start_testnet.py b/scripts/start_testnet.py index 628246e7b89..1a3ff7b5bd1 100755 --- a/scripts/start_testnet.py +++ b/scripts/start_testnet.py @@ -5,7 +5,6 @@ from nodelib import setup_and_run - if __name__ == "__main__": print("****************************************************") print("* Running NEAR validator node for Official TestNet *") @@ -20,23 +19,43 @@ TELEMETRY_URL = 'https://explorer.nearprotocol.com/api/nodes' parser = argparse.ArgumentParser() - parser.add_argument('--local', action='store_true', help='deprecated: use --nodocker') - parser.add_argument('--nodocker', action='store_true', help='If set, compiles and runs the node on the machine directly (not inside the docker).') - parser.add_argument('--debug', action='store_true', help='If set, compiles local nearcore in debug mode') - parser.add_argument('--verbose', action='store_true', help='If set, prints verbose logs') - parser.add_argument('--home', default=os.path.expanduser('~/.near/'), help='Home path for storing configs, keys and chain data (Default: ~/.near)') + parser.add_argument('--local', 
+ action='store_true', + help='deprecated: use --nodocker') parser.add_argument( - '--image', default='nearprotocol/nearcore', - help='Image to run in docker (default: nearprotocol/nearcore)') + '--nodocker', + action='store_true', + help= + 'If set, compiles and runs the node on the machine directly (not inside the docker).' + ) + parser.add_argument('--debug', + action='store_true', + help='If set, compiles local nearcore in debug mode') + parser.add_argument('--verbose', + action='store_true', + help='If set, prints verbose logs') parser.add_argument( - '--boot-nodes', default=DEFAULT_BOOT_NODE, - help='Specify boot nodes to load from (Default: %s)' % DEFAULT_BOOT_NODE) + '--home', + default=os.path.expanduser('~/.near/'), + help= + 'Home path for storing configs, keys and chain data (Default: ~/.near)') + parser.add_argument( + '--image', + default='nearprotocol/nearcore', + help='Image to run in docker (default: nearprotocol/nearcore)') + parser.add_argument('--boot-nodes', + default=DEFAULT_BOOT_NODE, + help='Specify boot nodes to load from (Default: %s)' % + DEFAULT_BOOT_NODE) args = parser.parse_args() if args.local: print("Flag --local deprecated, please use --nodocker") nodocker = args.nodocker or args.local - setup_and_run(nodocker, not args.debug, args.image, args.home, + setup_and_run(nodocker, + not args.debug, + args.image, + args.home, init_flags=['--chain-id=testnet'], boot_nodes=args.boot_nodes, telemetry_url=TELEMETRY_URL, diff --git a/scripts/start_unittest.py b/scripts/start_unittest.py index a50085f5ace..4f55bd400b1 100755 --- a/scripts/start_unittest.py +++ b/scripts/start_unittest.py @@ -6,22 +6,39 @@ from nodelib import setup_and_run - if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--local', action='store_true', help='If set, runs in the local version instead of auto-updatable docker. 
Otherwise runs locally') - parser.add_argument('--release', action='store_true', help='If set, compiles nearcore in release mode') - parser.add_argument('--verbose', action='store_true', help='If set, prints verbose logs') parser.add_argument( - '--image', default='nearprotocol/nearcore', + '--local', + action='store_true', + help= + 'If set, runs in the local version instead of auto-updatable docker. Otherwise runs locally' + ) + parser.add_argument('--release', + action='store_true', + help='If set, compiles nearcore in release mode') + parser.add_argument('--verbose', + action='store_true', + help='If set, prints verbose logs') + parser.add_argument( + '--image', + default='nearprotocol/nearcore', help='Image to run in docker (default: nearprotocol/nearcore)') args = parser.parse_args() - print("Starting unittest nodes with test.near account and seed key of alice.near") + print( + "Starting unittest nodes with test.near account and seed key of alice.near" + ) home_dir = os.path.join(os.getcwd(), 'testdir') subprocess.call(['rm', '-rf', home_dir]) - setup_and_run(args.local, args.release, args.image, home_dir, - init_flags=['--chain-id=', '--test-seed=alice.near', '--account-id=test.near', '--fast'], + setup_and_run(args.local, + args.release, + args.image, + home_dir, + init_flags=[ + '--chain-id=', '--test-seed=alice.near', + '--account-id=test.near', '--fast' + ], boot_nodes='', telemetry_url='', verbose=args.verbose, diff --git a/scripts/state/mega-migrate.py b/scripts/state/mega-migrate.py index 3886eae5638..47d36ceba0d 100755 --- a/scripts/state/mega-migrate.py +++ b/scripts/state/mega-migrate.py @@ -18,7 +18,8 @@ import json import os -filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../near/res/testnet.json') +filename = os.path.join(os.path.dirname(os.path.realpath(__file__)), + '../../near/res/testnet.json') q = json.loads(open(filename).read()) config_version = q.get('config_version', 0) @@ -27,9 +28,12 @@ num_sec_per_year = 
31556952 # The rest of `runtime_config` fields are default q['runtime_config'] = { - 'poke_threshold': 24 * 3600, - 'storage_cost_byte_per_block': str(5 * 10**6), - 'account_length_baseline_cost_per_block': str(10**24 * 3**8 // num_sec_per_year), + 'poke_threshold': + 24 * 3600, + 'storage_cost_byte_per_block': + str(5 * 10**6), + 'account_length_baseline_cost_per_block': + str(10**24 * 3**8 // num_sec_per_year), } config_version = 1 diff --git a/scripts/state/split-genesis.py b/scripts/state/split-genesis.py index 3ba0849d854..42ce2c8ea54 100755 --- a/scripts/state/split-genesis.py +++ b/scripts/state/split-genesis.py @@ -16,5 +16,7 @@ records = q['records'] q['records'] = [] -open(os.path.join(os.path.dirname(filename), 'genesis_config.json'), 'w').write(json.dumps(q, indent=2)) -open(os.path.join(os.path.dirname(filename), '_genesis_records.json'), 'w').write(json.dumps(records, indent=2)) +open(os.path.join(os.path.dirname(filename), 'genesis_config.json'), + 'w').write(json.dumps(q, indent=2)) +open(os.path.join(os.path.dirname(filename), '_genesis_records.json'), + 'w').write(json.dumps(records, indent=2)) diff --git a/scripts/state/update_res.py b/scripts/state/update_res.py index 603da439fcc..a1cc2f29fbe 100755 --- a/scripts/state/update_res.py +++ b/scripts/state/update_res.py @@ -9,6 +9,7 @@ import os from collections import OrderedDict + def main(): if len(sys.argv) == 1: update_res() @@ -18,32 +19,46 @@ def main(): print('Usage: update-res.py | update-res.py check') exit(2) -genesis_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../neard/res/genesis_config.json') + +genesis_config_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), + '../../neard/res/genesis_config.json') + def near_init_genesis(): - subprocess.check_output('rm -rf /tmp/near/update_res && mkdir -p /tmp/near/update_res', shell=True) - subprocess.check_output('cargo run -p neard --bin neard -- --home /tmp/near/update_res init --chain-id sample', 
shell=True) - genesis = json.load(open('/tmp/near/update_res/genesis.json'), object_pairs_hook=OrderedDict) + subprocess.check_output( + 'rm -rf /tmp/near/update_res && mkdir -p /tmp/near/update_res', + shell=True) + subprocess.check_output( + 'cargo run -p neard --bin neard -- --home /tmp/near/update_res init --chain-id sample', + shell=True) + genesis = json.load(open('/tmp/near/update_res/genesis.json'), + object_pairs_hook=OrderedDict) genesis['records'] = [] # To avoid neard/res/genesis_config.json doesn't change everytime genesis['genesis_time'] = '1970-01-01T00:00:00.000000000Z' # secret key is seed from test.near - genesis['validators'][0]['public_key'] = 'ed25519:9BmAFNRTa5mRRXpSAm6MxSEeqRASDGNh2FuuwZ4gyxTw' + genesis['validators'][0][ + 'public_key'] = 'ed25519:9BmAFNRTa5mRRXpSAm6MxSEeqRASDGNh2FuuwZ4gyxTw' return genesis + def update_res(): genesis = near_init_genesis() json.dump(genesis, open(genesis_config_path, 'w'), indent=2) print('neard/res/genesis_config.json updated') + def check_res(): genesis = near_init_genesis() - res_genesis_config = json.load(open(genesis_config_path), object_pairs_hook=OrderedDict) + res_genesis_config = json.load(open(genesis_config_path), + object_pairs_hook=OrderedDict) if genesis != res_genesis_config: - print('neard/res/genesis_config.json does not match `near init` generated') + print( + 'neard/res/genesis_config.json does not match `near init` generated' + ) print('Please update by run scripts/state/update_res.py') exit(1) if __name__ == '__main__': - main() \ No newline at end of file + main() diff --git a/scripts/stop.py b/scripts/stop.py index da3979c6a50..6fb369e3ac9 100755 --- a/scripts/stop.py +++ b/scripts/stop.py @@ -5,4 +5,3 @@ if __name__ == "__main__": print("Stopping NEAR docker containers...") nodelib.stop_docker() - diff --git a/scripts/testlib.py b/scripts/testlib.py index e318207901b..e30f556ac2c 100644 --- a/scripts/testlib.py +++ b/scripts/testlib.py @@ -9,10 +9,8 @@ import re import filecmp - 
fcntl.fcntl(1, fcntl.F_SETFL, 0) - current_path = os.path.dirname(os.path.abspath(__file__)) target_debug = os.path.abspath(os.path.join(current_path, '../target/debug')) @@ -48,7 +46,8 @@ def test_binaries(exclude=None): for f in glob.glob(f'{target_debug}/deps/*'): fname = os.path.basename(f) ext = os.path.splitext(fname)[1] - is_near_binary = filecmp.cmp(f, f'{target_debug}/near') or filecmp.cmp(f, f'{target_debug}/neard') + is_near_binary = filecmp.cmp(f, f'{target_debug}/near') or filecmp.cmp( + f, f'{target_debug}/neard') if os.path.isfile(f) and not is_near_binary and ext == '': if not exclude: binaries.append(f) @@ -62,17 +61,19 @@ def test_binaries(exclude=None): def run_test(test_binary, isolate=True): """ Run a single test, save exitcode, stdout and stderr """ if isolate: - cmd = ['docker', 'run', '--rm', - '-u', f'{os.getuid()}:{os.getgid()}', - '-v', f'{test_binary}:{test_binary}', - 'nearprotocol/near-test-runtime', - 'bash', '-c', f'RUST_BACKTRACE=1 {test_binary}'] + cmd = [ + 'docker', 'run', '--rm', '-u', f'{os.getuid()}:{os.getgid()}', '-v', + f'{test_binary}:{test_binary}', 'nearprotocol/near-test-runtime', + 'bash', '-c', f'RUST_BACKTRACE=1 {test_binary}' + ] else: cmd = [test_binary] print(f'========= run test {test_binary}') if os.path.isfile(test_binary): p = subprocess.Popen(cmd, - stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) stdout, stderr = p.communicate() return (p.returncode, stdout, stderr) return -1, '', f'{test_binary} does not exist'