diff --git a/Cargo.lock b/Cargo.lock
index 854868822aa..956c8189e3e 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -672,59 +672,6 @@ name = "core-foundation-sys"
 version = "0.6.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
-[[package]]
-name = "cranelift-bforest"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cranelift-entity 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "cranelift-codegen"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cranelift-bforest 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-codegen-meta 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-codegen-shared 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-entity 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "target-lexicon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "cranelift-codegen-meta"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cranelift-codegen-shared 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-entity 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "cranelift-codegen-shared"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "cranelift-entity"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-
-[[package]]
-name = "cranelift-native"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cranelift-codegen 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "raw-cpuid 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "target-lexicon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "crc32fast"
 version = "1.2.0"
@@ -1221,7 +1168,7 @@ dependencies = [
  "chrono 0.4.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-crypto 0.1.0",
  "near-network 0.1.0",
  "near-primitives 0.1.0",
@@ -1239,7 +1186,7 @@ dependencies = [
  "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "indicatif 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-chain 0.1.0",
  "near-client 0.1.0",
  "near-crypto 0.1.0",
@@ -1578,7 +1525,7 @@ name = "keypair-generator"
 version = "0.1.0"
 dependencies = [
  "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-crypto 0.1.0",
 ]
 
@@ -1681,7 +1628,7 @@ dependencies = [
  "git-version 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-crypto 0.1.0",
  "near-jsonrpc 0.1.0",
  "near-primitives 0.1.0",
@@ -1871,7 +1818,7 @@ dependencies = [
 
 [[package]]
 name = "near"
-version = "0.4.5"
+version = "0.4.6"
 dependencies = [
  "actix 0.8.3 (registry+https://github.com/rust-lang/crates.io-index)",
  "borsh 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -1963,7 +1910,7 @@ dependencies = [
  "kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-chain 0.1.0",
  "near-chunks 0.1.0",
  "near-crypto 0.1.0",
@@ -2009,6 +1956,7 @@ dependencies = [
  "near-primitives 0.1.0",
  "near-store 0.1.0",
  "rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2188,6 +2136,7 @@ name = "near-vm-logic"
 version = "0.4.0"
 dependencies = [
  "bs58 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
  "near-runtime-fees 0.4.0",
  "near-vm-errors 0.4.0",
  "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -2232,7 +2181,7 @@ dependencies = [
  "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-crypto 0.1.0",
  "near-jsonrpc 0.1.0",
  "near-network 0.1.0",
@@ -2282,6 +2231,7 @@ dependencies = [
  "kvdb 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
+ "near 0.4.6",
  "near-crypto 0.1.0",
  "near-metrics 0.1.0",
  "near-primitives 0.1.0",
@@ -2792,21 +2742,19 @@ dependencies = [
 ]
 
 [[package]]
-name = "rand_xoshiro"
-version = "0.3.1"
+name = "rand_xorshift"
+version = "0.2.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
-name = "raw-cpuid"
-version = "6.1.0"
+name = "rand_xoshiro"
+version = "0.3.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
- "bitflags 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "cc 1.0.46 (registry+https://github.com/rust-lang/crates.io-index)",
- "rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_core 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
 [[package]]
@@ -2971,12 +2919,16 @@ dependencies = [
  "csv 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
  "gnuplot 0.0.32 (registry+https://github.com/rust-lang/crates.io-index)",
  "indicatif 0.12.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-crypto 0.1.0",
  "near-primitives 0.1.0",
+ "near-runtime-fees 0.4.0",
  "near-store 0.1.0",
+ "near-vm-logic 0.4.0",
+ "near-vm-runner 0.4.0",
  "node-runtime 0.0.1",
  "rand 0.7.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)",
  "tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)",
@@ -3240,7 +3192,7 @@ dependencies = [
  "borsh 0.2.9 (registry+https://github.com/rust-lang/crates.io-index)",
  "clap 2.33.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "hex 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-chain 0.1.0",
  "near-crypto 0.1.0",
  "near-network 0.1.0",
@@ -3321,16 +3273,6 @@ dependencies = [
  "xattr 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "target-lexicon"
-version = "0.8.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_json 1.0.41 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "tempdir"
 version = "0.3.7"
@@ -3381,7 +3323,7 @@ dependencies = [
  "futures 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "near 0.4.5",
+ "near 0.4.6",
  "near-chain 0.1.0",
  "near-client 0.1.0",
  "near-crypto 0.1.0",
@@ -3867,56 +3809,6 @@ name = "wasi"
 version = "0.7.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 
-[[package]]
-name = "wasmer-clif-backend"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "byteorder 1.3.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-codegen 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-entity 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-native 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
- "nix 0.15.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde 1.0.102 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde-bench 0.0.7 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_bytes 0.11.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "serde_derive 1.0.101 (registry+https://github.com/rust-lang/crates.io-index)",
- "target-lexicon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-clif-fork-frontend 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-clif-fork-wasm 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-runtime-core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-win-exception-handler 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmparser 0.39.2 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "wasmer-clif-fork-frontend"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cranelift-codegen 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "smallvec 0.6.10 (registry+https://github.com/rust-lang/crates.io-index)",
- "target-lexicon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
-[[package]]
-name = "wasmer-clif-fork-wasm"
-version = "0.44.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cranelift-codegen 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "cranelift-entity 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "failure 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "failure_derive 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
- "log 0.4.8 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-clif-fork-frontend 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmparser 0.39.2 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "wasmer-runtime"
 version = "0.9.0"
@@ -3924,7 +3816,6 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 dependencies = [
  "lazy_static 1.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "memmap 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-clif-backend 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "wasmer-runtime-core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
  "wasmer-singlepass-backend 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
@@ -3971,17 +3862,6 @@ dependencies = [
  "wasmer-runtime-core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
 ]
 
-[[package]]
-name = "wasmer-win-exception-handler"
-version = "0.9.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-dependencies = [
- "cmake 0.1.42 (registry+https://github.com/rust-lang/crates.io-index)",
- "libc 0.2.65 (registry+https://github.com/rust-lang/crates.io-index)",
- "wasmer-runtime-core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)",
- "winapi 0.3.8 (registry+https://github.com/rust-lang/crates.io-index)",
-]
-
 [[package]]
 name = "wasmparser"
 version = "0.39.2"
@@ -4151,12 +4031,6 @@ dependencies = [
 "checksum copyless 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "6ff9c56c9fb2a49c05ef0e431485a22400af20d33226dc0764d891d09e724127"
 "checksum core-foundation 0.6.4 (registry+https://github.com/rust-lang/crates.io-index)" = "25b9e03f145fd4f2bf705e07b900cd41fc636598fe5dc452fd0db1441c3f496d"
 "checksum core-foundation-sys 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e7ca8a5221364ef15ce201e8ed2f609fc312682a8f4e0e3d4aa5879764e0fa3b"
-"checksum cranelift-bforest 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fff04f4ad82c9704a22e753c6268cc6a89add76f094b837cefbba1c665411451"
-"checksum cranelift-codegen 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "6ff4a221ec1b95df4b1d20a99fec4fe92a28bebf3a815f2eca72b26f9a627485"
-"checksum cranelift-codegen-meta 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "dd47f665e2ee8f177b97d1f5ce2bd70f54d3b793abb26d92942bfaa4a381fe9f"
-"checksum cranelift-codegen-shared 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "05bb95945fd940bd5fc2616b063ce69e55de3d9449a32fa40f6bb99a927085bf"
-"checksum cranelift-entity 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "e8753f15d9bde04988834705d437b6f6e4b4da0527968b8d40d7342262d43052"
-"checksum cranelift-native 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fd16b58e95af9ee837218cf41e70306becc1fc7d7dada55dac42df5130a4a4ba"
 "checksum crc32fast 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ba125de2af0df55319f41944744ad91c71113bf74a4646efff39afe1f6842db1"
 "checksum criterion 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "938703e165481c8d612ea3479ac8342e5615185db37765162e762ec3523e2fc6"
 "checksum criterion-plot 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "eccdc6ce8bbe352ca89025bee672aa6d24f4eb8c53e3a8b5d1bc58011da072a2"
@@ -4332,8 +4206,8 @@ dependencies = [
 "checksum rand_os 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "a788ae3edb696cfcba1c19bfd388cc4b8c21f8a408432b199c072825084da58a"
 "checksum rand_pcg 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "abf9b09b01790cfe0364f52bf32995ea3c39f4d2dd011eac241d2914146d0b44"
 "checksum rand_xorshift 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "cbf7e9e623549b0e21f6e97cf8ecf247c1a8fd2e8a992ae265314300b2455d5c"
+"checksum rand_xorshift 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "77d416b86801d23dde1aa643023b775c3a462efc0ed96443add11546cdf1dca8"
 "checksum rand_xoshiro 0.3.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0e18c91676f670f6f0312764c759405f13afb98d5d73819840cf72a518487bff"
-"checksum raw-cpuid 6.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "30a9d219c32c9132f7be513c18be77c9881c7107d2ab5569d205a6a0f0e6dc7d"
 "checksum rayon 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "83a27732a533a1be0a0035a111fe76db89ad312f6f0347004c220c57f209a123"
 "checksum rayon-core 1.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "98dcf634205083b17d0861252431eb2acbfb698ab7478a2d20de07954f47ec7b"
 "checksum rdrand 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "678054eb77286b51581ba43620cc911abf02758c91f93f479767aed0f90458b2"
@@ -4387,7 +4261,6 @@ dependencies = [
 "checksum sysinfo 0.9.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d5bd3b813d94552a8033c650691645f8dd5a63d614dddd62428a95d3931ef7b6"
 "checksum take_mut 0.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "f764005d11ee5f36500a149ace24e00e3da98b0158b3e2d53a7495660d3f4d60"
 "checksum tar 0.4.26 (registry+https://github.com/rust-lang/crates.io-index)" = "b3196bfbffbba3e57481b6ea32249fbaf590396a52505a2615adbb79d9d826d3"
-"checksum target-lexicon 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7975cb2c6f37d77b190bc5004a2bb015971464756fde9514651a525ada2a741a"
 "checksum tempdir 0.3.7 (registry+https://github.com/rust-lang/crates.io-index)" = "15f2b5fb00ccdf689e0149d1b1b3c03fead81c2b37735d812fa8bddbbf41b6d8"
 "checksum tempfile 3.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e24d9338a0a5be79593e2fa15a648add6138caa803e2d5bc782c371732ca9"
 "checksum termcolor 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "96d6098003bde162e4277c70665bd87c326f5a0c3f3fbfb285787fa482d54e6e"
@@ -4439,13 +4312,9 @@ dependencies = [
 "checksum walkdir 2.2.9 (registry+https://github.com/rust-lang/crates.io-index)" = "9658c94fa8b940eab2250bd5a457f9c48b748420d71293b165c8cdbe2f55f71e"
 "checksum want 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6395efa4784b027708f7451087e647ec73cc74f5d9bc2e418404248d679a230"
 "checksum wasi 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b89c3ce4ce14bdc6fb6beaf9ec7928ca331de5df7e5ea278375642a2f478570d"
-"checksum wasmer-clif-backend 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c9b933278c0e93fa84cc6bf09f965e1649b69e81fae48f3ac49f3d959207a2bb"
-"checksum wasmer-clif-fork-frontend 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0cf2f552a9c1fda0555087170424bd8fedc63a079a97bb5638a4ef9b0d9656aa"
-"checksum wasmer-clif-fork-wasm 0.44.0 (registry+https://github.com/rust-lang/crates.io-index)" = "0073b512e1af5948d34be7944b74c747bbe735ccff2e2f28c26ed4c90725de8e"
 "checksum wasmer-runtime 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "918d8f7aa25cb20698e8e80023dc275b5b27501a4190b8a447e59dbd112b0b65"
 "checksum wasmer-runtime-core 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7d8f7ba03f40f62d25335196b5b0b0b9aab27d601d621d849b850e9efb8cd2da"
 "checksum wasmer-singlepass-backend 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b6c51a89315b21d9cf3d50e53a64a64b9b58c7e4831fb0461a9c2027c23859bd"
-"checksum wasmer-win-exception-handler 0.9.0 (registry+https://github.com/rust-lang/crates.io-index)" = "30a321928330e7dffa707b946ab0da1268fd373eedd2dc7519abb86a374f474d"
 "checksum wasmparser 0.39.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e5083b449454f7de0b15f131eee17de54b5a71dcb9adcf11df2b2f78fad0cd82"
 "checksum webpki 0.19.1 (registry+https://github.com/rust-lang/crates.io-index)" = "4f7e1cd7900a3a6b65a3e8780c51a3e6b59c0e2c55c6dc69578c288d69f7d082"
 "checksum webpki-roots 0.16.0 (registry+https://github.com/rust-lang/crates.io-index)" = "c10fa4212003ba19a564f25cd8ab572c6791f99a03cc219c13ed35ccab00de0e"
diff --git a/chain/chain/src/chain.rs b/chain/chain/src/chain.rs
index d365d90f644..1f16fb68277 100644
--- a/chain/chain/src/chain.rs
+++ b/chain/chain/src/chain.rs
@@ -26,7 +26,7 @@ use near_primitives::views::{
     ExecutionOutcomeView, ExecutionOutcomeWithIdView, ExecutionStatusView,
     FinalExecutionOutcomeView, FinalExecutionStatus,
 };
-use near_store::{Store, COL_STATE_HEADERS};
+use near_store::{Store, COL_STATE_HEADERS, COL_STATE_PARTS};
 
 use crate::byzantine_assert;
 use crate::error::{Error, ErrorKind};
@@ -36,7 +36,7 @@ use crate::store::{ChainStore, ChainStoreAccess, ChainStoreUpdate, ShardInfo, St
 use crate::types::{
     AcceptedBlock, ApplyTransactionResult, Block, BlockHeader, BlockStatus, Provenance,
     ReceiptList, ReceiptProofResponse, ReceiptResponse, RootProof, RuntimeAdapter,
-    ShardStateSyncResponseHeader, ShardStateSyncResponsePart, StateHeaderKey, Tip,
+    ShardStateSyncResponseHeader, StateHeaderKey, StatePartKey, Tip,
     ValidatorSignatureVerificationResult,
 };
 use crate::validate::{validate_challenge, validate_chunk_proofs, validate_chunk_with_chunk_extra};
@@ -189,9 +189,9 @@ pub struct Chain {
     orphans: OrphanBlockPool,
     blocks_with_missing_chunks: OrphanBlockPool,
     genesis: BlockHeader,
-    pub finality_gadget: FinalityGadget,
     pub transaction_validity_period: BlockIndex,
     pub epoch_length: BlockIndex,
+    gas_price_adjustment_rate: u8,
 }
 
 impl Chain {
@@ -212,7 +212,6 @@ impl Chain {
         let genesis = Block::genesis(
             genesis_chunks.iter().map(|chunk| chunk.header.clone()).collect(),
             chain_genesis.time,
-            chain_genesis.gas_limit,
             chain_genesis.gas_price,
             chain_genesis.total_supply,
         );
@@ -257,6 +256,7 @@ impl Chain {
                         CryptoHash::default(),
                         genesis.hash(),
                         genesis.header.inner.height,
+                        0,
                         vec![],
                         vec![],
                         vec![],
@@ -302,7 +302,7 @@ impl Chain {
         }
         store_update.commit()?;
 
-        info!(target: "chain", "Init: head: {} @ {} [{}]", head.total_weight.to_num(), head.height, head.last_block_hash);
+        info!(target: "chain", "Init: head: weight: {}, score: {} @ {} [{}]", head.weight_and_score.weight.to_num(), head.weight_and_score.score.to_num(), head.height, head.last_block_hash);
 
         Ok(Chain {
             store,
@@ -310,9 +310,9 @@ impl Chain {
             orphans: OrphanBlockPool::new(),
             blocks_with_missing_chunks: OrphanBlockPool::new(),
             genesis: genesis.header,
-            finality_gadget: FinalityGadget {},
             transaction_validity_period: chain_genesis.transaction_validity_period,
             epoch_length: chain_genesis.epoch_length,
+            gas_price_adjustment_rate: chain_genesis.gas_price_adjustment_rate,
         })
     }
 
@@ -322,7 +322,7 @@ impl Chain {
         approval: &Approval,
     ) -> Result<(), Error> {
         let mut chain_store_update = ChainStoreUpdate::new(&mut self.store);
-        self.finality_gadget.process_approval(me, approval, &mut chain_store_update)?;
+        FinalityGadget::process_approval(me, approval, &mut chain_store_update)?;
         chain_store_update.commit()?;
         Ok(())
     }
@@ -331,27 +331,33 @@ impl Chain {
         &mut self,
         approval: &Approval,
     ) -> Result<(), ApprovalVerificationError> {
-        self.finality_gadget.verify_approval_conditions(approval, &mut self.store)
+        FinalityGadget::verify_approval_conditions(approval, &mut self.store)
     }
 
     pub fn get_my_approval_reference_hash(&mut self, last_hash: CryptoHash) -> Option<CryptoHash> {
-        self.finality_gadget.get_my_approval_reference_hash(last_hash, &mut self.store)
+        FinalityGadget::get_my_approval_reference_hash(last_hash, &mut self.store)
     }
 
     pub fn compute_quorums(
-        &mut self,
         prev_hash: CryptoHash,
+        epoch_id: EpochId,
         height: BlockIndex,
         approvals: Vec<Approval>,
         total_block_producers: usize,
+        runtime_adapter: &dyn RuntimeAdapter,
+        chain_store: &mut dyn ChainStoreAccess,
     ) -> Result<FinalityGadgetQuorums, Error> {
-        self.finality_gadget.compute_quorums(
+        let mut ret = FinalityGadget::compute_quorums(
             prev_hash,
+            epoch_id,
             height,
             approvals,
-            &mut self.store,
+            chain_store,
             total_block_producers,
-        )
+        )?;
+        ret.last_quorum_pre_commit = runtime_adapter
+            .push_final_block_back_if_needed(prev_hash, ret.last_quorum_pre_commit)?;
+        Ok(ret)
     }
 
     /// Reset "sync" head to current header head.
@@ -395,6 +401,7 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
         chain_update.process_block_header(header, on_challenge)?;
         Ok(())
@@ -412,6 +419,7 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
         chain_update.mark_block_as_challenged(block_hash, Some(challenger_hash))?;
         chain_update.commit()?;
@@ -472,6 +480,7 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
         match chain_update.verify_challenges(
             &vec![challenge.clone()],
@@ -522,6 +531,7 @@ impl Chain {
                     &self.blocks_with_missing_chunks,
                     self.transaction_validity_period,
                     self.epoch_length,
+                    self.gas_price_adjustment_rate,
                 );
 
                 match chain_update.check_header_known(header) {
@@ -541,6 +551,7 @@ impl Chain {
                     header.inner.prev_hash,
                     header.hash(),
                     header.inner.height,
+                    self.store.get_block_height(&header.inner.last_quorum_pre_commit)?,
                     header.inner.validator_proposals.clone(),
                     vec![],
                     header.inner.chunk_mask.clone(),
@@ -558,6 +569,7 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
 
         if let Some(header) = headers.last() {
@@ -579,7 +591,7 @@ impl Chain {
         let header_head = self.header_head()?;
         let mut hashes = vec![];
 
-        if block_head.total_weight >= header_head.total_weight {
+        if block_head.weight_and_score >= header_head.weight_and_score {
             return Ok((false, hashes));
         }
 
@@ -741,6 +753,7 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
         let maybe_new_head = chain_update.process_block(me, &block, &provenance, on_challenge);
 
@@ -786,13 +799,7 @@ impl Chain {
                 let status = self.determine_status(head.clone(), prev_head);
 
                 // Notify other parts of the system of the update.
-                block_accepted(AcceptedBlock {
-                    hash: block.hash(),
-                    status,
-                    provenance,
-                    gas_used: block.header.inner.gas_used,
-                    gas_limit: block.header.inner.gas_limit,
-                });
+                block_accepted(AcceptedBlock { hash: block.hash(), status, provenance });
 
                 Ok(head)
             }
@@ -1127,6 +1134,9 @@ impl Chain {
             root_proofs.push(root_proofs_cur);
         }
 
+        let state_root_node =
+            self.runtime_adapter.get_state_root_node(&chunk_header.inner.prev_state_root);
+
         Ok(ShardStateSyncResponseHeader {
             chunk,
             chunk_proof,
@@ -1134,6 +1144,7 @@ impl Chain {
             prev_chunk_proof,
             incoming_receipts_proofs,
             root_proofs,
+            state_root_node,
         })
     }
 
@@ -1141,8 +1152,9 @@ impl Chain {
         &mut self,
         shard_id: ShardId,
         part_id: u64,
+        num_parts: u64,
         sync_hash: CryptoHash,
-    ) -> Result<ShardStateSyncResponsePart, Error> {
+    ) -> Result<Vec<u8>, Error> {
         let sync_block = self.get_block(&sync_hash)?;
         let sync_block_header = sync_block.header.clone();
         if shard_id as usize >= sync_block.chunks.len() {
@@ -1160,18 +1172,15 @@ impl Chain {
         }
         let state_root = sync_prev_block.chunks[shard_id as usize].inner.prev_state_root.clone();
 
-        if part_id >= state_root.num_parts {
+        if part_id >= num_parts {
             return Err(ErrorKind::Other(
                 "get_state_response_part fail: part_id out of bound".to_string(),
             )
             .into());
         }
-        let (state_part, proof) = self
-            .runtime_adapter
-            .obtain_state_part(shard_id, part_id, &state_root)
-            .map_err(|err| ErrorKind::Other(err.to_string()))?;
+        let state_part = self.runtime_adapter.obtain_state_part(&state_root, part_id, num_parts);
 
-        Ok(ShardStateSyncResponsePart { state_part, proof })
+        Ok(state_part)
     }
 
     pub fn set_state_header(
@@ -1189,6 +1198,7 @@ impl Chain {
             prev_chunk_proof,
             incoming_receipts_proofs,
             root_proofs,
+            state_root_node,
         } = &shard_state_header;
 
         // 1-2. Checking chunk validity
@@ -1333,6 +1343,18 @@ impl Chain {
             .into());
         }
 
+        // 5. Checking that state_root_node is valid
+        if !self
+            .runtime_adapter
+            .validate_state_root_node(state_root_node, &chunk.header.inner.prev_state_root)
+        {
+            byzantine_assert!(false);
+            return Err(ErrorKind::Other(
+                "set_shard_state failed: state_root_node is invalid".into(),
+            )
+            .into());
+        }
+
         // Saving the header data.
         let mut store_update = self.store.owned_store().store_update();
         let key = StateHeaderKey(shard_id, sync_hash).try_to_vec()?;
@@ -1361,14 +1383,26 @@ impl Chain {
         &mut self,
         shard_id: ShardId,
         sync_hash: CryptoHash,
-        part: ShardStateSyncResponsePart,
+        part_id: u64,
+        num_parts: u64,
+        data: &Vec<u8>,
     ) -> Result<(), Error> {
         let shard_state_header = self.get_received_state_header(shard_id, sync_hash)?;
         let ShardStateSyncResponseHeader { chunk, .. } = shard_state_header;
-        let state_root = &chunk.header.inner.prev_state_root;
-        self.runtime_adapter
-            .accept_state_part(state_root, &part.state_part, &part.proof)
-            .map_err(|_| ErrorKind::InvalidStatePayload)?;
+        let state_root = chunk.header.inner.prev_state_root;
+        if !self.runtime_adapter.validate_state_part(&state_root, part_id, num_parts, data) {
+            byzantine_assert!(false);
+            return Err(ErrorKind::Other(
+                "set_state_part failed: validate_state_part failed".into(),
+            )
+            .into());
+        }
+
+        // Saving the part data.
+        let mut store_update = self.store.owned_store().store_update();
+        let key = StatePartKey(shard_id, part_id, state_root).try_to_vec()?;
+        store_update.set_ser(COL_STATE_PARTS, &key, data)?;
+        store_update.commit()?;
         Ok(())
     }
 
@@ -1376,8 +1410,21 @@ impl Chain {
         &mut self,
         shard_id: ShardId,
         sync_hash: CryptoHash,
+        num_parts: u64,
     ) -> Result<(), Error> {
         let shard_state_header = self.get_received_state_header(shard_id, sync_hash)?;
+        let mut height = shard_state_header.chunk.header.height_included;
+        let state_root = shard_state_header.chunk.header.inner.prev_state_root.clone();
+        let mut parts = vec![];
+        for i in 0..num_parts {
+            let key = StatePartKey(shard_id, i, state_root.clone()).try_to_vec()?;
+            parts.push(self.store.owned_store().get_ser(COL_STATE_PARTS, &key)?.unwrap());
+        }
+
+        // Confirm that state matches the parts we received
+        self.runtime_adapter.confirm_state(&state_root, &parts)?;
+
+        // Applying the chunk starts here
         let mut chain_update = ChainUpdate::new(
             &mut self.store,
             self.runtime_adapter.clone(),
@@ -1385,8 +1432,8 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
-        let mut height = shard_state_header.chunk.header.height_included;
         chain_update.set_state_finalize(shard_id, sync_hash, shard_state_header)?;
         chain_update.commit()?;
 
@@ -1401,6 +1448,7 @@ impl Chain {
                 &self.blocks_with_missing_chunks,
                 self.transaction_validity_period,
                 self.epoch_length,
+                self.gas_price_adjustment_rate,
             );
             // Result of successful execution of set_state_finalize_on_height is bool,
             // should we commit and continue or stop.
@@ -1413,6 +1461,22 @@ impl Chain {
         Ok(())
     }
 
+    pub fn clear_downloaded_parts(
+        &mut self,
+        shard_id: ShardId,
+        sync_hash: CryptoHash,
+        num_parts: u64,
+    ) -> Result<(), Error> {
+        let shard_state_header = self.get_received_state_header(shard_id, sync_hash)?;
+        let state_root = shard_state_header.chunk.header.inner.prev_state_root.clone();
+        let mut store_update = self.store.owned_store().store_update();
+        for part_id in 0..num_parts {
+            let key = StatePartKey(shard_id, part_id, state_root).try_to_vec()?;
+            store_update.delete(COL_STATE_PARTS, &key);
+        }
+        Ok(store_update.commit()?)
+    }
+
     /// Apply transactions in chunks for the next epoch in blocks that were blocked on the state sync
     pub fn catchup_blocks<F, F2, F3>(
         &mut self,
@@ -1442,6 +1506,7 @@ impl Chain {
             &self.blocks_with_missing_chunks,
             self.transaction_validity_period,
             self.epoch_length,
+            self.gas_price_adjustment_rate,
         );
         chain_update.apply_chunks(me, &block, &prev_block, ApplyChunksMode::NextEpoch)?;
         chain_update.commit()?;
@@ -1472,6 +1537,7 @@ impl Chain {
                     &self.blocks_with_missing_chunks,
                     self.transaction_validity_period,
                     self.epoch_length,
+                    self.gas_price_adjustment_rate,
                 );
 
                 chain_update.apply_chunks(me, &block, &prev_block, ApplyChunksMode::NextEpoch)?;
@@ -1771,6 +1837,7 @@ pub struct ChainUpdate<'a> {
     blocks_with_missing_chunks: &'a OrphanBlockPool,
     transaction_validity_period: BlockIndex,
     epoch_length: BlockIndex,
+    gas_price_adjustment_rate: u8,
 }
 
 impl<'a> ChainUpdate<'a> {
@@ -1781,6 +1848,7 @@ impl<'a> ChainUpdate<'a> {
         blocks_with_missing_chunks: &'a OrphanBlockPool,
         transaction_validity_period: BlockIndex,
         epoch_length: BlockIndex,
+        gas_price_adjustment_rate: u8,
     ) -> Self {
         let chain_store_update: ChainStoreUpdate = store.store_update();
         ChainUpdate {
@@ -1790,6 +1858,7 @@ impl<'a> ChainUpdate<'a> {
             blocks_with_missing_chunks,
             transaction_validity_period,
             epoch_length,
+            gas_price_adjustment_rate,
         }
     }
 
@@ -1979,6 +2048,7 @@ impl<'a> ChainUpdate<'a> {
                 &prev_chunk.transactions,
                 &prev_chunk.header.inner.validator_proposals,
                 prev_block.header.inner.gas_price,
+                prev_chunk.header.inner.gas_limit,
                 &challenges_result,
                 true,
             )
@@ -2105,6 +2175,7 @@ impl<'a> ChainUpdate<'a> {
                             &chunk.transactions,
                             &chunk.header.inner.validator_proposals,
                             block.header.inner.gas_price,
+                            chunk.header.inner.gas_limit,
                             &block.header.inner.challenges_result,
                         )
                         .map_err(|e| ErrorKind::Other(e.to_string()))?;
@@ -2163,6 +2234,7 @@ impl<'a> ChainUpdate<'a> {
                             &vec![],
                             &new_extra.validator_proposals,
                             block.header.inner.gas_price,
+                            new_extra.gas_limit,
                             &block.header.inner.challenges_result,
                         )
                         .map_err(|e| ErrorKind::Other(e.to_string()))?;
@@ -2204,6 +2276,7 @@ impl<'a> ChainUpdate<'a> {
         let prev = self.get_previous_header(&block.header)?;
         let prev_hash = prev.hash();
         let prev_prev_hash = prev.inner.prev_hash;
+        let prev_gas_price = prev.inner.gas_price;
 
         // Block is an orphan if we do not know about the previous full block.
         if !is_next && !self.chain_store_update.block_exists(&prev_hash)? {
@@ -2236,8 +2309,7 @@ impl<'a> ChainUpdate<'a> {
         self.process_header_for_block(&block.header, provenance, on_challenge)?;
 
         for approval in block.header.inner.approvals.iter() {
-            let fg = FinalityGadget {};
-            fg.process_approval(me, approval, &mut self.chain_store_update)?;
+            FinalityGadget::process_approval(me, approval, &mut self.chain_store_update)?;
         }
 
         // We need to know the last approval on the previous block to later compute the reference
@@ -2256,6 +2328,11 @@ impl<'a> ChainUpdate<'a> {
             return Err(ErrorKind::Other("Invalid block".into()).into());
         }
 
+        if !block.verify_gas_price(prev_gas_price, self.gas_price_adjustment_rate) {
+            byzantine_assert!(false);
+            return Err(ErrorKind::InvalidGasPrice.into());
+        }
+
         let prev_block = self.chain_store_update.get_block(&prev_hash)?.clone();
 
         self.ping_missing_chunks(me, prev_hash, &block)?;
@@ -2297,10 +2374,17 @@ impl<'a> ChainUpdate<'a> {
         }
 
         // If block checks out, record validator proposals for given block.
+        let last_quorum_pre_commit = &block.header.inner.last_quorum_pre_commit;
+        let last_finalized_height = if last_quorum_pre_commit == &CryptoHash::default() {
+            0
+        } else {
+            self.chain_store_update.get_block_header(last_quorum_pre_commit)?.inner.height
+        };
         self.runtime_adapter.add_validator_proposals(
             block.header.inner.prev_hash,
             block.hash(),
             block.header.inner.height,
+            last_finalized_height,
             block.header.inner.validator_proposals.clone(),
             block.header.inner.challenges_result.clone(),
             block.header.inner.chunk_mask.clone(),
@@ -2420,7 +2504,6 @@ impl<'a> ChainUpdate<'a> {
         // producer, confirmation signatures to check that total weight is correct.
         if *provenance != Provenance::PRODUCED {
             // first verify aggregated signature
-            let prev_header = self.get_previous_header(header)?.clone();
             if !self.runtime_adapter.verify_approval_signature(
                 &prev_header.inner.epoch_id,
                 &prev_header.hash,
@@ -2433,6 +2516,24 @@ impl<'a> ChainUpdate<'a> {
             if weight != header.inner.total_weight {
                 return Err(ErrorKind::InvalidBlockWeight.into());
             }
+
+            let quorums = Chain::compute_quorums(
+                header.inner.prev_hash,
+                header.inner.epoch_id.clone(),
+                header.inner.height,
+                header.inner.approvals.clone(),
+                self.runtime_adapter
+                    .get_epoch_block_producers(&header.inner.epoch_id, &header.inner.prev_hash)?
+                    .len(),
+                &*self.runtime_adapter,
+                &mut self.chain_store_update,
+            )?;
+
+            if header.inner.last_quorum_pre_commit != quorums.last_quorum_pre_commit
+                || header.inner.last_quorum_pre_vote != quorums.last_quorum_pre_vote
+            {
+                return Err(ErrorKind::InvalidFinalityInfo.into());
+            }
         }
 
         Ok(())
@@ -2444,7 +2545,7 @@ impl<'a> ChainUpdate<'a> {
         header: &BlockHeader,
     ) -> Result<Option<Tip>, Error> {
         let header_head = self.chain_store_update.header_head()?;
-        if header.inner.total_weight > header_head.total_weight {
+        if header.inner.weight_and_score() > header_head.weight_and_score {
             let tip = Tip::from_header(header);
             self.chain_store_update.save_header_head_if_not_challenged(&tip)?;
             debug!(target: "chain", "Header head updated to {} at {}", tip.last_block_hash, tip.height);
@@ -2462,7 +2563,7 @@ impl<'a> ChainUpdate<'a> {
         // if we made a fork with more weight than the head (which should also be true
         // when extending the head), update it
         let head = self.chain_store_update.head()?;
-        if block.header.inner.total_weight > head.total_weight {
+        if block.header.inner.weight_and_score() > head.weight_and_score {
             let tip = Tip::from_header(&block.header);
 
             self.chain_store_update.save_body_head(&tip)?;
@@ -2635,19 +2736,14 @@ impl<'a> ChainUpdate<'a> {
             prev_chunk_proof: _,
             incoming_receipts_proofs,
             root_proofs: _,
+            state_root_node: _,
         } = shard_state_header;
-        let state_root = &chunk.header.inner.prev_state_root;
 
         let block_header = self
             .chain_store_update
             .get_header_on_chain_by_height(&sync_hash, chunk.header.height_included)?
             .clone();
 
-        // Applying chunk starts here.
-
-        // Confirm that state matches the parts we received.
-        self.runtime_adapter.confirm_state(&state_root)?;
-
         // Getting actual incoming receipts.
         let mut receipt_proof_response: Vec<ReceiptProofResponse> = vec![];
         for incoming_receipt_proof in incoming_receipts_proofs.iter() {
@@ -2671,6 +2767,7 @@ impl<'a> ChainUpdate<'a> {
             &chunk.transactions,
             &chunk.header.inner.validator_proposals,
             block_header.inner.gas_price,
+            chunk.header.inner.gas_limit,
             &block_header.inner.challenges_result,
         )?;
 
@@ -2749,6 +2846,7 @@ impl<'a> ChainUpdate<'a> {
             &vec![],
             &chunk_extra.validator_proposals,
             block_header.inner.gas_price,
+            chunk_extra.gas_limit,
             &block_header.inner.challenges_result,
         )?;
 
diff --git a/chain/chain/src/error.rs b/chain/chain/src/error.rs
index eb884ad0af0..5c4a22e0bba 100644
--- a/chain/chain/src/error.rs
+++ b/chain/chain/src/error.rs
@@ -100,6 +100,9 @@ pub enum ErrorKind {
     /// Invalid epoch hash
     #[fail(display = "Invalid Epoch Hash")]
     InvalidEpochHash,
+    /// Invalid quorum_pre_vote or quorum_pre_commit
+    #[fail(display = "Invalid Finality Info")]
+    InvalidFinalityInfo,
     /// Invalid validator proposals in the block.
     #[fail(display = "Invalid Validator Proposals")]
     InvalidValidatorProposals,
@@ -109,6 +112,24 @@ pub enum ErrorKind {
     /// Invalid Approvals
     #[fail(display = "Invalid Approvals")]
     InvalidApprovals,
+    /// Invalid Gas Limit
+    #[fail(display = "Invalid Gas Limit")]
+    InvalidGasLimit,
+    /// Invalid Gas Price
+    #[fail(display = "Invalid Gas Price")]
+    InvalidGasPrice,
+    /// Invalid Gas Used
+    #[fail(display = "Invalid Gas Used")]
+    InvalidGasUsed,
+    /// Invalid Rent Paid
+    #[fail(display = "Invalid Rent Paid")]
+    InvalidRent,
+    /// Invalid Validator Reward
+    #[fail(display = "Invalid Validator Reward")]
+    InvalidReward,
+    /// Invalid Balance Burnt
+    #[fail(display = "Invalid Balance Burnt")]
+    InvalidBalanceBurnt,
     /// Validator error.
     #[fail(display = "Validator Error: {}", _0)]
     ValidatorError(String),
@@ -199,9 +220,16 @@ impl Error {
             | ErrorKind::MaliciousChallenge
             | ErrorKind::IncorrectNumberOfChunkHeaders
             | ErrorKind::InvalidEpochHash
+            | ErrorKind::InvalidFinalityInfo
             | ErrorKind::InvalidValidatorProposals
             | ErrorKind::InvalidSignature
-            | ErrorKind::InvalidApprovals => true,
+            | ErrorKind::InvalidApprovals
+            | ErrorKind::InvalidGasLimit
+            | ErrorKind::InvalidGasPrice
+            | ErrorKind::InvalidGasUsed
+            | ErrorKind::InvalidReward
+            | ErrorKind::InvalidBalanceBurnt
+            | ErrorKind::InvalidRent => true,
         }
     }
 
diff --git a/chain/chain/src/finality.rs b/chain/chain/src/finality.rs
index f27f5163b15..8110679ed5f 100644
--- a/chain/chain/src/finality.rs
+++ b/chain/chain/src/finality.rs
@@ -2,9 +2,13 @@ use crate::error::{Error, ErrorKind};
 use crate::{ChainStoreAccess, ChainStoreUpdate};
 use near_primitives::block::{Approval, BlockHeader, BlockHeaderInner, Weight};
 use near_primitives::hash::CryptoHash;
-use near_primitives::types::{AccountId, BlockIndex};
+use near_primitives::types::{AccountId, BlockIndex, EpochId};
 use std::collections::{HashMap, HashSet};
 
+// How many blocks back to search for a new reference hash when the chain switches and the block
+//     producer cannot use the same reference hash as the last approval on chain
+const REFERENCE_HASH_LOOKUP_DEPTH: usize = 10;
+
 #[derive(Clone, Eq, PartialEq, Debug)]
 pub struct FinalityGadgetQuorums {
     pub last_quorum_pre_vote: CryptoHash,
@@ -18,7 +22,6 @@ pub struct FinalityGadget {}
 
 impl FinalityGadget {
     pub fn process_approval(
-        &self,
         me: &Option<AccountId>,
         approval: &Approval,
         chain_store_update: &mut ChainStoreUpdate,
@@ -62,7 +65,6 @@ impl FinalityGadget {
     }
 
     pub fn verify_approval_conditions(
-        &mut self,
         _approval: &Approval,
         _chain_store: &mut dyn ChainStoreAccess,
     ) -> Result<(), ApprovalVerificationError> {
@@ -71,7 +73,6 @@ impl FinalityGadget {
     }
 
     pub fn get_my_approval_reference_hash(
-        &self,
         prev_hash: CryptoHash,
         chain_store: &mut dyn ChainStoreAccess,
     ) -> Option<CryptoHash> {
@@ -101,7 +102,7 @@ impl FinalityGadget {
         let last_approval_on_chain =
             chain_store.get_my_last_approval(&prev_prev_hash).ok().cloned();
 
-        self.get_my_approval_reference_hash_inner(
+        FinalityGadget::get_my_approval_reference_hash_inner(
             prev_hash,
             last_approval_on_chain,
             largest_weight_approved,
@@ -111,7 +112,6 @@ impl FinalityGadget {
     }
 
     pub fn get_my_approval_reference_hash_inner(
-        &self,
         prev_hash: CryptoHash,
         last_approval_on_chain: Option<Approval>,
         largest_weight_approved: Weight,
@@ -121,15 +121,24 @@ impl FinalityGadget {
         let default_f = |chain_store: &mut dyn ChainStoreAccess| match chain_store
             .get_block_header(&prev_hash)
         {
-            Ok(header) => {
-                if header.inner.total_weight > largest_weight_approved
-                    && (header.inner.score > largest_score_approved
-                        || largest_score_approved == 0.into())
-                {
-                    Some(prev_hash)
-                } else {
-                    None
+            Ok(mut header) => {
+                let mut candidate = None;
+                // Get the reference_hash up to `REFERENCE_HASH_LOOKUP_DEPTH` blocks into the past
+                for _ in 0..REFERENCE_HASH_LOOKUP_DEPTH {
+                    if header.inner.total_weight > largest_weight_approved
+                        && header.inner.score >= largest_score_approved
+                    {
+                        candidate = Some(header.hash());
+                        let prev_hash = header.inner.prev_hash;
+                        match chain_store.get_block_header(&prev_hash) {
+                            Ok(new_header) => header = new_header,
+                            Err(_) => break,
+                        }
+                    } else {
+                        break;
+                    }
                 }
+                return candidate;
             }
             Err(_) => None,
         };
@@ -162,8 +171,8 @@ impl FinalityGadget {
     }
 
     pub fn compute_quorums(
-        &self,
         mut prev_hash: CryptoHash,
+        epoch_id: EpochId,
         mut height: BlockIndex,
         mut approvals: Vec<Approval>,
         chain_store: &mut dyn ChainStoreAccess,
@@ -248,6 +257,21 @@ impl FinalityGadget {
             approvals = last_block_header.inner.approvals.clone();
             height = last_block_header.inner.height;
 
+            if last_block_header.inner.epoch_id != epoch_id {
+                // Do not cross the epoch boundary. It is safe to get the last quorums from the last
+                //     block of the previous epoch, since no approval in the current epoch could
+                //     have finalized anything else in the previous epoch (they would exit here),
+                //     and if anything was finalized / had a prevote in this epoch, it would have
+                //     been found in previous iterations of the surrounding loop
+                if quorum_pre_vote.is_none() {
+                    quorum_pre_vote = Some(last_block_header.inner.last_quorum_pre_vote);
+                }
+                if quorum_pre_commit.is_none() {
+                    quorum_pre_commit = Some(last_block_header.inner.last_quorum_pre_commit);
+                }
+                break;
+            }
+
             // Move `highest_height_no_quorum` if needed
             while accounts_surrounding_no_quroum > total_block_producers * 2 / 3 {
                 accounts_surrounding_no_quroum -= height_to_accounts_to_remove
diff --git a/chain/chain/src/store.rs b/chain/chain/src/store.rs
index 854ab2352a2..769c0f5b772 100644
--- a/chain/chain/src/store.rs
+++ b/chain/chain/src/store.rs
@@ -465,6 +465,14 @@ impl ChainStore {
         }
         Err(InvalidTxError::Expired)
     }
+
+    pub fn get_block_height(&mut self, hash: &CryptoHash) -> Result<BlockIndex, Error> {
+        if hash == &CryptoHash::default() {
+            Ok(0)
+        } else {
+            Ok(self.get_block_header(hash)?.inner.height)
+        }
+    }
 }
 
 impl ChainStoreAccess for ChainStore {
diff --git a/chain/chain/src/test_utils.rs b/chain/chain/src/test_utils.rs
index 89cbe75ecfb..522c392d88c 100644
--- a/chain/chain/src/test_utils.rs
+++ b/chain/chain/src/test_utils.rs
@@ -12,7 +12,6 @@ use near_primitives::account::Account;
 use near_primitives::challenge::ChallengesResult;
 use near_primitives::errors::RuntimeError;
 use near_primitives::hash::{hash, CryptoHash};
-use near_primitives::merkle::{merklize, verify_path, MerklePath};
 use near_primitives::receipt::{ActionReceipt, Receipt, ReceiptEnum};
 use near_primitives::serialize::to_base;
 use near_primitives::sharding::ShardChunkHeader;
@@ -21,9 +20,10 @@ use near_primitives::transaction::{
     TransferAction,
 };
 use near_primitives::types::{
-    AccountId, Balance, BlockIndex, EpochId, MerkleHash, Nonce, ShardId, StateRoot, ValidatorStake,
+    AccountId, Balance, BlockIndex, EpochId, Gas, Nonce, ShardId, StateRoot, StateRootNode,
+    ValidatorStake,
 };
-use near_primitives::views::QueryResponse;
+use near_primitives::views::{EpochValidatorInfo, QueryResponse};
 use near_store::test_utils::create_test_store;
 use near_store::{
     PartialStorage, Store, StoreUpdate, Trie, TrieChanges, WrappedTrieChanges, COL_BLOCK_HEADER,
@@ -32,14 +32,12 @@ use near_store::{
 use crate::error::{Error, ErrorKind};
 use crate::store::ChainStoreAccess;
 use crate::types::{
-    ApplyTransactionResult, BlockHeader, RuntimeAdapter, StatePart, StatePartKey,
-    ValidatorSignatureVerificationResult, Weight,
+    ApplyTransactionResult, BlockHeader, RuntimeAdapter, ValidatorSignatureVerificationResult,
+    Weight,
 };
 use crate::{Chain, ChainGenesis, ValidTransaction};
 use near_primitives::block::Approval;
 
-pub const DEFAULT_STATE_NUM_PARTS: u64 = 17; /* TODO MOO */
-
 #[derive(BorshSerialize, BorshDeserialize, Hash, PartialEq, Eq, Ord, PartialOrd, Clone, Debug)]
 struct AccountNonce(AccountId, Nonce);
 
@@ -61,9 +59,8 @@ pub struct KeyValueRuntime {
     epoch_length: u64,
 
     // A mapping state_root => {account id => amounts}, for transactions and receipts
-    state: RwLock<HashMap<MerkleHash, KVState>>,
-    state_parts: RwLock<HashMap<CryptoHash, StatePart>>,
-    state_proofs: RwLock<HashMap<CryptoHash, MerklePath>>,
+    state: RwLock<HashMap<StateRoot, KVState>>,
+    state_size: RwLock<HashMap<StateRoot, u64>>,
 
     headers_cache: RwLock<HashMap<CryptoHash, BlockHeader>>,
     hash_to_epoch: RwLock<HashMap<CryptoHash, EpochId>>,
@@ -114,17 +111,18 @@ impl KeyValueRuntime {
         map_with_default_hash3.insert(EpochId::default(), 0);
 
         let mut state = HashMap::new();
-        state.insert(
-            MerkleHash::default(),
-            KVState {
-                amounts: initial_amounts,
-                receipt_nonces: HashSet::default(),
-                tx_nonces: HashSet::default(),
-            },
-        );
-        // TODO MOO initializing for StateRoot::default()?
-        let state_parts = HashMap::new();
-        let state_proofs = HashMap::new();
+        let kv_state = KVState {
+            amounts: initial_amounts,
+            receipt_nonces: HashSet::default(),
+            tx_nonces: HashSet::default(),
+        };
+        let mut state_size = HashMap::new();
+        let data = kv_state.try_to_vec().unwrap();
+        let data_len = data.len() as u64;
+        // StateRoot is actually faked here.
+        // We cannot do any reasonable validation of it in test_utils.
+        state.insert(StateRoot::default(), kv_state);
+        state_size.insert(StateRoot::default(), data_len);
         KeyValueRuntime {
             store,
             trie,
@@ -147,8 +145,7 @@ impl KeyValueRuntime {
             num_shards,
             epoch_length,
             state: RwLock::new(state),
-            state_parts: RwLock::new(state_parts),
-            state_proofs: RwLock::new(state_proofs),
+            state_size: RwLock::new(state_size),
             headers_cache: RwLock::new(HashMap::new()),
             hash_to_epoch: RwLock::new(HashMap::new()),
             hash_to_next_epoch: RwLock::new(map_with_default_hash1),
@@ -158,7 +155,7 @@ impl KeyValueRuntime {
     }
 
     pub fn get_root(&self) -> CryptoHash {
-        self.root.hash
+        self.root
     }
 
     fn get_block_header(&self, hash: &CryptoHash) -> Result<Option<BlockHeader>, Error> {
@@ -261,12 +258,7 @@ impl RuntimeAdapter for KeyValueRuntime {
     fn genesis_state(&self) -> (StoreUpdate, Vec<StateRoot>) {
         (
             self.store.store_update(),
-            ((0..self.num_shards())
-                .map(|_| StateRoot {
-                    hash: CryptoHash::default(),
-                    num_parts: DEFAULT_STATE_NUM_PARTS, /* TODO MOO */
-                })
-                .collect()),
+            ((0..self.num_shards()).map(|_| StateRoot::default()).collect()),
         )
     }
 
@@ -445,7 +437,8 @@ impl RuntimeAdapter for KeyValueRuntime {
         &self,
         _block_index: u64,
         _block_timestamp: u64,
-        _gas_price: u128,
+        _gas_price: Balance,
+        _gas_limit: Gas,
         _state_root: StateRoot,
         transactions: Vec<SignedTransaction>,
     ) -> Vec<SignedTransaction> {
@@ -468,6 +461,7 @@ impl RuntimeAdapter for KeyValueRuntime {
         _parent_hash: CryptoHash,
         _current_hash: CryptoHash,
         _block_index: u64,
+        _last_finalized_height: u64,
         _proposals: Vec<ValidatorStake>,
         _slashed_validators: Vec<AccountId>,
         _validator_mask: Vec<bool>,
@@ -490,13 +484,14 @@ impl RuntimeAdapter for KeyValueRuntime {
         transactions: &[SignedTransaction],
         _last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        _gas_limit: Gas,
         _challenges: &ChallengesResult,
         generate_storage_proof: bool,
     ) -> Result<ApplyTransactionResult, Error> {
         assert!(!generate_storage_proof);
         let mut tx_results = vec![];
 
-        let mut state = self.state.read().unwrap().get(&state_root.hash).cloned().unwrap();
+        let mut state = self.state.read().unwrap().get(&state_root).cloned().unwrap();
 
         let mut balance_transfers = vec![];
 
@@ -629,34 +624,17 @@ impl RuntimeAdapter for KeyValueRuntime {
         }
 
         let data = state.try_to_vec()?;
-        let state_num_parts = DEFAULT_STATE_NUM_PARTS as usize;
-        let mut parts = vec![];
-        for i in 0..state_num_parts {
-            let begin = data.len() / state_num_parts * i;
-            let mut end = data.len() / state_num_parts * (i + 1);
-            if i + 1 == state_num_parts {
-                end = data.len();
-            }
-            let part = StatePart { shard_id, part_id: i as u64, data: data[begin..end].to_vec() };
-            parts.push(part);
-        }
-        let (state_hash, proofs) = merklize(&parts);
-        let new_state_root = StateRoot { hash: state_hash, num_parts: state_num_parts as u64 };
-
-        self.state.write().unwrap().insert(new_state_root.hash, state);
-        for i in 0..state_num_parts {
-            let key = hash(&StatePartKey(i as u64, new_state_root.clone()).try_to_vec().unwrap());
-            assert!(verify_path(new_state_root.hash, &proofs[i], &parts[i]));
-            self.state_parts.write().unwrap().insert(key, parts[i].clone());
-            self.state_proofs.write().unwrap().insert(key, proofs[i].clone());
-        }
+        let state_size = data.len() as u64;
+        let state_root = hash(&data);
+        self.state.write().unwrap().insert(state_root.clone(), state);
+        self.state_size.write().unwrap().insert(state_root.clone(), state_size);
 
         Ok(ApplyTransactionResult {
             trie_changes: WrappedTrieChanges::new(
                 self.trie.clone(),
-                TrieChanges::empty(state_root.hash),
+                TrieChanges::empty(state_root),
             ),
-            new_root: new_state_root,
+            new_root: state_root,
             outcomes: tx_results,
             receipt_result: new_receipts,
             validator_proposals: vec![],
@@ -681,6 +659,7 @@ impl RuntimeAdapter for KeyValueRuntime {
         _transactions: &[SignedTransaction],
         _last_validator_proposals: &[ValidatorStake],
         _gas_price: Balance,
+        _gas_limit: Gas,
         _challenges: &ChallengesResult,
     ) -> Result<ApplyTransactionResult, Error> {
         unimplemented!();
@@ -703,7 +682,7 @@ impl RuntimeAdapter for KeyValueRuntime {
                     .state
                     .read()
                     .unwrap()
-                    .get(&state_root.hash)
+                    .get(&state_root)
                     .map_or_else(|| 0, |state| *state.amounts.get(&account_id2).unwrap_or(&0)),
                 locked: 0,
                 code_hash: CryptoHash::default(),
@@ -714,60 +693,67 @@ impl RuntimeAdapter for KeyValueRuntime {
         ))
     }
 
-    fn obtain_state_part(
-        &self,
-        shard_id: ShardId,
-        part_id: u64,
-        state_root: &StateRoot,
-    ) -> Result<(StatePart, Vec<u8>), Box<dyn std::error::Error>> {
-        if part_id >= state_root.num_parts {
-            return Err("Invalid part_id in obtain_state_part".to_string().into());
-        }
-        if shard_id >= self.num_shards() {
-            return Err("Invalid shard_id in obtain_state_part".to_string().into());
+    fn obtain_state_part(&self, state_root: &StateRoot, part_id: u64, num_parts: u64) -> Vec<u8> {
+        assert!(part_id < num_parts);
+        let state = self.state.read().unwrap().get(&state_root).unwrap().clone();
+        let data = state.try_to_vec().expect("should never fail");
+        let state_size = data.len() as u64;
+        let begin = state_size / num_parts * part_id;
+        let mut end = state_size / num_parts * (part_id + 1);
+        if part_id + 1 == num_parts {
+            end = state_size;
         }
-        let key = hash(&StatePartKey(part_id, state_root.clone()).try_to_vec().unwrap());
-        let part = self.state_parts.read().unwrap().get(&key).unwrap().clone();
-        let proof = self.state_proofs.read().unwrap().get(&key).unwrap().clone();
-        assert!(verify_path(state_root.hash, &proof, &part));
-        Ok((part, proof.try_to_vec()?))
+        data[begin as usize..end as usize].to_vec()
     }
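The boundary arithmetic above gives every part `data_len / num_parts` bytes and lets the last part absorb the remainder of the integer division. A minimal standalone sketch of the same arithmetic, under the assumption it is only illustrative (`part_bounds` is a hypothetical helper, not part of the codebase):

// Sketch of the part-boundary arithmetic used by `obtain_state_part` above.
fn part_bounds(data_len: u64, part_id: u64, num_parts: u64) -> (u64, u64) {
    assert!(part_id < num_parts);
    let begin = data_len / num_parts * part_id;
    let end = if part_id + 1 == num_parts {
        data_len // the last part picks up the remainder of the integer division
    } else {
        data_len / num_parts * (part_id + 1)
    };
    (begin, end)
}

fn main() {
    // 10 bytes split into 3 parts: [0..3), [3..6), [6..10).
    assert_eq!(part_bounds(10, 0, 3), (0, 3));
    assert_eq!(part_bounds(10, 1, 3), (3, 6));
    assert_eq!(part_bounds(10, 2, 3), (6, 10));
}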
 
-    fn accept_state_part(
+    fn validate_state_part(
         &self,
-        state_root: &StateRoot,
-        part: &StatePart,
-        proof: &Vec<u8>,
-    ) -> Result<(), Box<dyn std::error::Error>> {
-        let merkle_proof = MerklePath::try_from_slice(&proof)?;
-        if !verify_path(state_root.hash, &merkle_proof, part) {
-            return Err("set_shard_state failed: invalid StatePart".into());
-        }
-        let key = hash(&StatePartKey(part.part_id, state_root.clone()).try_to_vec().unwrap());
-        self.state_parts.write().unwrap().insert(key, part.clone());
-        self.state_proofs.write().unwrap().insert(key, merkle_proof);
-        Ok(())
+        _state_root: &StateRoot,
+        part_id: u64,
+        num_parts: u64,
+        _data: &Vec<u8>,
+    ) -> bool {
+        assert!(part_id < num_parts);
+        // We do not care about deeper validation in test_utils
+        true
     }
 
-    fn confirm_state(&self, state_root: &StateRoot) -> Result<bool, Error> {
+    fn confirm_state(&self, state_root: &StateRoot, parts: &Vec<Vec<u8>>) -> Result<(), Error> {
         let mut data = vec![];
-        for i in 0..state_root.num_parts as usize {
-            let key = hash(&StatePartKey(i as u64, state_root.clone()).try_to_vec().unwrap());
-            match self.state_parts.read().unwrap().get(&key) {
-                Some(part) => {
-                    data.push(part.data.clone());
-                }
-                None => {
-                    return Err(format!("Invalid accept_state, no part {:?}", i)
-                        .to_string()
-                        .into());
-                }
-            }
+        for part in parts {
+            data.push(part.clone());
         }
         let data_flatten: Vec<u8> = data.iter().flatten().cloned().collect();
         let state = KVState::try_from_slice(&data_flatten).unwrap();
-        self.state.write().unwrap().insert(state_root.hash, state);
-        Ok(true)
+        self.state.write().unwrap().insert(state_root.clone(), state.clone());
+        let data = state.try_to_vec()?;
+        let state_size = data.len() as u64;
+        self.state_size.write().unwrap().insert(state_root.clone(), state_size);
+        Ok(())
+    }
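`confirm_state` above relies only on the fact that concatenating the received parts in order reproduces the serialized `KVState`. A tiny sketch of that invariant on plain bytes (no borsh types, purely illustrative):

fn main() {
    // Any in-order partition of the serialized state concatenates back to the original buffer.
    let data: Vec<u8> = (0u8..100).collect();
    let parts: Vec<Vec<u8>> = data.chunks(33).map(|c| c.to_vec()).collect();
    let reassembled: Vec<u8> = parts.iter().flatten().cloned().collect();
    assert_eq!(reassembled, data);
}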
+
+    fn get_state_root_node(&self, state_root: &StateRoot) -> StateRootNode {
+        StateRootNode {
+            data: self
+                .state
+                .read()
+                .unwrap()
+                .get(&state_root)
+                .unwrap()
+                .clone()
+                .try_to_vec()
+                .expect("should never fail"),
+            memory_usage: self.state_size.read().unwrap().get(&state_root).unwrap().clone(),
+        }
+    }
+
+    fn validate_state_root_node(
+        &self,
+        _state_root_node: &StateRootNode,
+        _state_root: &StateRoot,
+    ) -> bool {
+        // We do not care about deeper validation in test_utils
+        true
     }
 
     fn is_next_block_epoch_start(&self, parent_hash: &CryptoHash) -> Result<bool, Error> {
@@ -807,6 +793,22 @@ impl RuntimeAdapter for KeyValueRuntime {
     fn get_epoch_inflation(&self, _epoch_id: &EpochId) -> Result<u128, Error> {
         Ok(0)
     }
+
+    fn get_validator_info(&self, _block_hash: &CryptoHash) -> Result<EpochValidatorInfo, Error> {
+        Ok(EpochValidatorInfo {
+            current_validators: vec![],
+            next_validators: vec![],
+            current_proposals: vec![],
+        })
+    }
+
+    fn push_final_block_back_if_needed(
+        &self,
+        _prev_block: CryptoHash,
+        last_final: CryptoHash,
+    ) -> Result<CryptoHash, Error> {
+        Ok(last_final)
+    }
 }
 
 pub fn setup() -> (Chain, Arc<KeyValueRuntime>, Arc<InMemorySigner>) {
diff --git a/chain/chain/src/types.rs b/chain/chain/src/types.rs
index f0439035d2a..d387de571c1 100644
--- a/chain/chain/src/types.rs
+++ b/chain/chain/src/types.rs
@@ -3,7 +3,7 @@ use std::collections::HashMap;
 use borsh::{BorshDeserialize, BorshSerialize};
 
 use near_crypto::Signature;
-use near_primitives::block::Approval;
+use near_primitives::block::{Approval, WeightAndScore};
 pub use near_primitives::block::{Block, BlockHeader, Weight};
 use near_primitives::challenge::ChallengesResult;
 use near_primitives::errors::RuntimeError;
@@ -13,9 +13,10 @@ use near_primitives::receipt::Receipt;
 use near_primitives::sharding::{ReceiptProof, ShardChunk, ShardChunkHeader};
 use near_primitives::transaction::{ExecutionOutcomeWithId, SignedTransaction};
 use near_primitives::types::{
-    AccountId, Balance, BlockIndex, EpochId, Gas, MerkleHash, ShardId, StateRoot, ValidatorStake,
+    AccountId, Balance, BlockIndex, EpochId, Gas, MerkleHash, ShardId, StateRoot, StateRootNode,
+    ValidatorStake,
 };
-use near_primitives::views::QueryResponse;
+use near_primitives::views::{EpochValidatorInfo, QueryResponse};
 use near_store::{PartialStorage, StoreUpdate, WrappedTrieChanges};
 
 use crate::error::Error;
@@ -33,14 +34,7 @@ pub struct RootProof(pub CryptoHash, pub MerklePath);
 pub struct StateHeaderKey(pub ShardId, pub CryptoHash);
 
 #[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize)]
-pub struct StatePartKey(pub u64, pub StateRoot);
-
-#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize)]
-pub struct StatePart {
-    pub shard_id: ShardId,
-    pub part_id: u64,
-    pub data: Vec<u8>,
-}
+pub struct StatePartKey(pub ShardId, pub u64 /* PartId */, pub StateRoot);
 
 #[derive(Eq, PartialEq, Debug, Clone)]
 pub enum BlockStatus {
@@ -80,8 +74,6 @@ pub struct AcceptedBlock {
     pub hash: CryptoHash,
     pub status: BlockStatus,
     pub provenance: Provenance,
-    pub gas_used: Gas,
-    pub gas_limit: Gas,
 }
 
 /// Information about valid transaction that was processed by chain + runtime.
@@ -164,6 +156,7 @@ pub trait RuntimeAdapter: Send + Sync {
         block_index: BlockIndex,
         block_timestamp: u64,
         gas_price: Balance,
+        gas_limit: Gas,
         state_root: StateRoot,
         transactions: Vec<SignedTransaction>,
     ) -> Vec<SignedTransaction>;
@@ -282,12 +275,19 @@ pub trait RuntimeAdapter: Send + Sync {
     /// Get inflation for a certain epoch
     fn get_epoch_inflation(&self, epoch_id: &EpochId) -> Result<Balance, Error>;
 
+    fn push_final_block_back_if_needed(
+        &self,
+        parent_hash: CryptoHash,
+        last_final_hash: CryptoHash,
+    ) -> Result<CryptoHash, Error>;
+
     /// Add proposals for validators.
     fn add_validator_proposals(
         &self,
         parent_hash: CryptoHash,
         current_hash: CryptoHash,
         block_index: BlockIndex,
+        last_finalized_height: BlockIndex,
         proposals: Vec<ValidatorStake>,
         slashed_validators: Vec<AccountId>,
         validator_mask: Vec<bool>,
@@ -310,6 +310,7 @@ pub trait RuntimeAdapter: Send + Sync {
         transactions: &[SignedTransaction],
         last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        gas_limit: Gas,
         challenges_result: &ChallengesResult,
     ) -> Result<ApplyTransactionResult, Error> {
         self.apply_transactions_with_optional_storage_proof(
@@ -323,6 +324,7 @@ pub trait RuntimeAdapter: Send + Sync {
             transactions,
             last_validator_proposals,
             gas_price,
+            gas_limit,
             challenges_result,
             false,
         )
@@ -340,6 +342,7 @@ pub trait RuntimeAdapter: Send + Sync {
         transactions: &[SignedTransaction],
         last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        gas_limit: Gas,
         challenges_result: &ChallengesResult,
         generate_storage_proof: bool,
     ) -> Result<ApplyTransactionResult, Error>;
@@ -357,6 +360,7 @@ pub trait RuntimeAdapter: Send + Sync {
         transactions: &[SignedTransaction],
         last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        gas_limit: Gas,
         challenges_result: &ChallengesResult,
     ) -> Result<ApplyTransactionResult, Error>;
 
@@ -371,29 +375,35 @@ pub trait RuntimeAdapter: Send + Sync {
         data: &[u8],
     ) -> Result<QueryResponse, Box<dyn std::error::Error>>;
 
-    /// Get the part of the state from given state root + proof.
-    fn obtain_state_part(
+    fn get_validator_info(&self, block_hash: &CryptoHash) -> Result<EpochValidatorInfo, Error>;
+
+    /// Get a part of the state corresponding to the given state root.
+    fn obtain_state_part(&self, state_root: &StateRoot, part_id: u64, num_parts: u64) -> Vec<u8>;
+
+    /// Validate that the provided data is the expected part of the state with the given state root.
+    /// Returns false if the provided part doesn't match the expected one.
+    fn validate_state_part(
         &self,
-        shard_id: ShardId,
-        part_id: u64,
         state_root: &StateRoot,
-    ) -> Result<(StatePart, Vec<u8>), Box<dyn std::error::Error>>;
-
-    /// Set state part that expected to be given state root with provided data.
-    /// Returns error if:
-    /// 1. Failed to parse, or
-    /// 2. The proof is invalid, or
-    /// 3. The resulting part doesn't match the expected one.
-    fn accept_state_part(
+        part_id: u64,
+        num_parts: u64,
+        data: &Vec<u8>,
+    ) -> bool;
+
+    /// Should be executed after accepting all the parts to set up a new state.
+    fn confirm_state(&self, state_root: &StateRoot, parts: &Vec<Vec<u8>>) -> Result<(), Error>;
+
+    /// Returns the StateRootNode of a state.
+    /// Panics if the requested hash is not in storage.
+    /// Never returns an error.
+    fn get_state_root_node(&self, state_root: &StateRoot) -> StateRootNode;
+
+    /// Validate the StateRootNode of a state.
+    fn validate_state_root_node(
         &self,
+        state_root_node: &StateRootNode,
         state_root: &StateRoot,
-        part: &StatePart,
-        proof: &Vec<u8>,
-    ) -> Result<(), Box<dyn std::error::Error>>;
-
-    /// Should be executed after accepting all the parts.
-    /// Returns `true` if state is set successfully.
-    fn confirm_state(&self, state_root: &StateRoot) -> Result<bool, Error>;
+    ) -> bool;
 
     /// Build receipts hashes.
     fn build_receipts_hashes(&self, receipts: &Vec<Receipt>) -> Result<Vec<CryptoHash>, Error> {
@@ -434,7 +444,7 @@ pub struct Tip {
     /// Previous block
     pub prev_block_hash: CryptoHash,
     /// Total weight on that fork
-    pub total_weight: Weight,
+    pub weight_and_score: WeightAndScore,
     /// Previous epoch id. Used for getting validator info.
     pub epoch_id: EpochId,
 }
@@ -446,7 +456,7 @@ impl Tip {
             height: header.inner.height,
             last_block_hash: header.hash(),
             prev_block_hash: header.inner.prev_hash,
-            total_weight: header.inner.total_weight,
+            weight_and_score: header.inner.weight_and_score(),
             epoch_id: header.inner.epoch_id.clone(),
         }
     }
@@ -460,18 +470,20 @@ pub struct ShardStateSyncResponseHeader {
     pub prev_chunk_proof: Option<MerklePath>,
     pub incoming_receipts_proofs: Vec<ReceiptProofResponse>,
     pub root_proofs: Vec<Vec<RootProof>>,
-}
-
-#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
-pub struct ShardStateSyncResponsePart {
-    pub state_part: StatePart,
-    pub proof: Vec<u8>,
+    pub state_root_node: StateRootNode,
 }
 
 #[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize)]
 pub struct ShardStateSyncResponse {
     pub header: Option<ShardStateSyncResponseHeader>,
-    pub parts: Vec<ShardStateSyncResponsePart>,
+    pub part_ids: Vec<u64>,
+    pub data: Vec<Vec<u8>>,
+}
+
+#[derive(Debug, Clone, PartialEq, Eq, BorshSerialize, BorshDeserialize, Default)]
+pub struct StateRequestParts {
+    pub ids: Vec<u64>,
+    pub num_parts: u64,
 }
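With `StateRequestParts`, a requester lists the exact part ids it still needs together with the total part count. A small sketch of building such a request; the struct is re-declared so the example is self-contained, and `request_all_parts` is a hypothetical helper:

#[derive(Debug, Default, PartialEq)]
struct StateRequestParts {
    ids: Vec<u64>,
    num_parts: u64,
}

// Request every part of a state that was split into `num_parts` pieces.
fn request_all_parts(num_parts: u64) -> StateRequestParts {
    StateRequestParts { ids: (0..num_parts).collect(), num_parts }
}

fn main() {
    let req = request_all_parts(4);
    assert_eq!(req, StateRequestParts { ids: vec![0, 1, 2, 3], num_parts: 4 });
}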
 
 #[cfg(test)]
@@ -486,15 +498,10 @@ mod tests {
     #[test]
     fn test_block_produce() {
         let num_shards = 32;
-        let genesis_chunks = genesis_chunks(
-            vec![StateRoot { hash: CryptoHash::default(), num_parts: 9 /* TODO MOO */ }],
-            num_shards,
-            1_000_000,
-        );
+        let genesis_chunks = genesis_chunks(vec![StateRoot::default()], num_shards, 1_000_000);
         let genesis = Block::genesis(
             genesis_chunks.into_iter().map(|chunk| chunk.header).collect(),
             Utc::now(),
-            1_000_000,
             100,
             1_000_000_000,
         );
diff --git a/chain/chain/src/validate.rs b/chain/chain/src/validate.rs
index 40a94fc4b4a..05f040f407b 100644
--- a/chain/chain/src/validate.rs
+++ b/chain/chain/src/validate.rs
@@ -14,6 +14,9 @@ use crate::byzantine_assert;
 use crate::types::{ApplyTransactionResult, ValidatorSignatureVerificationResult};
 use crate::{ChainStore, Error, ErrorKind, RuntimeAdapter};
 
+/// The gas limit cannot be adjusted by more than 0.1% at a time.
+const GAS_LIMIT_ADJUSTMENT_FACTOR: u64 = 1000;
+
 /// Verifies that chunk's proofs in the header match the body.
 pub fn validate_chunk_proofs(
     chunk: &ShardChunk,
@@ -73,6 +76,26 @@ pub fn validate_chunk_with_chunk_extra(
         return Err(ErrorKind::InvalidValidatorProposals.into());
     }
 
+    if prev_chunk_extra.gas_limit != chunk_header.inner.gas_limit {
+        return Err(ErrorKind::InvalidGasLimit.into());
+    }
+
+    if prev_chunk_extra.gas_used != chunk_header.inner.gas_used {
+        return Err(ErrorKind::InvalidGasUsed.into());
+    }
+
+    if prev_chunk_extra.rent_paid != chunk_header.inner.rent_paid {
+        return Err(ErrorKind::InvalidRent.into());
+    }
+
+    if prev_chunk_extra.validator_reward != chunk_header.inner.validator_reward {
+        return Err(ErrorKind::InvalidReward.into());
+    }
+
+    if prev_chunk_extra.balance_burnt != chunk_header.inner.balance_burnt {
+        return Err(ErrorKind::InvalidBalanceBurnt.into());
+    }
+
     let receipt_response = chain_store.get_outgoing_receipts_for_shard(
         *prev_block_hash,
         chunk_header.inner.shard_id,
@@ -85,6 +108,14 @@ pub fn validate_chunk_with_chunk_extra(
         return Err(ErrorKind::InvalidReceiptsProof.into());
     }
 
+    let prev_gas_limit = prev_chunk_extra.gas_limit;
+    if chunk_header.inner.gas_limit < prev_gas_limit - prev_gas_limit / GAS_LIMIT_ADJUSTMENT_FACTOR
+        || chunk_header.inner.gas_limit
+            > prev_gas_limit + prev_gas_limit / GAS_LIMIT_ADJUSTMENT_FACTOR
+    {
+        return Err(ErrorKind::InvalidGasLimit.into());
+    }
+
     Ok(())
 }
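The bound enforced above can be read as: the new gas limit must stay within prev_limit ± prev_limit / 1000. A worked example with made-up numbers; the predicate is a restatement for illustration, not code from this crate:

const GAS_LIMIT_ADJUSTMENT_FACTOR: u64 = 1000;

// True iff `new_gas_limit` is within 0.1% of `prev_gas_limit`, mirroring the check above.
fn gas_limit_in_bounds(prev_gas_limit: u64, new_gas_limit: u64) -> bool {
    let delta = prev_gas_limit / GAS_LIMIT_ADJUSTMENT_FACTOR;
    new_gas_limit >= prev_gas_limit - delta && new_gas_limit <= prev_gas_limit + delta
}

fn main() {
    let prev = 1_000_000_000_000u64;
    assert!(gas_limit_in_bounds(prev, prev + 1_000_000_000));  // +0.1% is still accepted
    assert!(!gas_limit_in_bounds(prev, prev + 1_000_000_001)); // beyond 0.1% is rejected
}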
 
@@ -233,7 +264,8 @@ fn validate_chunk_state_challenge(
             &chunk_state.prev_chunk.receipts,
             &chunk_state.prev_chunk.transactions,
             &[],
-            0,
+            prev_block_header.inner.gas_price,
+            chunk_state.prev_chunk.header.inner.gas_limit,
             &ChallengesResult::default(),
         )
         .map_err(|_| Error::from(ErrorKind::MaliciousChallenge))?;
diff --git a/chain/chain/tests/finality.rs b/chain/chain/tests/finality.rs
index 6a47cd82501..11da103398d 100644
--- a/chain/chain/tests/finality.rs
+++ b/chain/chain/tests/finality.rs
@@ -2,9 +2,9 @@ use near_chain::test_utils::setup;
 use near_chain::{ChainStore, ChainStoreAccess, ChainStoreUpdate};
 use near_chain::{FinalityGadget, FinalityGadgetQuorums};
 use near_crypto::{Signature, Signer};
-use near_primitives::block::{Approval, Block, Weight};
+use near_primitives::block::{Approval, Block};
 use near_primitives::hash::CryptoHash;
-use near_primitives::types::{AccountId, BlockIndex};
+use near_primitives::types::{AccountId, BlockIndex, EpochId};
 use near_store::test_utils::create_test_store;
 use rand::seq::SliceRandom;
 use rand::Rng;
@@ -99,7 +99,6 @@ fn create_block(
     approvals: Vec<Approval>,
     total_block_producers: usize,
 ) -> Block {
-    let fg = FinalityGadget {};
     let mut block = Block::empty(prev, signer);
     block.header.inner.approvals = approvals.clone();
     block.header.inner.height = height;
@@ -115,10 +114,16 @@ fn create_block(
     let slow_quorums =
         compute_quorums_slow(prev.hash(), approvals.clone(), chain_store, total_block_producers)
             .clone();
-    let fast_quorums = fg
-        .compute_quorums(prev.hash(), height, approvals.clone(), chain_store, total_block_producers)
-        .unwrap()
-        .clone();
+    let fast_quorums = FinalityGadget::compute_quorums(
+        prev.hash(),
+        EpochId(CryptoHash::default()),
+        height,
+        approvals.clone(),
+        chain_store,
+        total_block_producers,
+    )
+    .unwrap()
+    .clone();
 
     block.header.inner.last_quorum_pre_vote = fast_quorums.last_quorum_pre_vote;
     block.header.inner.last_quorum_pre_commit = fast_quorums.last_quorum_pre_commit;
@@ -166,7 +171,6 @@ fn test_finality_genesis() {
 #[test]
 fn test_finality_genesis2() {
     let (mut chain, _, signer) = setup();
-    let fg = FinalityGadget {};
     let total_block_producers = 4;
 
     let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone();
@@ -192,10 +196,16 @@ fn test_finality_genesis2() {
     let slow_quorums =
         compute_quorums_slow(block1.hash(), vec![], chain.mut_store(), total_block_producers)
             .clone();
-    let fast_quorums = fg
-        .compute_quorums(block1.hash(), 2, vec![], chain.mut_store(), total_block_producers)
-        .unwrap()
-        .clone();
+    let fast_quorums = FinalityGadget::compute_quorums(
+        block1.hash(),
+        EpochId(CryptoHash::default()),
+        2,
+        vec![],
+        chain.mut_store(),
+        total_block_producers,
+    )
+    .unwrap()
+    .clone();
 
     assert_eq!(expected_quorums, slow_quorums);
     assert_eq!(expected_quorums, fast_quorums);
@@ -204,7 +214,6 @@ fn test_finality_genesis2() {
 #[test]
 fn test_finality_basic() {
     let (mut chain, _, signer) = setup();
-    let fg = FinalityGadget {};
     let total_block_producers = 4;
 
     let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone();
@@ -244,10 +253,16 @@ fn test_finality_basic() {
     let slow_quorums =
         compute_quorums_slow(block3.hash(), vec![], chain.mut_store(), total_block_producers)
             .clone();
-    let fast_quorums = fg
-        .compute_quorums(block3.hash(), 4, vec![], chain.mut_store(), total_block_producers)
-        .unwrap()
-        .clone();
+    let fast_quorums = FinalityGadget::compute_quorums(
+        block3.hash(),
+        EpochId(CryptoHash::default()),
+        4,
+        vec![],
+        chain.mut_store(),
+        total_block_producers,
+    )
+    .unwrap()
+    .clone();
 
     assert_eq!(expected_quorums, slow_quorums);
     assert_eq!(expected_quorums, fast_quorums);
@@ -256,7 +271,6 @@ fn test_finality_basic() {
 #[test]
 fn test_finality_fewer_approvals_per_block() {
     let (mut chain, _, signer) = setup();
-    let fg = FinalityGadget {};
     let total_block_producers = 4;
 
     let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone();
@@ -316,10 +330,16 @@ fn test_finality_fewer_approvals_per_block() {
     let slow_quorums =
         compute_quorums_slow(block5.hash(), vec![], chain.mut_store(), total_block_producers)
             .clone();
-    let fast_quorums = fg
-        .compute_quorums(block5.hash(), 6, vec![], chain.mut_store(), total_block_producers)
-        .unwrap()
-        .clone();
+    let fast_quorums = FinalityGadget::compute_quorums(
+        block5.hash(),
+        EpochId(CryptoHash::default()),
+        6,
+        vec![],
+        chain.mut_store(),
+        total_block_producers,
+    )
+    .unwrap()
+    .clone();
 
     assert_eq!(expected_quorums, slow_quorums);
     assert_eq!(expected_quorums, fast_quorums);
@@ -329,7 +349,6 @@ fn test_finality_fewer_approvals_per_block() {
 fn test_finality_quorum_precommit_cases() {
     for target in 0..=1 {
         let (mut chain, _, signer) = setup();
-        let fg = FinalityGadget {};
         let total_block_producers = 4;
 
         let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone();
@@ -381,10 +400,16 @@ fn test_finality_quorum_precommit_cases() {
         let slow_quorums =
             compute_quorums_slow(block4.hash(), vec![], chain.mut_store(), total_block_producers)
                 .clone();
-        let fast_quorums = fg
-            .compute_quorums(block4.hash(), 5, vec![], chain.mut_store(), total_block_producers)
-            .unwrap()
-            .clone();
+        let fast_quorums = FinalityGadget::compute_quorums(
+            block4.hash(),
+            EpochId(CryptoHash::default()),
+            5,
+            vec![],
+            chain.mut_store(),
+            total_block_producers,
+        )
+        .unwrap()
+        .clone();
 
         assert_eq!(expected_quorums, slow_quorums);
         assert_eq!(expected_quorums, fast_quorums);
@@ -394,7 +419,6 @@ fn test_finality_quorum_precommit_cases() {
 #[test]
 fn test_my_approvals() {
     let (mut chain, _, signer) = setup();
-    let fg = FinalityGadget {};
     let total_block_producers = 4;
     let account_id = "test".to_string();
 
@@ -441,15 +465,22 @@ fn test_my_approvals() {
         println!("Block {}", i);
 
         let reference_hash =
-            fg.get_my_approval_reference_hash(block.hash(), chain.mut_store()).unwrap();
+            FinalityGadget::get_my_approval_reference_hash(block.hash(), chain.mut_store())
+                .unwrap();
         assert_eq!(reference_hash, expected_reference);
         let approval = Approval::new(block.hash(), reference_hash, &*signer, account_id.clone());
         let mut chain_store_update = ChainStoreUpdate::new(chain.mut_store());
-        fg.process_approval(&Some(account_id.clone()), &approval, &mut chain_store_update).unwrap();
+        FinalityGadget::process_approval(
+            &Some(account_id.clone()),
+            &approval,
+            &mut chain_store_update,
+        )
+        .unwrap();
         chain_store_update.commit().unwrap();
     }
 
-    let reference_hash = fg.get_my_approval_reference_hash(block8.hash(), chain.mut_store());
+    let reference_hash =
+        FinalityGadget::get_my_approval_reference_hash(block8.hash(), chain.mut_store());
     assert!(reference_hash.is_none());
 }
 
@@ -530,176 +561,3 @@ fn test_fuzzy_finality() {
         }
     }
 }
-
-#[test]
-fn test_fuzzy_safety() {
-    for adversaries in vec![false, true] {
-        for (complexity, num_iters) in vec![(10, 100), (20, 100), (50, 10), (100, 2)] {
-            let mut good_iters = 0;
-
-            let block_producers = vec![
-                "test1".to_string(),
-                "test2".to_string(),
-                "test3".to_string(),
-                "test4".to_string(),
-                "test5".to_string(),
-                "test6".to_string(),
-                "test7".to_string(),
-            ];
-            let total_block_producers = block_producers.len();
-
-            for iter in 0..num_iters {
-                println!("Starting iteration {} at complexity {}", iter, complexity);
-                let (mut chain, _, signer) = setup();
-
-                let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone();
-
-                let mut last_final_block_hash = CryptoHash::default();
-                let mut last_final_block_height = 0;
-                let mut largest_weight: HashMap<AccountId, Weight> = HashMap::new();
-                let mut largest_score: HashMap<AccountId, Weight> = HashMap::new();
-                let mut last_approvals: HashMap<CryptoHash, HashMap<AccountId, Approval>> =
-                    HashMap::new();
-
-                let mut all_blocks = vec![genesis_block.clone()];
-                for _i in 0..complexity {
-                    let max_score =
-                        all_blocks.iter().map(|block| block.header.inner.score).max().unwrap();
-                    let prev_block = [
-                        all_blocks.choose(&mut rand::thread_rng()).unwrap().clone(),
-                        all_blocks
-                            .iter()
-                            .filter(|block| block.header.inner.score == max_score)
-                            .collect::<Vec<_>>()
-                            .choose(&mut rand::thread_rng())
-                            .unwrap()
-                            .clone()
-                            .clone(),
-                        all_blocks.last().unwrap().clone(),
-                        all_blocks.last().unwrap().clone(),
-                    ]
-                    .choose(&mut rand::thread_rng())
-                    .unwrap()
-                    .clone();
-                    let mut last_approvals_entry =
-                        last_approvals.get(&prev_block.hash()).unwrap_or(&HashMap::new()).clone();
-                    let mut approvals = vec![];
-                    for (i, block_producer) in block_producers.iter().enumerate() {
-                        if rand::thread_rng().gen::<bool>() {
-                            continue;
-                        }
-
-                        let reference_hash = if i < 2 && adversaries {
-                            // malicious
-                            let prev_reference = if let Some(prev_approval) =
-                                last_approvals_entry.get(block_producer)
-                            {
-                                prev_approval.reference_hash
-                            } else {
-                                genesis_block.hash().clone()
-                            };
-
-                            let mut possible_references = vec![prev_reference];
-                            {
-                                let mut prev_block_hash = prev_block.hash();
-                                for _j in 0..10 {
-                                    if prev_block_hash == prev_reference {
-                                        break;
-                                    }
-                                    possible_references.push(prev_block_hash);
-                                    prev_block_hash = chain
-                                        .mut_store()
-                                        .get_block_header(&prev_block_hash)
-                                        .unwrap()
-                                        .inner
-                                        .prev_hash;
-                                }
-                            }
-
-                            possible_references.choose(&mut rand::thread_rng()).unwrap().clone()
-                        } else {
-                            // honest
-                            let fg = FinalityGadget {};
-                            let old_largest_weight =
-                                *largest_weight.get(block_producer).unwrap_or(&0u128.into());
-                            let old_largest_score =
-                                *largest_score.get(block_producer).unwrap_or(&0u128.into());
-
-                            match fg.get_my_approval_reference_hash_inner(
-                                prev_block.hash(),
-                                last_approvals_entry.get(block_producer).cloned(),
-                                old_largest_weight,
-                                old_largest_score,
-                                chain.mut_store(),
-                            ) {
-                                Some(hash) => hash,
-                                None => continue,
-                            }
-                        };
-
-                        let approval =
-                            apr(block_producer.clone(), reference_hash.clone(), prev_block.hash());
-                        approvals.push(approval.clone());
-                        last_approvals_entry.insert(block_producer.clone(), approval);
-                        largest_weight
-                            .insert(block_producer.clone(), prev_block.header.inner.total_weight);
-                        largest_score.insert(block_producer.clone(), prev_block.header.inner.score);
-                    }
-
-                    let new_block = create_block(
-                        &prev_block,
-                        prev_block.header.inner.height + 1,
-                        chain.mut_store(),
-                        &*signer,
-                        approvals,
-                        total_block_producers,
-                    );
-
-                    let final_block = new_block.header.inner.last_quorum_pre_commit;
-                    if final_block != CryptoHash::default() {
-                        let new_final_block_height =
-                            chain.get_block_header(&final_block).unwrap().inner.height;
-                        if last_final_block_height != 0 {
-                            if new_final_block_height > last_final_block_height {
-                                assert_eq!(
-                                    chain
-                                        .get_header_on_chain_by_height(
-                                            &final_block,
-                                            last_final_block_height
-                                        )
-                                        .unwrap()
-                                        .hash(),
-                                    last_final_block_hash
-                                );
-                            } else if new_final_block_height < last_final_block_height {
-                                assert_eq!(
-                                    chain
-                                        .get_header_on_chain_by_height(
-                                            &last_final_block_hash,
-                                            new_final_block_height
-                                        )
-                                        .unwrap()
-                                        .hash(),
-                                    final_block
-                                );
-                            } else {
-                                assert_eq!(final_block, last_final_block_hash);
-                            }
-                        }
-                        last_final_block_hash = final_block;
-                        last_final_block_height = new_final_block_height;
-                    }
-
-                    last_approvals.insert(new_block.hash().clone(), last_approvals_entry);
-
-                    all_blocks.push(new_block);
-                }
-                if last_final_block_height > 0 {
-                    good_iters += 1;
-                }
-            }
-            println!("Good iterations: {}/{}", good_iters, num_iters);
-            assert!(good_iters > num_iters / 4);
-        }
-    }
-}
diff --git a/chain/client/src/client.rs b/chain/client/src/client.rs
index fb2c9e4dcc3..5727dd02ab0 100644
--- a/chain/client/src/client.rs
+++ b/chain/client/src/client.rs
@@ -27,7 +27,7 @@ use near_primitives::merkle::{merklize, MerklePath};
 use near_primitives::receipt::Receipt;
 use near_primitives::sharding::{EncodedShardChunk, PartialEncodedChunk, ShardChunkHeader};
 use near_primitives::transaction::SignedTransaction;
-use near_primitives::types::{AccountId, BlockIndex, EpochId, ShardId, StateRoot};
+use near_primitives::types::{AccountId, BlockIndex, EpochId, ShardId};
 use near_primitives::unwrap_or_return;
 use near_primitives::utils::to_timestamp;
 use near_primitives::views::{FinalExecutionOutcomeView, QueryResponse};
@@ -278,10 +278,27 @@ impl Client {
             }
         }
 
-        let quorums = self
-            .chain
-            .compute_quorums(prev_hash, next_height, approvals.clone(), total_block_producers)?
-            .clone();
+        // At this point, the previous epoch hash must be available
+        let epoch_id = self
+            .runtime_adapter
+            .get_epoch_id_from_prev_block(&head.last_block_hash)
+            .expect("Epoch hash should exist at this point");
+
+        // Here `total_block_producers` is the number of block producers in the epoch of the previous
+        // block. It would be more correct to pass the number of block producers in the current epoch.
+        // However, when the epochs differ, `compute_quorums` exits on the very first iteration
+        // without using `total_block_producers`, so this doesn't affect the correctness of the
+        // computation.
+        let quorums = Chain::compute_quorums(
+            prev_hash,
+            epoch_id.clone(),
+            next_height,
+            approvals.clone(),
+            total_block_producers,
+            &*self.runtime_adapter,
+            self.chain.mut_store(),
+        )?
+        .clone();
 
         let score = if quorums.last_quorum_pre_vote == CryptoHash::default() {
             0.into()
@@ -294,8 +311,7 @@ impl Client {
         let prev_block = self.chain.get_block(&head.last_block_hash)?;
         let mut chunks = prev_block.chunks.clone();
 
-        // TODO (#1675): this assert can currently trigger due to epoch switches not handled properly
-        //assert!(score >= prev_block.header.inner.score);
+        assert!(score >= prev_block.header.inner.score);
 
         // Collect new chunks.
         for (shard_id, mut chunk_header) in new_chunks {
@@ -305,12 +321,6 @@ impl Client {
 
         let prev_header = &prev_block.header;
 
-        // At this point, the previous epoch hash must be available
-        let epoch_id = self
-            .runtime_adapter
-            .get_epoch_id_from_prev_block(&head.last_block_hash)
-            .expect("Epoch hash should exist at this point");
-
         let inflation = if self.runtime_adapter.is_next_block_epoch_start(&head.last_block_hash)? {
             let next_epoch_id =
                 self.runtime_adapter.get_next_epoch_id_from_prev_block(&head.last_block_hash)?;
@@ -418,6 +428,7 @@ impl Client {
             next_height,
             prev_block_timestamp,
             block_header.inner.gas_price,
+            chunk_extra.gas_limit,
             chunk_extra.state_root.clone(),
             transactions,
         );
@@ -973,69 +984,46 @@ impl Client {
         let header =
             unwrap_or_return!(self.chain.head_header(), NetworkClientResponses::NoResponse).clone();
         let path_parts: Vec<&str> = path.split('/').collect();
-        let state_root = {
-            if path_parts[0] == "validators" && path_parts.len() == 1 {
-                // for querying validators we don't need state root
-                StateRoot { hash: CryptoHash::default(), num_parts: 0 }
-            } else {
-                let account_id = AccountId::from(path_parts[1]);
-                let shard_id = self.runtime_adapter.account_id_to_shard_id(&account_id);
-                match self.chain.get_chunk_extra(&header.hash, shard_id) {
-                    Ok(chunk_extra) => chunk_extra.state_root.clone(),
-                    Err(e) => match e.kind() {
-                        ErrorKind::DBNotFoundErr(_) => {
-                            let me = self.block_producer.as_ref().map(|bp| &bp.account_id);
-                            let validator = unwrap_or_return!(
-                                self.find_validator_for_forwarding(shard_id),
-                                {
-                                    warn!(target: "client", "Me: {:?} Dropping query: {:?}", me, path);
-                                    NetworkClientResponses::NoResponse
-                                }
-                            );
-                            // TODO: remove this duplicate code
-                            if let Some(account_id) = me {
-                                if account_id == &validator {
-                                    // this probably means that we are crossing epoch boundary and the current node
-                                    // does not have state for the next epoch. TODO: figure out what to do in this case
-                                    return NetworkClientResponses::NoResponse;
-                                }
-                            }
-                            self.query_requests.cache_set(id.clone(), ());
-                            self.network_adapter.send(NetworkRequests::Query {
-                                account_id: validator,
-                                path,
-                                data,
-                                id,
-                            });
-                            return NetworkClientResponses::RequestRouted;
-                        }
-                        _ => {
-                            warn!(target: "client", "Getting chunk extra failed: {}", e.to_string());
-                            return NetworkClientResponses::NoResponse;
-                        }
-                    },
-                }
-            }
-        };
-
-        let response = unwrap_or_return!(
-            self.runtime_adapter
-                .query(
+        let account_id = AccountId::from(path_parts[1].clone());
+        let shard_id = self.runtime_adapter.account_id_to_shard_id(&account_id);
+        match self.chain.get_chunk_extra(&header.hash, shard_id) {
+            Ok(chunk_extra) => {
+                let state_root = chunk_extra.state_root.clone();
+                if let Ok(response) = self.runtime_adapter.query(
                     &state_root,
                     header.inner.height,
                     header.inner.timestamp,
                     &header.hash,
-                    path_parts,
+                    path_parts.clone(),
                     &data,
-                )
-                .map_err(|err| err.to_string()),
-            {
-                warn!(target: "client", "Query {} failed", path);
-                NetworkClientResponses::NoResponse
+                ) {
+                    return NetworkClientResponses::QueryResponse { response, id };
+                }
             }
-        );
+            Err(e) => match e.kind() {
+                ErrorKind::DBNotFoundErr(_) => {}
+                _ => {
+                    warn!(target: "client", "Getting chunk extra failed: {}", e.to_string());
+                    return NetworkClientResponses::NoResponse;
+                }
+            },
+        }
+
+        // route request
+        let me = self.block_producer.as_ref().map(|bp| &bp.account_id);
+        let validator = unwrap_or_return!(self.find_validator_for_forwarding(shard_id), {
+            warn!(target: "client", "Me: {:?} Dropping query: {:?}", me, path);
+            NetworkClientResponses::NoResponse
+        });
+        self.query_requests.cache_set(id.clone(), ());
+        self.network_adapter.send(NetworkRequests::Query {
+            account_id: validator,
+            path: path.clone(),
+            data: data.clone(),
+            id: id.clone(),
+        });
 
-        NetworkClientResponses::QueryResponse { response, id }
+        NetworkClientResponses::RequestRouted
     }
 
     /// Process transaction and either add it to the mempool or return to redirect to another validator.
diff --git a/chain/client/src/client_actor.rs b/chain/client/src/client_actor.rs
index 26a69a58469..9d21105f48f 100644
--- a/chain/client/src/client_actor.rs
+++ b/chain/client/src/client_actor.rs
@@ -28,7 +28,7 @@ use near_network::{
 };
 use near_primitives::block::GenesisId;
 use near_primitives::hash::CryptoHash;
-use near_primitives::types::{BlockIndex, EpochId, Range};
+use near_primitives::types::{BlockIndex, EpochId};
 use near_primitives::unwrap_or_return;
 use near_primitives::utils::{from_timestamp, to_timestamp};
 use near_primitives::views::ValidatorInfo;
@@ -44,6 +44,9 @@ use crate::types::{
 };
 use crate::{sync, StatusResponse};
 
+/// Multiplier on `max_block_production_delay` to wait before deciding that the chain has stalled.
+const STATUS_WAIT_TIME_MULTIPLIER: u64 = 10;
+
 enum AccountAnnounceVerificationResult {
     Valid,
     UnknownEpoch,
@@ -239,7 +242,7 @@ impl Handler<NetworkClientMessages> for ClientActor {
                         hash: self.client.chain.genesis().hash(),
                     },
                     height: head.height,
-                    total_weight: head.total_weight,
+                    weight_and_score: head.weight_and_score,
                     tracked_shards: self.client.config.tracked_shards.clone(),
                 },
                 Err(err) => {
@@ -266,21 +269,18 @@ impl Handler<NetworkClientMessages> for ClientActor {
                     // NetworkClientResponses::Ban { ban_reason: ReasonForBan::BadBlockApproval }
                 }
             }
-            NetworkClientMessages::StateRequest(
-                shard_id,
-                hash,
-                need_header,
-                parts_ranges,
-                route_back,
-            ) => {
-                let mut parts = vec![];
-                for Range(from, to) in parts_ranges {
-                    for part_id in from..to {
-                        if let Ok(part) =
-                            self.client.chain.get_state_response_part(shard_id, part_id, hash)
-                        {
-                            parts.push(part);
-                        } else {
+            NetworkClientMessages::StateRequest(shard_id, hash, need_header, parts, route_back) => {
+                let mut data = vec![];
+                for part_id in parts.ids.iter() {
+                    match self.client.chain.get_state_response_part(
+                        shard_id,
+                        *part_id,
+                        parts.num_parts,
+                        hash,
+                    ) {
+                        Ok(part) => data.push(part),
+                        Err(e) => {
+                            error!(target: "sync", "Cannot build sync part (get_state_response_part): {}", e);
                             return NetworkClientResponses::NoResponse;
                         }
                     }
@@ -294,13 +294,15 @@ impl Handler<NetworkClientMessages> for ClientActor {
                                     hash,
                                     shard_state: ShardStateSyncResponse {
                                         header: Some(header),
-                                        parts,
+                                        part_ids: parts.ids,
+                                        data,
                                     },
                                 },
                                 route_back,
                             );
                         }
-                        Err(_) => {
+                        Err(e) => {
+                            error!(target: "sync", "Cannot build sync header (get_state_response_header): {}", e);
                             return NetworkClientResponses::NoResponse;
                         }
                     }
@@ -309,7 +311,11 @@ impl Handler<NetworkClientMessages> for ClientActor {
                         StateResponseInfo {
                             shard_id,
                             hash,
-                            shard_state: ShardStateSyncResponse { header: None, parts },
+                            shard_state: ShardStateSyncResponse {
+                                header: None,
+                                part_ids: parts.ids,
+                                data,
+                            },
                         },
                         route_back,
                     );
@@ -379,17 +385,20 @@ impl Handler<NetworkClientMessages> for ClientActor {
                             }
                         }
                         ShardSyncStatus::StateDownloadParts => {
-                            for part in shard_state.parts.iter() {
-                                let part_id = part.state_part.part_id as usize;
-                                if part_id >= shard_sync_download.downloads.len() {
-                                    // TODO ???
+                            let num_parts = shard_sync_download.downloads.len();
+                            for (i, part_id) in shard_state.part_ids.iter().enumerate() {
+                                let part_id = *part_id as usize;
+                                if part_id >= num_parts {
+                                    // This may happen only if we have somehow accepted a wrong header.
                                     continue;
                                 }
                                 if !shard_sync_download.downloads[part_id].done {
                                     match self.client.chain.set_state_part(
                                         shard_id,
                                         hash,
-                                        part.clone(),
+                                        part_id as u64,
+                                        num_parts as u64,
+                                        &shard_state.data[i],
                                     ) {
                                         Ok(()) => {
                                             shard_sync_download.downloads[part_id].done = true;
@@ -470,6 +479,15 @@ impl Handler<Status> for ClientActor {
             .get_block_header(&head.last_block_hash)
             .map_err(|err| err.to_string())?;
         let latest_block_time = prev_header.inner.timestamp.clone();
+        let elapsed = (Utc::now() - from_timestamp(latest_block_time)).to_std().unwrap();
+        if elapsed
+            > Duration::from_millis(
+                self.client.config.max_block_production_delay.as_millis() as u64
+                    * STATUS_WAIT_TIME_MULTIPLIER,
+            )
+        {
+            return Err(format!("No blocks for {:?}.", elapsed));
+        }
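In other words, the status endpoint starts returning an error once no block has been observed for `max_block_production_delay` times `STATUS_WAIT_TIME_MULTIPLIER`. A sketch with an illustrative 2-second delay (the real value comes from the client config):

use std::time::Duration;

const STATUS_WAIT_TIME_MULTIPLIER: u64 = 10;

// Mirrors the stall condition above: elapsed time exceeds 10x the max block production delay.
fn chain_stalled(elapsed: Duration, max_block_production_delay: Duration) -> bool {
    elapsed
        > Duration::from_millis(
            max_block_production_delay.as_millis() as u64 * STATUS_WAIT_TIME_MULTIPLIER,
        )
}

fn main() {
    let max_delay = Duration::from_secs(2); // illustrative value
    assert!(!chain_stalled(Duration::from_secs(15), max_delay)); // below the 20s threshold
    assert!(chain_stalled(Duration::from_secs(25), max_delay));  // above the 20s threshold
}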
         let validators = self
             .client
             .runtime_adapter
@@ -727,8 +745,11 @@ impl ClientActor {
                 accepted_block.status,
                 accepted_block.provenance,
             );
+            let block = self.client.chain.get_block(&accepted_block.hash).unwrap();
+            let gas_used = Block::compute_gas_used(&block.chunks, block.header.inner.height);
+            let gas_limit = Block::compute_gas_limit(&block.chunks, block.header.inner.height);
 
-            self.info_helper.block_processed(accepted_block.gas_used, accepted_block.gas_limit);
+            self.info_helper.block_processed(gas_used, gas_limit);
             self.check_send_announce_account(accepted_block.hash);
         }
     }
@@ -905,23 +926,27 @@ impl ClientActor {
             };
 
         if is_syncing {
-            if full_peer_info.chain_info.total_weight <= head.total_weight {
-                info!(target: "client", "Sync: synced at {} @ {} [{}]", head.total_weight.to_num(), head.height, head.last_block_hash);
+            if full_peer_info.chain_info.weight_and_score <= head.weight_and_score {
+                info!(target: "client", "Sync: synced at weight: {}, score: {} @ {} [{}]", head.weight_and_score.weight.to_num(), head.weight_and_score.score.to_num(), head.height, head.last_block_hash);
                 is_syncing = false;
             }
         } else {
-            if full_peer_info.chain_info.total_weight.to_num()
-                > head.total_weight.to_num() + self.client.config.sync_weight_threshold
+            if full_peer_info
+                .chain_info
+                .weight_and_score
+                .beyond_threshold(&head.weight_and_score, self.client.config.sync_weight_threshold)
                 && full_peer_info.chain_info.height
                     > head.height + self.client.config.sync_height_threshold
             {
                 info!(
                     target: "client",
-                    "Sync: height/weight: {}/{}, peer height/weight: {}/{}, enabling sync",
+                    "Sync: height/weight/score: {}/{}/{}, peer height/weight/score: {}/{}/{}, enabling sync",
                     head.height,
-                    head.total_weight,
+                    head.weight_and_score.weight,
+                    head.weight_and_score.score,
                     full_peer_info.chain_info.height,
-                    full_peer_info.chain_info.total_weight
+                    full_peer_info.chain_info.weight_and_score.weight,
+                    full_peer_info.chain_info.weight_and_score.score,
                 );
                 is_syncing = true;
             }
diff --git a/chain/client/src/lib.rs b/chain/client/src/lib.rs
index 5b7fb0cdff8..17410620cf8 100644
--- a/chain/client/src/lib.rs
+++ b/chain/client/src/lib.rs
@@ -4,8 +4,8 @@ extern crate lazy_static;
 pub use crate::client::Client;
 pub use crate::client_actor::ClientActor;
 pub use crate::types::{
-    BlockProducer, ClientConfig, Error, GetBlock, GetChunk, GetNetworkInfo, Query, Status,
-    StatusResponse, SyncStatus, TxStatus,
+    BlockProducer, ClientConfig, Error, GetBlock, GetChunk, GetNetworkInfo, GetValidatorInfo,
+    Query, Status, StatusResponse, SyncStatus, TxStatus,
 };
 pub use crate::view_client::ViewClientActor;
 
diff --git a/chain/client/src/sync.rs b/chain/client/src/sync.rs
index ffcb49e18bb..ed6be05c91e 100644
--- a/chain/client/src/sync.rs
+++ b/chain/client/src/sync.rs
@@ -6,13 +6,13 @@ use chrono::{DateTime, Duration, Utc};
 use log::{debug, error, info};
 use rand::{thread_rng, Rng};
 
-use near_chain::types::ShardStateSyncResponseHeader;
+use near_chain::types::StateRequestParts;
 use near_chain::{Chain, RuntimeAdapter, Tip};
 use near_chunks::NetworkAdapter;
 use near_network::types::{AccountOrPeerIdOrHash, ReasonForBan};
 use near_network::{FullPeerInfo, NetworkRequests};
 use near_primitives::hash::CryptoHash;
-use near_primitives::types::{BlockIndex, Range, ShardId};
+use near_primitives::types::{BlockIndex, ShardId, StateRootNode};
 use near_primitives::unwrap_or_return;
 
 use crate::types::{DownloadStatus, ShardSyncDownload, ShardSyncStatus, SyncStatus};
@@ -104,7 +104,7 @@ impl HeaderSync {
             let header_head = chain.header_head()?;
             self.syncing_peer = None;
             if let Some(peer) = most_weight_peer(&most_weight_peers) {
-                if peer.chain_info.total_weight > header_head.total_weight {
+                if peer.chain_info.weight_and_score > header_head.weight_and_score {
                     self.syncing_peer = self.request_headers(chain, peer);
                 }
             }
@@ -150,8 +150,8 @@ impl HeaderSync {
                                 if now > *stalling_ts + Duration::seconds(120)
                                     && *highest_height == peer.chain_info.height
                                 {
-                                    info!(target: "sync", "Sync: ban a fraudulent peer: {}, claimed height: {}, total weight: {}",
-                                        peer.peer_info, peer.chain_info.height, peer.chain_info.total_weight);
+                                    info!(target: "sync", "Sync: ban a fraudulent peer: {}, claimed height: {}, total weight: {}, score: {}",
+                                        peer.peer_info, peer.chain_info.height, peer.chain_info.weight_and_score.weight, peer.chain_info.weight_and_score.score);
                                     self.network_adapter.send(NetworkRequests::BanPeer {
                                         peer_id: peer.peer_info.id.clone(),
                                         ban_reason: ReasonForBan::HeightFraud,
@@ -404,49 +404,6 @@ pub enum StateSyncResult {
     Completed,
 }
 
-pub struct StateSyncStrategy {}
-
-impl StateSyncStrategy {
-    pub fn download_by_one(downloads: &Vec<DownloadStatus>) -> Vec<Vec<Range>> {
-        let mut strategy = vec![];
-        for (i, download) in downloads.iter().enumerate() {
-            if download.run_me {
-                strategy.push(vec![Range(i as u64, i as u64 + 1)]);
-            }
-        }
-        strategy
-    }
-
-    pub fn download_sqrt(downloads: &Vec<DownloadStatus>) -> Vec<Vec<Range>> {
-        let len = downloads.len();
-        let run_count = downloads.iter().filter(|d| d.run_me).count();
-        if run_count * 5 < len {
-            // We downloaded more than 80% of the state.
-            // Let's distribute all small pieces between all nodes.
-            return StateSyncStrategy::download_by_one(downloads);
-        }
-        let mut strategy = vec![];
-        let mut begin = 0;
-        for (i, download) in downloads.iter().enumerate() {
-            if download.run_me {
-                if i - begin >= (len as f64).sqrt() as usize {
-                    strategy.push(vec![Range(begin as u64, i as u64)]);
-                    begin = i;
-                }
-            } else {
-                if begin != i {
-                    strategy.push(vec![Range(begin as u64, i as u64)]);
-                }
-                begin = i + 1;
-            }
-        }
-        if begin != len {
-            strategy.push(vec![Range(begin as u64, len as u64)]);
-        }
-        strategy
-    }
-}
-
 /// Helper to track state sync.
 pub struct StateSync {
     network_adapter: Arc<dyn NetworkAdapter>,
@@ -464,6 +421,15 @@ impl StateSync {
         }
     }
 
+    pub fn get_num_parts(&self, state_root_node: &StateRootNode) -> u64 {
+        let state_size = state_root_node.memory_usage;
+        // We assume that 1 MiB is a reasonable limit for the size of a state part.
+        // On the other hand, it is important to split any state into
+        // several parts to make sure that partitioning always works.
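+        // For example, a state of 5 MiB is split into 5 + 3 = 8 parts, while
+        // an empty state still gets 3 parts.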
+        // TODO #1708
+        state_size / (1024 * 1024) + 3
+    }
+
     pub fn sync_block_status(
         &mut self,
         sync_hash: CryptoHash,
@@ -532,8 +498,8 @@ impl StateSync {
                     if shard_sync_download.downloads[0].done {
                         let shard_state_header =
                             chain.get_received_state_header(shard_id, sync_hash)?;
-                        let ShardStateSyncResponseHeader { chunk, .. } = shard_state_header;
-                        let state_num_parts = chunk.header.inner.prev_state_root.num_parts;
+                        let state_num_parts =
+                            self.get_num_parts(&shard_state_header.state_root_node);
                         *shard_sync_download = ShardSyncDownload {
                             downloads: vec![
                                 DownloadStatus {
@@ -588,7 +554,10 @@ impl StateSync {
                     }
                 }
                 ShardSyncStatus::StateDownloadFinalize => {
-                    match chain.set_state_finalize(shard_id, sync_hash) {
+                    let shard_state_header =
+                        chain.get_received_state_header(shard_id, sync_hash)?;
+                    let state_num_parts = self.get_num_parts(&shard_state_header.state_root_node);
+                    match chain.set_state_finalize(shard_id, sync_hash, state_num_parts) {
                         Ok(_) => {
                             update_sync_status = true;
                             *shard_sync_download = ShardSyncDownload {
@@ -602,11 +571,16 @@ impl StateSync {
                             error!(target: "sync", "State sync finalizing error, shard = {}, hash = {}: {:?}", shard_id, sync_hash, e);
                             update_sync_status = true;
                             *shard_sync_download = init_sync_download.clone();
+                            chain.clear_downloaded_parts(shard_id, sync_hash, state_num_parts)?;
                         }
                     }
                 }
                 ShardSyncStatus::StateDownloadComplete => {
                     this_done = true;
+                    let shard_state_header =
+                        chain.get_received_state_header(shard_id, sync_hash)?;
+                    let state_num_parts = self.get_num_parts(&shard_state_header.state_root_node);
+                    chain.clear_downloaded_parts(shard_id, sync_hash, state_num_parts)?;
                 }
             }
             all_done &= this_done;
@@ -685,7 +659,7 @@ impl StateSync {
                     shard_id,
                     hash,
                     need_header: true,
-                    parts_ranges: vec![],
+                    parts: StateRequestParts::default(),
                     target: possible_targets[thread_rng().gen_range(0, possible_targets.len())]
                         .clone(),
                 });
@@ -693,16 +667,22 @@ impl StateSync {
                 new_shard_sync_download.downloads[0].run_me = false;
             }
             ShardSyncStatus::StateDownloadParts => {
-                let download_strategy =
-                    StateSyncStrategy::download_sqrt(&shard_sync_download.downloads);
-                self.apply_download_strategy(
-                    shard_id,
-                    hash,
-                    &possible_targets,
-                    download_strategy,
-                    &shard_sync_download,
-                    &mut new_shard_sync_download,
-                )?;
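+                // Request each still-missing part individually from a randomly chosen target;
+                // `num_parts` carries the total number of parts this state is split into.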
+                for (i, download) in new_shard_sync_download.downloads.iter().enumerate() {
+                    if download.run_me {
+                        self.network_adapter.send(NetworkRequests::StateRequest {
+                            shard_id,
+                            hash,
+                            need_header: false,
+                            parts: StateRequestParts {
+                                ids: vec![i as u64],
+                                num_parts: new_shard_sync_download.downloads.len() as u64,
+                            },
+                            target: possible_targets
+                                [thread_rng().gen_range(0, possible_targets.len())]
+                            .clone(),
+                        });
+                    }
+                }
             }
             _ => {}
         }
@@ -753,35 +733,6 @@ impl StateSync {
             StateSyncResult::Unchanged
         })
     }
-
-    pub fn apply_download_strategy(
-        &mut self,
-        shard_id: ShardId,
-        hash: CryptoHash,
-        possible_targets: &Vec<AccountOrPeerIdOrHash>,
-        download_strategy: Vec<Vec<Range>>,
-        shard_sync_download: &ShardSyncDownload,
-        new_shard_sync_download: &mut ShardSyncDownload,
-    ) -> Result<(), near_chain::Error> {
-        let state_num_parts = shard_sync_download.downloads.len();
-        assert_eq!(state_num_parts, new_shard_sync_download.downloads.len());
-        for parts_ranges in download_strategy {
-            for Range(from, to) in parts_ranges.iter() {
-                for i in *from as usize..*to as usize {
-                    assert!(new_shard_sync_download.downloads[i].run_me);
-                    new_shard_sync_download.downloads[i].run_me = false;
-                }
-            }
-            self.network_adapter.send(NetworkRequests::StateRequest {
-                shard_id,
-                hash,
-                need_header: false,
-                parts_ranges,
-                target: possible_targets[thread_rng().gen_range(0, possible_targets.len())].clone(),
-            });
-        }
-        Ok(())
-    }
 }
 
 #[cfg(test)]
@@ -847,7 +798,7 @@ mod test {
                     hash: chain.genesis().hash(),
                 },
                 height: chain2.head().unwrap().height,
-                total_weight: chain2.head().unwrap().total_weight,
+                weight_and_score: chain2.head().unwrap().weight_and_score,
                 tracked_shards: vec![],
             },
             edge_info: EdgeInfo::default(),
diff --git a/chain/client/src/test_utils.rs b/chain/client/src/test_utils.rs
index f5b0a6dd267..770bf71a88f 100644
--- a/chain/client/src/test_utils.rs
+++ b/chain/client/src/test_utils.rs
@@ -1,5 +1,5 @@
 use std::cmp::max;
-use std::collections::{HashSet, VecDeque};
+use std::collections::{BTreeSet, HashMap, HashSet, VecDeque};
 use std::ops::DerefMut;
 use std::sync::{Arc, RwLock};
 use std::time::Duration;
@@ -21,7 +21,7 @@ use near_network::{
     FullPeerInfo, NetworkClientMessages, NetworkClientResponses, NetworkRequests, NetworkResponses,
     PeerInfo, PeerManagerActor,
 };
-use near_primitives::block::{Block, GenesisId, Weight};
+use near_primitives::block::{Block, GenesisId, WeightAndScore};
 use near_primitives::transaction::SignedTransaction;
 use near_primitives::types::{AccountId, BlockIndex, ShardId, ValidatorId};
 use near_store::test_utils::create_test_store;
@@ -29,7 +29,8 @@ use near_store::Store;
 use near_telemetry::TelemetryActor;
 
 use crate::{BlockProducer, Client, ClientActor, ClientConfig, ViewClientActor};
-use near_primitives::hash::hash;
+use near_primitives::hash::{hash, CryptoHash};
+use std::ops::Bound::{Excluded, Included, Unbounded};
 
 pub type NetworkMock = Mocker<PeerManagerActor>;
 
@@ -58,7 +59,8 @@ pub fn setup(
     epoch_length: u64,
     account_id: &str,
     skip_sync_wait: bool,
-    block_prod_time: u64,
+    min_block_prod_time: u64,
+    max_block_prod_time: u64,
     recipient: Recipient<NetworkRequests>,
     tx_validity_period: BlockIndex,
     genesis_time: DateTime<Utc>,
@@ -88,7 +90,12 @@ pub fn setup(
     let signer = Arc::new(InMemorySigner::from_seed(account_id, KeyType::ED25519, account_id));
     let telemetry = TelemetryActor::default().start();
     let view_client = ViewClientActor::new(store.clone(), &chain_genesis, runtime.clone()).unwrap();
-    let config = ClientConfig::test(skip_sync_wait, block_prod_time, num_validators);
+    let config = ClientConfig::test(
+        skip_sync_wait,
+        min_block_prod_time,
+        max_block_prod_time,
+        num_validators,
+    );
     let client = ClientActor::new(
         config,
         store,
@@ -150,6 +157,7 @@ pub fn setup_mock_with_validity_period(
             account_id,
             skip_sync_wait,
             100,
+            200,
             pm.recipient(),
             validity_period,
             Utc::now(),
@@ -165,6 +173,36 @@ fn sample_binary(n: u64, k: u64) -> bool {
 }
 
 /// Sets up ClientActor and ViewClientActor with mock PeerManager.
+///
+/// # Arguments
+/// * `validators` - a vector of vectors of validator names. Each inner vector is the set of
+///                 validators for a particular epoch. E.g. if `validators` has three elements, then
+///                 each epoch with id % 3 == 0 will have the first set of validators, each epoch
+///                 with id % 3 == 1 will have the second set, and each epoch with id % 3 == 2 will
+///                 have the third.
+/// * `key_pairs` - a flattened list of key pairs for the `validators`
+/// * `validator_groups` - how many groups to split validators into. E.g. say there are four shards,
+///                 and four validators in a particular epoch. If `validator_groups == 1`, all
+///                 validators will validate all shards. If `validator_groups == 2`, shards 0 and 1
+///                 will have two validators validating them, and shards 2 and 3 will have the
+///                 remaining two. If `validator_groups == 4`, each validator will validate a
+///                 single shard.
+/// * `skip_sync_wait`
+/// * `block_prod_time` - Minimum block production time, assuming there are enough approvals. The
+///                 maximum block production time depends on the value of `tamper_with_fg`, and is
+///                 equal to `block_prod_time` if `tamper_with_fg` is `true`, otherwise it is
+///                 `block_prod_time * 2`
+/// * `drop_chunks` - if set to true, 10% of all the chunk messages / requests will be dropped
+/// * `tamper_with_fg` - if set to true, will split the heights into groups of 100. For some groups
+///                 all the approvals will be dropped (thus completely disabling the finality gadget
+///                 and introducing severe forkfulness if `block_prod_time` is sufficiently small),
+///                 for some groups all the approvals will be kept (to test the fg invariants), and
+///                 for some groups 50% of the approvals will be dropped.
+/// * `epoch_length` - approximate number of heights per epoch
+/// * `network_mock` - the callback that is called for each message sent. The `mock` is called
+///                 before the default processing. `mock` returns `(response, perform_default)`. If
+///                 `perform_default` is false, the message is not processed or broadcast further
+///                 and `response` is returned to the requester immediately. Otherwise the default
+///                 action is performed, which might (and likely will) overwrite the `response`
+///                 before it is sent back to the requester.
 pub fn setup_mock_all_validators(
     validators: Vec<Vec<&'static str>>,
     key_pairs: Vec<PeerInfo>,
@@ -172,6 +210,7 @@ pub fn setup_mock_all_validators(
     skip_sync_wait: bool,
     block_prod_time: u64,
     drop_chunks: bool,
+    tamper_with_fg: bool,
     epoch_length: u64,
     network_mock: Arc<RwLock<dyn FnMut(String, &NetworkRequests) -> (NetworkResponses, bool)>>,
 ) -> (Block, Vec<(Addr<ClientActor>, Addr<ViewClientActor>)>) {
@@ -193,7 +232,11 @@ pub fn setup_mock_all_validators(
     let genesis_block = Arc::new(RwLock::new(None));
     let num_shards = validators.iter().map(|x| x.len()).min().unwrap() as ShardId;
 
-    let last_height_weight = Arc::new(RwLock::new(vec![(0, Weight::from(0)); key_pairs.len()]));
+    let last_height_weight =
+        Arc::new(RwLock::new(vec![(0, WeightAndScore::from_ints(0, 0)); key_pairs.len()]));
+    let hash_to_score = Arc::new(RwLock::new(HashMap::new()));
+    let approval_intervals: Arc<RwLock<Vec<BTreeSet<(WeightAndScore, WeightAndScore)>>>> =
+        Arc::new(RwLock::new(key_pairs.iter().map(|_| BTreeSet::new()).collect()));
 
     for account_id in validators.iter().flatten().cloned() {
         let view_client_addr = Arc::new(RwLock::new(None));
@@ -207,6 +250,8 @@ pub fn setup_mock_all_validators(
         let network_mock1 = network_mock.clone();
         let announced_accounts1 = announced_accounts.clone();
         let last_height_weight1 = last_height_weight.clone();
+        let hash_to_score1 = hash_to_score.clone();
+        let approval_intervals1 = approval_intervals.clone();
         let client_addr = ClientActor::create(move |ctx| {
             let _client_addr = ctx.address();
             let pm = NetworkMock::mock(Box::new(move |msg, _ctx| {
@@ -246,7 +291,7 @@ pub fn setup_mock_all_validators(
                                             hash: Default::default(),
                                         },
                                         height: last_height_weight1[i].0,
-                                        total_weight: last_height_weight1[i].1,
+                                        weight_and_score: last_height_weight1[i].1,
                                         tracked_shards: vec![],
                                     },
                                     edge_info: EdgeInfo::default(),
@@ -278,7 +323,12 @@ pub fn setup_mock_all_validators(
 
                             my_height_weight.0 = max(my_height_weight.0, block.header.inner.height);
                             my_height_weight.1 =
-                                max(my_height_weight.1, block.header.inner.total_weight);
+                                max(my_height_weight.1, block.header.inner.weight_and_score());
+
+                            hash_to_score1
+                                .write()
+                                .unwrap()
+                                .insert(block.header.hash(), block.header.inner.weight_and_score());
                         }
                         NetworkRequests::PartialEncodedChunkRequest {
                             account_id: their_account_id,
@@ -395,7 +445,7 @@ pub fn setup_mock_all_validators(
                             shard_id,
                             hash,
                             need_header,
-                            parts_ranges,
+                            parts,
                             target: target_account_id,
                         } => {
                             let target_account_id = match target_account_id {
@@ -412,7 +462,7 @@ pub fn setup_mock_all_validators(
                                                 *shard_id,
                                                 *hash,
                                                 *need_header,
-                                                parts_ranges.to_vec(),
+                                                parts.clone(),
                                                 my_address,
                                             ))
                                             .then(move |response| {
@@ -455,19 +505,73 @@ pub fn setup_mock_all_validators(
                             }
                         }
                         NetworkRequests::BlockHeaderAnnounce {
-                            header: _,
+                            header,
                             approval_message: Some(approval_message),
                         } => {
-                            for (i, name) in validators_clone2.iter().flatten().enumerate() {
-                                if name == &approval_message.target {
-                                    connectors1.read().unwrap()[i].0.do_send(
-                                        NetworkClientMessages::BlockApproval(
-                                            approval_message.approval.clone(),
-                                            my_key_pair.id.clone(),
-                                        ),
-                                    );
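+                            // When tampering with the finality gadget, heights are processed in
+                            // windows of 100: drop all approvals, then drop roughly half at
+                            // random, then keep them all.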
+                            let height_mod = header.inner.height % 300;
+
+                            let do_propagate = if tamper_with_fg {
+                                if height_mod < 100 {
+                                    false
+                                } else if height_mod < 200 {
+                                    let mut rng = rand::thread_rng();
+                                    rng.gen()
+                                } else {
+                                    true
+                                }
+                            } else {
+                                true
+                            };
+
+                            let approval = approval_message.approval.clone();
+
+                            if do_propagate {
+                                for (i, name) in validators_clone2.iter().flatten().enumerate() {
+                                    if name == &approval_message.target {
+                                        connectors1.read().unwrap()[i].0.do_send(
+                                            NetworkClientMessages::BlockApproval(
+                                                approval.clone(),
+                                                my_key_pair.id.clone(),
+                                            ),
+                                        );
+                                    }
                                 }
                             }
+
+                            // Ensure the finality gadget invariant that no two approvals intersect
+                            //     is maintained
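+                            // E.g. if an earlier approval covered the interval (3, 5), a later
+                            // approval covering (4, 7) would overlap it and trip the asserts below.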
+                            let hh = hash_to_score1.read().unwrap();
+                            let arange =
+                                (hh.get(&approval.reference_hash), hh.get(&approval.parent_hash));
+                            if let (Some(left), Some(right)) = arange {
+                                let arange = (*left, *right);
+                                assert!(arange.0 <= arange.1);
+
+                                let approval_intervals =
+                                    &mut approval_intervals1.write().unwrap()[my_ord];
+                                let prev = approval_intervals
+                                    .range((Unbounded, Excluded((arange.0, arange.0))))
+                                    .next_back();
+                                let mut next_weight_and_score = arange.0;
+                                next_weight_and_score.weight =
+                                    (next_weight_and_score.weight.to_num() + 1).into();
+                                let next = approval_intervals
+                                    .range((
+                                        Included((next_weight_and_score, next_weight_and_score)),
+                                        Unbounded,
+                                    ))
+                                    .next();
+
+                                if let Some(prev) = prev {
+                                    assert!(prev.1 < arange.0);
+                                }
+
+                                if let Some(next) = next {
+                                    assert!(next.0 > arange.1);
+                                }
+
+                                approval_intervals.insert(arange);
+                            }
                         }
                         NetworkRequests::ForwardTx(_, _)
                         | NetworkRequests::Sync { .. }
@@ -494,6 +598,13 @@ pub fn setup_mock_all_validators(
                 account_id,
                 skip_sync_wait,
                 block_prod_time,
+                // When we tamper with fg, some blocks will have enough approvals and some will
+                //     not, and the block production timeout is carefully chosen to get enough
+                //     forkfulness without breaking the synchrony assumption, so we can't allow
+                //     the timeout to differ too much between blocks with and without enough
+                //     approvals.
+                // When not tampering with fg, keep the relationship between the min and max block
+                //     production times closer to the real one.
+                if tamper_with_fg { block_prod_time } else { block_prod_time * 2 },
                 pm.recipient(),
                 10000,
                 genesis_time,
@@ -504,6 +615,11 @@ pub fn setup_mock_all_validators(
         });
         ret.push((client_addr, view_client_addr.clone().read().unwrap().clone().unwrap()));
     }
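+    // Seed the map so that approvals that reference the default hash or the genesis block
+    // resolve to zero weight and score.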
+    hash_to_score.write().unwrap().insert(CryptoHash::default(), WeightAndScore::from_ints(0, 0));
+    hash_to_score.write().unwrap().insert(
+        genesis_block.read().unwrap().as_ref().unwrap().header.clone().hash(),
+        WeightAndScore::from_ints(0, 0),
+    );
     *locked_connectors = ret.clone();
     let value = genesis_block.read().unwrap();
     (value.clone().unwrap(), ret)
@@ -560,7 +676,7 @@ pub fn setup_client_with_runtime(
 ) -> Client {
     let block_producer =
         account_id.map(|x| Arc::new(InMemorySigner::from_seed(x, KeyType::ED25519, x)).into());
-    let mut config = ClientConfig::test(true, 10, num_validators);
+    let mut config = ClientConfig::test(true, 10, 20, num_validators);
     config.epoch_length = chain_genesis.epoch_length;
     Client::new(config, store, chain_genesis, runtime_adapter, network_adapter, block_producer)
         .unwrap()
diff --git a/chain/client/src/types.rs b/chain/client/src/types.rs
index 9c91fe71f19..25cb5ce1ccf 100644
--- a/chain/client/src/types.rs
+++ b/chain/client/src/types.rs
@@ -11,7 +11,9 @@ use near_network::PeerInfo;
 use near_primitives::hash::CryptoHash;
 use near_primitives::sharding::ChunkHash;
 use near_primitives::types::{AccountId, BlockIndex, ShardId, ValidatorId, Version};
-use near_primitives::views::{BlockView, ChunkView, FinalExecutionOutcomeView, QueryResponse};
+use near_primitives::views::{
+    BlockView, ChunkView, EpochValidatorInfo, FinalExecutionOutcomeView, QueryResponse,
+};
 pub use near_primitives::views::{StatusResponse, StatusSyncInfo};
 
 /// Combines errors coming from chain, tx pool and block producer.
@@ -136,7 +138,8 @@ pub struct ClientConfig {
 impl ClientConfig {
     pub fn test(
         skip_sync_wait: bool,
-        block_prod_time: u64,
+        min_block_prod_time: u64,
+        max_block_prod_time: u64,
         num_block_producers: ValidatorId,
     ) -> Self {
         ClientConfig {
@@ -145,11 +148,11 @@ impl ClientConfig {
             rpc_addr: "0.0.0.0:3030".to_string(),
             block_production_tracking_delay: Duration::from_millis(std::cmp::max(
                 10,
-                block_prod_time / 5,
+                min_block_prod_time / 5,
             )),
-            min_block_production_delay: Duration::from_millis(block_prod_time),
-            max_block_production_delay: Duration::from_millis(2 * block_prod_time),
-            max_block_wait_delay: Duration::from_millis(3 * block_prod_time),
+            min_block_production_delay: Duration::from_millis(min_block_prod_time),
+            max_block_production_delay: Duration::from_millis(max_block_prod_time),
+            max_block_wait_delay: Duration::from_millis(3 * min_block_prod_time),
             reduce_wait_for_missing_block: Duration::from_millis(0),
             block_expected_weight: 1000,
             skip_sync_wait,
@@ -167,8 +170,8 @@ impl ClientConfig {
             ttl_account_id_router: Duration::from_secs(60 * 60),
             block_fetch_horizon: 50,
             state_fetch_horizon: 5,
-            catchup_step_period: Duration::from_millis(block_prod_time / 2),
-            chunk_request_retry_period: Duration::from_millis(block_prod_time / 5),
+            catchup_step_period: Duration::from_millis(min_block_prod_time / 2),
+            chunk_request_retry_period: Duration::from_millis(min_block_prod_time / 5),
             block_header_fetch_horizon: 50,
             tracked_accounts: vec![],
             tracked_shards: vec![],
@@ -306,3 +309,11 @@ pub struct TxStatus {
 impl Message for TxStatus {
     type Result = Result<FinalExecutionOutcomeView, String>;
 }
+
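+/// Message used to query validator information for the epoch that contains `last_block_hash`.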
+pub struct GetValidatorInfo {
+    pub last_block_hash: CryptoHash,
+}
+
+impl Message for GetValidatorInfo {
+    type Result = Result<EpochValidatorInfo, String>;
+}
diff --git a/chain/client/src/view_client.rs b/chain/client/src/view_client.rs
index aa07730ee34..66769367a9f 100644
--- a/chain/client/src/view_client.rs
+++ b/chain/client/src/view_client.rs
@@ -6,13 +6,14 @@ use std::sync::Arc;
 use actix::{Actor, Context, Handler};
 
 use near_chain::{Chain, ChainGenesis, RuntimeAdapter};
-use near_primitives::hash::CryptoHash;
 use near_primitives::types::{AccountId, StateRoot};
-use near_primitives::views::{BlockView, ChunkView, FinalExecutionOutcomeView, QueryResponse};
+use near_primitives::views::{
+    BlockView, ChunkView, EpochValidatorInfo, FinalExecutionOutcomeView, QueryResponse,
+};
 use near_store::Store;
 
 use crate::types::{Error, GetBlock, Query, TxStatus};
-use crate::GetChunk;
+use crate::{GetChunk, GetValidatorInfo};
 
 /// View client provides currently committed (to the storage) view of the current chain and state.
 pub struct ViewClientActor {
@@ -49,7 +50,7 @@ impl Handler<Query> for ViewClientActor {
         let state_root = {
             if path_parts[0] == "validators" && path_parts.len() == 1 {
                 // for querying validators we don't need state root
-                StateRoot { hash: CryptoHash::default(), num_parts: 0 }
+                StateRoot::default()
             } else {
                 let account_id = AccountId::from(path_parts[1]);
                 let shard_id = self.runtime_adapter.account_id_to_shard_id(&account_id);
@@ -125,3 +126,11 @@ impl Handler<TxStatus> for ViewClientActor {
         self.chain.get_final_transaction_result(&msg.tx_hash)
     }
 }
+
+impl Handler<GetValidatorInfo> for ViewClientActor {
+    type Result = Result<EpochValidatorInfo, String>;
+
+    fn handle(&mut self, msg: GetValidatorInfo, _: &mut Context<Self>) -> Self::Result {
+        self.runtime_adapter.get_validator_info(&msg.last_block_hash).map_err(|e| e.to_string())
+    }
+}
diff --git a/chain/client/tests/bug_repros.rs b/chain/client/tests/bug_repros.rs
index 8c9a28e7c20..cbf7e96ed30 100644
--- a/chain/client/tests/bug_repros.rs
+++ b/chain/client/tests/bug_repros.rs
@@ -41,6 +41,7 @@ fn repro_1183() {
             true,
             200,
             false,
+            false,
             5,
             Arc::new(RwLock::new(move |_account_id: String, msg: &NetworkRequests| {
                 if let NetworkRequests::Block { block } = msg {
diff --git a/chain/client/tests/catching_up.rs b/chain/client/tests/catching_up.rs
index d0df00c74d1..f950fb6fe7c 100644
--- a/chain/client/tests/catching_up.rs
+++ b/chain/client/tests/catching_up.rs
@@ -110,6 +110,7 @@ mod tests {
                 true,
                 1200,
                 false,
+                false,
                 5,
                 Arc::new(RwLock::new(move |_account_id: String, msg: &NetworkRequests| {
                     let account_from = "test3.3".to_string();
@@ -344,6 +345,7 @@ mod tests {
                 true,
                 1500,
                 false,
+                false,
                 5,
                 Arc::new(RwLock::new(move |_account_id: String, msg: &NetworkRequests| {
                     let mut seen_heights_same_block = seen_heights_same_block.write().unwrap();
@@ -553,6 +555,7 @@ mod tests {
                 true,
                 400,
                 false,
+                false,
                 5,
                 Arc::new(RwLock::new(move |_account_id: String, msg: &NetworkRequests| {
                     if let NetworkRequests::Block { block } = msg {
diff --git a/chain/client/tests/challenges.rs b/chain/client/tests/challenges.rs
index 63c753d3639..18870223c8b 100644
--- a/chain/client/tests/challenges.rs
+++ b/chain/client/tests/challenges.rs
@@ -145,7 +145,7 @@ fn create_invalid_proofs_chunk(
         vec![],
         &*client.block_producer.as_ref().unwrap().signer,
         0.into(),
-        CryptoHash::default(),
+        last_block.header.inner.prev_hash,
         CryptoHash::default(),
     );
     (chunk, merkle_paths, receipts, block)
@@ -207,11 +207,13 @@ fn test_verify_chunk_invalid_state_challenge() {
     // Invalid chunk & block.
     let last_block_hash = env.clients[0].chain.head().unwrap().last_block_hash;
     let last_block = env.clients[0].chain.get_block(&last_block_hash).unwrap().clone();
+    let prev_to_last_block =
+        env.clients[0].chain.get_block(&last_block.header.inner.prev_hash).unwrap().clone();
     let (mut invalid_chunk, merkle_paths) = env.clients[0]
         .shards_mgr
         .create_encoded_shard_chunk(
             last_block.hash(),
-            StateRoot { hash: CryptoHash::default(), num_parts: 1 },
+            StateRoot::default(),
             CryptoHash::default(),
             last_block.header.inner.height + 1,
             0,
@@ -255,8 +257,8 @@ fn test_verify_chunk_invalid_state_challenge() {
         vec![],
         &signer,
         0.into(),
-        CryptoHash::default(),
-        CryptoHash::default(),
+        last_block.header.inner.prev_hash,
+        prev_to_last_block.header.inner.prev_hash,
     );
 
     let challenge_body = {
@@ -274,6 +276,7 @@ fn test_verify_chunk_invalid_state_challenge() {
             &empty_block_pool,
             validity_period,
             epoch_length,
+            0,
         );
 
         chain_update
@@ -286,12 +289,22 @@ fn test_verify_chunk_invalid_state_challenge() {
         assert_eq!(prev_merkle_proofs[0], challenge_body.prev_merkle_proof);
         assert_eq!(merkle_proofs[0], challenge_body.merkle_proof);
         assert_eq!(
-            vec![vec![
-                3, 1, 0, 0, 0, 16, 54, 106, 135, 107, 146, 249, 30, 224, 4, 250, 77, 43, 107, 71,
-                32, 36, 160, 74, 172, 80, 43, 254, 111, 201, 245, 124, 145, 98, 123, 210, 44, 242,
-                167, 124, 2, 0, 0, 0, 0, 0,
-            ]],
-            challenge_body.partial_state
+            challenge_body.partial_state.0,
+            vec![
+                vec![
+                    1, 7, 0, 227, 6, 86, 139, 125, 37, 242, 104, 89, 182, 115, 113, 193, 120, 119,
+                    33, 26, 201, 6, 127, 176, 76, 7, 26, 49, 95, 52, 178, 159, 143, 117, 52, 30,
+                    175, 188, 91, 174, 142, 135, 98, 116, 150, 226, 129, 204, 53, 64, 77, 100, 76,
+                    30, 35, 91, 181, 116, 222, 89, 72, 223, 126, 155, 43, 85, 154, 123, 65, 104,
+                    88, 146, 81, 64, 114, 10, 155, 246, 47, 39, 58, 223, 4, 22, 25, 219, 175, 9,
+                    240, 3, 80, 88, 189, 162, 254, 21, 231, 234, 116, 125, 124, 2, 0, 0, 0, 0, 0
+                ],
+                vec![
+                    3, 1, 0, 0, 0, 16, 54, 106, 135, 107, 146, 249, 30, 224, 4, 250, 77, 43, 107,
+                    71, 32, 36, 160, 74, 172, 80, 43, 254, 111, 201, 245, 124, 145, 98, 123, 210,
+                    44, 242, 167, 124, 2, 0, 0, 0, 0, 0
+                ]
+            ],
         );
     }
     let challenge =
diff --git a/chain/client/tests/chunks_management.rs b/chain/client/tests/chunks_management.rs
index 64fd5671f70..da59a524ad1 100644
--- a/chain/client/tests/chunks_management.rs
+++ b/chain/client/tests/chunks_management.rs
@@ -40,7 +40,7 @@ fn chunks_produced_and_distributed_one_val_per_shard() {
 #[test]
 fn chunks_recovered_from_others() {
     heavy_test(|| {
-        chunks_produced_and_distributed_common(2, true, 1000);
+        chunks_produced_and_distributed_common(2, true, 2000);
     });
 }
 
@@ -48,7 +48,7 @@ fn chunks_recovered_from_others() {
 #[should_panic]
 fn chunks_recovered_from_full_timeout_too_short() {
     heavy_test(|| {
-        chunks_produced_and_distributed_common(4, true, 1000);
+        chunks_produced_and_distributed_common(4, true, 1500);
     });
 }
 
@@ -100,6 +100,7 @@ fn chunks_produced_and_distributed_common(
             true,
             block_timeout,
             false,
+            false,
             5,
             Arc::new(RwLock::new(move |from_whom: String, msg: &NetworkRequests| {
                 match msg {
@@ -122,11 +123,13 @@ fn chunks_produced_and_distributed_common(
                             block.header.inner.last_quorum_pre_commit,
                         );
 
-                        if block.header.inner.height > 1 {
-                            assert_eq!(block.header.inner.last_quorum_pre_vote, *height_to_hash.get(&(block.header.inner.height - 1)).unwrap());
+                        // Make sure blocks are finalized. 6 is the epoch boundary.
+                        let h = block.header.inner.height;
+                        if h > 1 && h != 6 {
+                            assert_eq!(block.header.inner.last_quorum_pre_vote, *height_to_hash.get(&(h - 1)).unwrap());
                         }
-                        if block.header.inner.height > 2 {
-                            assert_eq!(block.header.inner.last_quorum_pre_commit, *height_to_hash.get(&(block.header.inner.height - 2)).unwrap());
+                        if h > 2 && (h != 6 && h != 7) {
+                            assert_eq!(block.header.inner.last_quorum_pre_commit, *height_to_hash.get(&(h - 2)).unwrap());
                         }
 
                         if block.header.inner.height > 1 {
@@ -143,7 +146,7 @@ fn chunks_produced_and_distributed_common(
                             }
                         }
 
-                        if block.header.inner.height >= 6 {
+                        if block.header.inner.height >= 8 {
                             println!("PREV BLOCK HASH: {}", block.header.inner.prev_hash);
                             println!(
                                 "STATS: responses: {} requests: {}",
diff --git a/chain/client/tests/cross_shard_tx.rs b/chain/client/tests/cross_shard_tx.rs
index a486f1ca948..39c8b3f842c 100644
--- a/chain/client/tests/cross_shard_tx.rs
+++ b/chain/client/tests/cross_shard_tx.rs
@@ -32,6 +32,7 @@ fn test_keyvalue_runtime_balances() {
             true,
             100,
             false,
+            false,
             5,
             Arc::new(RwLock::new(move |_account_id: String, _msg: &NetworkRequests| {
                 (NetworkResponses::NoResponse, true)
@@ -117,7 +118,8 @@ mod tests {
                 )))
                 .then(move |x| {
                     match x.unwrap() {
-                        NetworkClientResponses::NoResponse => {
+                        NetworkClientResponses::NoResponse
+                        | NetworkClientResponses::RequestRouted => {
                             assert_eq!(num_validators, 24);
                             send_tx(
                                 num_validators,
@@ -136,7 +138,13 @@ mod tests {
                                 connector_ordinal
                             );
                         }
-                        _ => assert!(false),
+                        other => {
+                            println!(
+                                "Transaction was rejected with an unexpected outcome: {:?}",
+                                other
+                            );
+                            assert!(false)
+                        }
                     }
                     future::result(Ok(()))
                 }),
@@ -388,8 +396,9 @@ mod tests {
                 key_pairs.clone(),
                 validator_groups,
                 true,
-                if drop_chunks || rotate_validators { 300 } else { 200 },
+                if drop_chunks || rotate_validators { 150 } else { 75 },
                 drop_chunks,
+                true,
                 20,
                 Arc::new(RwLock::new(move |_account_id: String, _msg: &NetworkRequests| {
                     (NetworkResponses::NoResponse, true)
diff --git a/chain/client/tests/process_blocks.rs b/chain/client/tests/process_blocks.rs
index 29b6d04ca64..e2cdc3e92cf 100644
--- a/chain/client/tests/process_blocks.rs
+++ b/chain/client/tests/process_blocks.rs
@@ -17,7 +17,7 @@ use near_network::types::{FullPeerInfo, NetworkInfo, PeerChainInfo};
 use near_network::{
     NetworkClientMessages, NetworkClientResponses, NetworkRequests, NetworkResponses, PeerInfo,
 };
-use near_primitives::block::{Approval, BlockHeader};
+use near_primitives::block::{Approval, BlockHeader, WeightAndScore};
 use near_primitives::errors::InvalidTxError;
 use near_primitives::hash::{hash, CryptoHash};
 use near_primitives::merkle::merklize;
@@ -420,7 +420,7 @@ fn client_sync_headers() {
                         chain_info: PeerChainInfo {
                             genesis_id: Default::default(),
                             height: 5,
-                            total_weight: 100.into(),
+                            weight_and_score: WeightAndScore::from_ints(100, 100),
                             tracked_shards: vec![],
                         },
                         edge_info: EdgeInfo::default(),
@@ -432,7 +432,7 @@ fn client_sync_headers() {
                         chain_info: PeerChainInfo {
                             genesis_id: Default::default(),
                             height: 5,
-                            total_weight: 100.into(),
+                            weight_and_score: WeightAndScore::from_ints(100, 100),
                             tracked_shards: vec![],
                         },
                         edge_info: EdgeInfo::default(),
@@ -599,6 +599,39 @@ fn test_no_double_sign() {
     assert_eq!(env.clients[0].produce_block(1, Duration::from_millis(10)).unwrap(), None);
 }
 
+#[test]
+fn test_invalid_gas_price() {
+    init_test_logger();
+    let store = create_test_store();
+    let network_adapter = Arc::new(MockNetworkAdapter::default());
+    let chain_genesis = ChainGenesis::test();
+    let mut client = setup_client(
+        store,
+        vec![vec!["test1"]],
+        1,
+        1,
+        Some("test1"),
+        network_adapter,
+        chain_genesis,
+    );
+    let signer = InMemorySigner::from_seed("test1", KeyType::ED25519, "test1");
+    let genesis = client.chain.get_block_by_height(0).unwrap();
+    let mut b1 = Block::empty_with_height(genesis, 1, &signer);
+    b1.header.inner.gas_price = 0;
+    let hash = hash(&b1.header.inner.try_to_vec().expect("Failed to serialize"));
+    b1.header.hash = hash;
+    b1.header.signature = signer.sign(hash.as_ref());
+
+    let (_, result) = client.process_block(b1, Provenance::NONE);
+    match result {
+        Err(e) => match e.kind() {
+            ErrorKind::InvalidGasPrice => {}
+            _ => assert!(false, "wrong error: {}", e),
+        },
+        _ => assert!(false, "succeeded, tip: {:?}", result),
+    }
+}
+
 #[test]
 fn test_invalid_block_height() {
     let mut env = TestEnv::new(ChainGenesis::test(), 1, 1);
diff --git a/chain/epoch_manager/Cargo.toml b/chain/epoch_manager/Cargo.toml
index 9e449bd8b86..489c38eba32 100644
--- a/chain/epoch_manager/Cargo.toml
+++ b/chain/epoch_manager/Cargo.toml
@@ -10,6 +10,7 @@ protocol_defining_rand = { package = "rand", version = "0.6.5" }
 log = "0.4"
 cached = "0.9.0"
 borsh = "0.2.9"
+rand = "0.7"
 serde = "1.0"
 serde_derive = "1.0"
 serde_json = "1.0"
@@ -17,5 +18,8 @@ serde_json = "1.0"
 
 near-crypto = { path = "../../core/crypto" }
 near-primitives = { path = "../../core/primitives" }
-near-store = { path = "../../core/store" }
 near-chain = { path = "../chain" }
+near-store = { path = "../../core/store" }
+
+[features]
+expensive_tests = []
diff --git a/chain/epoch_manager/src/lib.rs b/chain/epoch_manager/src/lib.rs
index 133213bf262..8cc08c6ffd6 100644
--- a/chain/epoch_manager/src/lib.rs
+++ b/chain/epoch_manager/src/lib.rs
@@ -8,7 +8,7 @@ use near_primitives::hash::CryptoHash;
 use near_primitives::types::{
     AccountId, Balance, BlockIndex, EpochId, ShardId, ValidatorId, ValidatorStake,
 };
-use near_primitives::views::EpochValidatorInfo;
+use near_primitives::views::{CurrentEpochValidatorInfo, EpochValidatorInfo};
 use near_store::{Store, StoreUpdate, COL_BLOCK_INFO, COL_EPOCH_INFO};
 
 use crate::proposals::proposals_to_epoch_info;
@@ -178,22 +178,22 @@ impl EpochManager {
             let info = self.get_block_info(&hash)?.clone();
             if hash == *last_block_hash {
                 block_validator_tracker = info.block_tracker;
+                for proposal in info.all_proposals.into_iter().rev() {
+                    if !slashed_validators.contains(&proposal.account_id) {
+                        if proposal.amount == 0 && !proposals.contains_key(&proposal.account_id) {
+                            validator_kickout.insert(proposal.account_id.clone());
+                        }
+                        // This code relies on the fact that within a block the proposals are ordered
+                        // in the order they are added. So we only take the last proposal for any given
+                        // account in this manner.
+                        proposals.entry(proposal.account_id.clone()).or_insert(proposal);
+                    }
+                }
             }
             if &info.epoch_id != epoch_id || info.prev_hash == CryptoHash::default() {
                 break;
             }
 
-            for proposal in info.proposals.into_iter().rev() {
-                if !slashed_validators.contains(&proposal.account_id) {
-                    if proposal.amount == 0 && !proposals.contains_key(&proposal.account_id) {
-                        validator_kickout.insert(proposal.account_id.clone());
-                    }
-                    // This code relies on the fact that within a block the proposals are ordered
-                    // in the order they are added. So we only take the last proposal for any given
-                    // account in this manner.
-                    proposals.entry(proposal.account_id.clone()).or_insert(proposal);
-                }
-            }
             produced_block_indices.insert(info.index);
             for (i, mask) in info.chunk_mask.iter().enumerate() {
                 let chunk_validator_id =
@@ -368,12 +368,20 @@ impl EpochManager {
                     block_info.epoch_first_block = prev_block_info.epoch_first_block;
                 }
 
+                let BlockInfo { block_tracker, mut all_proposals, .. } = prev_block_info;
+
                 // Update block produced/expected tracker.
                 block_info.update_block_tracker(
                     &epoch_info,
                     prev_block_info.index,
-                    if is_epoch_start { HashMap::default() } else { prev_block_info.block_tracker },
+                    if is_epoch_start { HashMap::default() } else { block_tracker },
                 );
+                if is_epoch_start {
+                    block_info.all_proposals = block_info.proposals.clone();
+                } else {
+                    all_proposals.extend(block_info.proposals.clone());
+                    block_info.all_proposals = all_proposals;
+                }
 
                 // Save current block info.
                 self.save_block_info(&mut store_update, current_hash, block_info.clone())?;
@@ -606,14 +614,30 @@ impl EpochManager {
         block_hash: &CryptoHash,
     ) -> Result<EpochValidatorInfo, EpochError> {
         let epoch_id = self.get_epoch_id(block_hash)?;
-        let current_validators = self.get_epoch_info(&epoch_id)?.validators.clone();
+        let slashed = self.get_slashed_validators(block_hash)?.clone();
+        let current_validators = self
+            .get_epoch_info(&epoch_id)?
+            .validators
+            .clone()
+            .into_iter()
+            .map(|info| {
+                let num_missing_blocks =
+                    self.get_num_missing_blocks(&epoch_id, &block_hash, &info.account_id)?;
+                Ok(CurrentEpochValidatorInfo {
+                    is_slashed: slashed.contains(&info.account_id),
+                    account_id: info.account_id,
+                    stake: info.amount,
+                    num_missing_blocks,
+                })
+            })
+            .collect::<Result<Vec<CurrentEpochValidatorInfo>, EpochError>>()?;
         let next_epoch_id = self.get_next_epoch_id(block_hash)?;
         let next_validators = self.get_epoch_info(&next_epoch_id)?.validators.clone();
-        let epoch_summary = self.collect_blocks_info(&epoch_id, block_hash)?;
+        let current_proposals = self.get_block_info(block_hash)?.all_proposals.clone();
         Ok(EpochValidatorInfo {
-            current_validators: current_validators.into_iter().map(Into::into).collect(),
+            current_validators,
             next_validators: next_validators.into_iter().map(Into::into).collect(),
-            current_proposals: epoch_summary.all_proposals.into_iter().map(Into::into).collect(),
+            current_proposals: current_proposals.into_iter().map(Into::into).collect(),
         })
     }
 
@@ -687,11 +711,49 @@ impl EpochManager {
             [(index % (epoch_info.chunk_producers[shard_id as usize].len() as BlockIndex)) as usize]
     }
 
-    /// Returns true, if given current block info, next block suppose to be in the next epoch.
+    /// The epoch switches when a block at a particular height becomes final. We cannot allow
+    /// blocks beyond that height in the current epoch to become final, otherwise the safety of
+    /// the finality gadget can be violated.
+    pub fn push_final_block_back_if_needed(
+        &mut self,
+        parent_hash: CryptoHash,
+        mut last_final_hash: CryptoHash,
+    ) -> Result<CryptoHash, EpochError> {
+        if last_final_hash == CryptoHash::default() {
+            return Ok(last_final_hash);
+        }
+
+        let block_info = self.get_block_info(&parent_hash)?;
+        let epoch_first_block = block_info.epoch_first_block;
+        let estimated_next_epoch_start =
+            self.get_block_info(&epoch_first_block)?.index + self.config.epoch_length;
+
+        loop {
+            let block_info = self.get_block_info(&last_final_hash)?;
+            let prev_hash = block_info.prev_hash;
+            let prev_block_info = self.get_block_info(&prev_hash)?;
+            // See `is_next_block_in_next_epoch` for details on ` + 3`
+            if prev_block_info.index + 3 >= estimated_next_epoch_start {
+                last_final_hash = prev_hash;
+            } else {
+                return Ok(last_final_hash);
+            }
+        }
+    }
+
+    /// Returns true if, given the current block info, the next block is supposed to be in the next epoch.
     #[allow(clippy::wrong_self_convention)]
     fn is_next_block_in_next_epoch(&mut self, block_info: &BlockInfo) -> Result<bool, EpochError> {
-        Ok(block_info.index + 1
-            >= self.get_block_info(&block_info.epoch_first_block)?.index + self.config.epoch_length)
+        let estimated_next_epoch_start =
+            self.get_block_info(&block_info.epoch_first_block)?.index + self.config.epoch_length;
+        // Say the epoch length is 10, and say all the blocks have all the approvals.
+        // Say the first block of a particular epoch has height 111. We want block 121 to be
+        //     the first block of the next epoch. For 121 to be the next block, the current block
+        //     has height 120, 119 has the quorum pre-commit, and 118 is finalized.
+        // 121 - 118 = 3, hence the `last_finalized_height + 3`.
+        Ok((block_info.last_finalized_height + 3 >= estimated_next_epoch_start
+            || self.config.num_block_producers < 4)
+            && block_info.index + 1 >= estimated_next_epoch_start)
     }
 
     /// Returns epoch id for the next epoch (T+1), given an block info in current epoch (T).
@@ -1101,7 +1163,7 @@ mod tests {
         epoch_manager
             .record_block_info(
                 &h[1],
-                BlockInfo::new(1, h[0], vec![], vec![], slashed, 0, 0, DEFAULT_TOTAL_SUPPLY),
+                BlockInfo::new(1, 0, h[0], vec![], vec![], slashed, 0, 0, DEFAULT_TOTAL_SUPPLY),
                 [0; 32],
             )
             .unwrap()
@@ -1203,6 +1265,7 @@ mod tests {
                 &h[0],
                 BlockInfo {
                     index: 0,
+                    last_finalized_height: 0,
                     prev_hash: Default::default(),
                     epoch_first_block: h[0],
                     epoch_id: Default::default(),
@@ -1213,6 +1276,7 @@ mod tests {
                     validator_reward: 0,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1222,6 +1286,7 @@ mod tests {
                 &h[1],
                 BlockInfo {
                     index: 1,
+                    last_finalized_height: 1,
                     prev_hash: h[0],
                     epoch_first_block: h[1],
                     epoch_id: Default::default(),
@@ -1232,6 +1297,7 @@ mod tests {
                     validator_reward: 10,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1241,6 +1307,7 @@ mod tests {
                 &h[2],
                 BlockInfo {
                     index: 2,
+                    last_finalized_height: 2,
                     prev_hash: h[1],
                     epoch_first_block: h[1],
                     epoch_id: Default::default(),
@@ -1251,6 +1318,7 @@ mod tests {
                     validator_reward: 10,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1308,6 +1376,7 @@ mod tests {
                 &h[0],
                 BlockInfo {
                     index: 0,
+                    last_finalized_height: 0,
                     prev_hash: Default::default(),
                     epoch_first_block: h[0],
                     epoch_id: Default::default(),
@@ -1318,6 +1387,7 @@ mod tests {
                     validator_reward: 0,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1327,6 +1397,7 @@ mod tests {
                 &h[1],
                 BlockInfo {
                     index: 1,
+                    last_finalized_height: 1,
                     prev_hash: h[0],
                     epoch_first_block: h[1],
                     epoch_id: Default::default(),
@@ -1337,6 +1408,7 @@ mod tests {
                     validator_reward: 10,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1346,6 +1418,7 @@ mod tests {
                 &h[2],
                 BlockInfo {
                     index: 2,
+                    last_finalized_height: 2,
                     prev_hash: h[1],
                     epoch_first_block: h[1],
                     epoch_id: Default::default(),
@@ -1356,6 +1429,7 @@ mod tests {
                     validator_reward: 10,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1441,6 +1515,7 @@ mod tests {
                 &h[0],
                 BlockInfo {
                     index: 0,
+                    last_finalized_height: 0,
                     prev_hash: Default::default(),
                     epoch_first_block: h[0],
                     epoch_id: Default::default(),
@@ -1451,6 +1526,7 @@ mod tests {
                     validator_reward: 0,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1460,6 +1536,7 @@ mod tests {
                 &h[1],
                 BlockInfo {
                     index: 1,
+                    last_finalized_height: 1,
                     prev_hash: h[0],
                     epoch_first_block: h[1],
                     epoch_id: Default::default(),
@@ -1470,6 +1547,7 @@ mod tests {
                     validator_reward: 0,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1479,6 +1557,7 @@ mod tests {
                 &h[3],
                 BlockInfo {
                     index: 3,
+                    last_finalized_height: 3,
                     prev_hash: h[1],
                     epoch_first_block: h[2],
                     epoch_id: Default::default(),
@@ -1489,6 +1568,7 @@ mod tests {
                     validator_reward: 0,
                     total_supply,
                     block_tracker: Default::default(),
+                    all_proposals: vec![],
                 },
                 rng_seed,
             )
@@ -1597,6 +1677,7 @@ mod tests {
             &h[1],
             BlockInfo {
                 index: 1,
+                last_finalized_height: 1,
                 prev_hash: h[0],
                 epoch_first_block: h[1],
                 epoch_id: Default::default(),
@@ -1607,6 +1688,7 @@ mod tests {
                 validator_reward: 0,
                 total_supply,
                 block_tracker: Default::default(),
+                all_proposals: vec![],
             },
             rng_seed,
         )
@@ -1615,6 +1697,7 @@ mod tests {
             &h[2],
             BlockInfo {
                 index: 2,
+                last_finalized_height: 2,
                 prev_hash: h[1],
                 epoch_first_block: h[1],
                 epoch_id: Default::default(),
@@ -1625,6 +1708,7 @@ mod tests {
                 validator_reward: 0,
                 total_supply,
                 block_tracker: Default::default(),
+                all_proposals: vec![],
             },
             rng_seed,
         )
@@ -1633,6 +1717,7 @@ mod tests {
             &h[3],
             BlockInfo {
                 index: 3,
+                last_finalized_height: 3,
                 prev_hash: h[2],
                 epoch_first_block: h[3],
                 epoch_id: Default::default(),
@@ -1643,6 +1728,7 @@ mod tests {
                 validator_reward: 0,
                 total_supply,
                 block_tracker: Default::default(),
+                all_proposals: vec![],
             },
             rng_seed,
         )
diff --git a/chain/epoch_manager/src/test_utils.rs b/chain/epoch_manager/src/test_utils.rs
index 39e8b854a71..86b8f1f0e2b 100644
--- a/chain/epoch_manager/src/test_utils.rs
+++ b/chain/epoch_manager/src/test_utils.rs
@@ -185,6 +185,7 @@ pub fn record_block(
             &cur_h,
             BlockInfo::new(
                 index,
+                0,
                 prev_h,
                 proposals,
                 vec![],
diff --git a/chain/epoch_manager/src/types.rs b/chain/epoch_manager/src/types.rs
index 1d740d4045a..9202f67bbe0 100644
--- a/chain/epoch_manager/src/types.rs
+++ b/chain/epoch_manager/src/types.rs
@@ -60,10 +60,10 @@ pub struct EpochInfo {
 #[derive(Default, BorshSerialize, BorshDeserialize, Clone, Debug)]
 pub struct BlockInfo {
     pub index: BlockIndex,
+    pub last_finalized_height: BlockIndex,
     pub prev_hash: CryptoHash,
     pub epoch_first_block: CryptoHash,
     pub epoch_id: EpochId,
-
     pub proposals: Vec<ValidatorStake>,
     pub chunk_mask: Vec<bool>,
     pub slashed: HashSet<AccountId>,
@@ -75,11 +75,14 @@ pub struct BlockInfo {
     pub total_supply: Balance,
     /// Map from validator index to (num_blocks_produced, num_blocks_expected) so far in the given epoch.
     pub block_tracker: HashMap<ValidatorId, (BlockIndex, BlockIndex)>,
+    /// All proposals in this epoch up to this block
+    pub all_proposals: Vec<ValidatorStake>,
 }
 
 impl BlockInfo {
     pub fn new(
         index: BlockIndex,
+        last_finalized_height: BlockIndex,
         prev_hash: CryptoHash,
         proposals: Vec<ValidatorStake>,
         validator_mask: Vec<bool>,
@@ -90,6 +93,7 @@ impl BlockInfo {
     ) -> Self {
         Self {
             index,
+            last_finalized_height,
             prev_hash,
             proposals,
             chunk_mask: validator_mask,
@@ -101,6 +105,7 @@ impl BlockInfo {
             epoch_first_block: CryptoHash::default(),
             epoch_id: EpochId::default(),
             block_tracker: HashMap::default(),
+            all_proposals: vec![],
         }
     }
 
diff --git a/chain/epoch_manager/tests/finality.rs b/chain/epoch_manager/tests/finality.rs
new file mode 100644
index 00000000000..b7e1488bcf6
--- /dev/null
+++ b/chain/epoch_manager/tests/finality.rs
@@ -0,0 +1,390 @@
+#[cfg(test)]
+#[cfg(feature = "expensive_tests")]
+mod tests {
+    use near_chain::test_utils::setup;
+    use near_chain::FinalityGadget;
+    use near_chain::{Chain, ChainStore, ChainStoreAccess, ChainStoreUpdate};
+    use near_crypto::{Signature, Signer};
+    use near_epoch_manager::test_utils::{record_block, setup_default_epoch_manager};
+    use near_epoch_manager::EpochManager;
+    use near_primitives::block::{Approval, Block, Weight};
+    use near_primitives::hash::CryptoHash;
+    use near_primitives::types::{AccountId, BlockIndex, EpochId};
+    use rand::seq::SliceRandom;
+    use rand::Rng;
+    use std::collections::{HashMap, HashSet};
+
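+    /// Produces a block on top of `prev` at the given height: computes the quorums with the
+    /// finality gadget, saves the header to the chain store and records the block in the
+    /// epoch manager.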
+    fn create_block(
+        em: &mut EpochManager,
+        prev: &Block,
+        height: BlockIndex,
+        chain_store: &mut ChainStore,
+        signer: &dyn Signer,
+        approvals: Vec<Approval>,
+        total_block_producers: usize,
+    ) -> Block {
+        let is_this_block_epoch_start = em.is_next_block_epoch_start(&prev.hash()).unwrap();
+
+        let epoch_id = if is_this_block_epoch_start {
+            // This is a somewhat unusual way to define an epoch, but replicating the exact logic
+            // used today is unnecessarily complex. Using the last `pre_commit` is sufficient to
+            // ensure that the `epoch_id` of the next epoch stays the same for as long as the last
+            // committed block in the previous epoch stays the same.
+            EpochId(prev.header.inner.last_quorum_pre_commit)
+        } else {
+            prev.header.inner.epoch_id.clone()
+        };
+
+        let mut block = Block::empty(prev, signer);
+        block.header.inner.approvals = approvals.clone();
+        block.header.inner.height = height;
+        block.header.inner.total_weight = (height as u128).into();
+        block.header.inner.epoch_id = epoch_id.clone();
+
+        let quorums = FinalityGadget::compute_quorums(
+            prev.hash(),
+            epoch_id,
+            height,
+            approvals.clone(),
+            chain_store,
+            total_block_producers,
+        )
+        .unwrap()
+        .clone();
+
+        block.header.inner.last_quorum_pre_vote = quorums.last_quorum_pre_vote;
+        block.header.inner.last_quorum_pre_commit = em
+            .push_final_block_back_if_needed(prev.hash(), quorums.last_quorum_pre_commit)
+            .unwrap();
+
+        block.header.inner.score = if quorums.last_quorum_pre_vote == CryptoHash::default() {
+            0.into()
+        } else {
+            chain_store.get_block_header(&quorums.last_quorum_pre_vote).unwrap().inner.total_weight
+        };
+
+        block.header.init();
+
+        let mut chain_store_update = ChainStoreUpdate::new(chain_store);
+        chain_store_update.save_block_header(block.header.clone());
+        chain_store_update.commit().unwrap();
+
+        record_block(
+            em,
+            block.header.inner.prev_hash,
+            block.hash(),
+            block.header.inner.height,
+            vec![],
+        );
+
+        block
+    }
+
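+    /// Shorthand for constructing an `Approval` with a default signature.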
+    fn apr(account_id: AccountId, reference_hash: CryptoHash, parent_hash: CryptoHash) -> Approval {
+        Approval { account_id, reference_hash, parent_hash, signature: Signature::default() }
+    }
+
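+    /// Prints the headers of the chain ending at `hash`, walking back until the default hash.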
+    fn print_chain(chain: &mut Chain, mut hash: CryptoHash) {
+        while hash != CryptoHash::default() {
+            let header = chain.get_block_header(&hash).unwrap();
+            println!(
+                "    {}: {} (epoch: {}, qv: {}, qc: {}), approvals: {:?}",
+                header.inner.height,
+                header.hash(),
+                header.inner.epoch_id.0,
+                header.inner.last_quorum_pre_vote,
+                header.inner.last_quorum_pre_commit,
+                header.inner.approvals
+            );
+            hash = header.inner.prev_hash;
+        }
+    }
+
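+    /// Asserts that the previously finalized block (`old_final_hash` at `old_final_height`) lies
+    /// on the chain of the newly finalized block `new_final_hash`; prints both chains on failure.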
+    fn check_safety(
+        chain: &mut Chain,
+        new_final_hash: CryptoHash,
+        old_final_height: BlockIndex,
+        old_final_hash: CryptoHash,
+    ) {
+        let on_chain_hash =
+            chain.get_header_on_chain_by_height(&new_final_hash, old_final_height).unwrap().hash();
+        let ok = on_chain_hash == old_final_hash;
+
+        if !ok {
+            println!(
+                "New hash: {:?}, new height: {:?}, on_chain_hash: {:?}",
+                new_final_hash,
+                chain.mut_store().get_block_height(&new_final_hash),
+                on_chain_hash
+            );
+            print_chain(chain, new_final_hash);
+            println!("Old hash: {:?}, old height: {:?}", old_final_hash, old_final_height,);
+            print_chain(chain, old_final_hash);
+            assert!(false);
+        }
+    }
+
+    /// Tests finality safety across epoch switches.
+    ///
+    /// Runs many iterations with the following parameters:
+    ///  - complexity: number of blocks created during each iteration
+    ///  - likelihood_random: how likely a block is to be built on top of a random previous block
+    ///  - likelihood_heavy: how likely a block is to be built on top of one of the blocks with the highest score
+    ///  - likelihood_last: how likely a block is to be built on top of the last built block
+    ///  - adversaries: whether some nodes behave adversarially (if `true`, 2 out of 7 block producers
+    ///       create approvals randomly instead of following the protocol)
+    ///
+    /// Uses the same utility to perform epoch switches that the actual epoch manager uses, thus testing
+    /// the actual live conditions. Uses two block producer sets, one of which is chosen at random for
+    /// each epoch. Makes sure that all finalized blocks are on the same chain.
+    ///
+    #[test]
+    fn test_fuzzy_safety() {
+        for (complexity, num_iters) in vec![
+            (30, 2000),
+            //(20, 2000),
+            (50, 100),
+            (100, 50),
+            (200, 50),
+            (500, 20),
+            (1000, 10),
+            (2000, 10),
+            (2000, 10),
+        ] {
+            for adversaries in vec![false, true] {
+                let mut good_iters = 0;
+
+                let block_producers1 = vec![
+                    "test1.1".to_string(),
+                    "test1.2".to_string(),
+                    "test1.3".to_string(),
+                    "test1.4".to_string(),
+                    "test1.5".to_string(),
+                    "test1.6".to_string(),
+                    "test1.7".to_string(),
+                ];
+                let block_producers2 = vec![
+                    "test2.1".to_string(),
+                    "test2.2".to_string(),
+                    "test2.3".to_string(),
+                    "test2.4".to_string(),
+                    "test2.5".to_string(),
+                    "test2.6".to_string(),
+                    "test2.7".to_string(),
+                ];
+                let total_block_producers = block_producers1.len();
+
+                let mut epoch_to_bps = HashMap::new();
+
+                for iter in 0..num_iters {
+                    let likelihood_random = rand::thread_rng().gen_range(1, 3);
+                    let likelihood_heavy = rand::thread_rng().gen_range(1, 11);
+                    let likelihood_last = rand::thread_rng().gen_range(1, 11);
+
+                    println!(
+                        "Starting iteration {} at complexity {} and likelihoods {}, {}, {}",
+                        iter, complexity, likelihood_random, likelihood_heavy, likelihood_last
+                    );
+                    let (mut chain, _, signer) = setup();
+                    let mut em = setup_default_epoch_manager(
+                        block_producers1.iter().map(|_| ("test", 1000000)).collect(),
+                        10,
+                        4,
+                        7,
+                        0,
+                        50,
+                        50,
+                    );
+
+                    let genesis_block = chain.get_block(&chain.genesis().hash()).unwrap().clone();
+
+                    let mut last_final_block_hash = CryptoHash::default();
+                    let mut last_final_block_height = 0;
+                    let mut largest_height = 0;
+                    let mut finalized_hashes = HashSet::new();
+                    let mut largest_weight: HashMap<AccountId, Weight> = HashMap::new();
+                    let mut largest_score: HashMap<AccountId, Weight> = HashMap::new();
+                    let mut last_approvals: HashMap<CryptoHash, HashMap<AccountId, Approval>> =
+                        HashMap::new();
+
+                    let mut all_blocks = vec![genesis_block.clone()];
+                    record_block(
+                        &mut em,
+                        genesis_block.header.inner.prev_hash,
+                        genesis_block.hash(),
+                        genesis_block.header.inner.height,
+                        vec![],
+                    );
+                    for _i in 0..complexity {
+                        let max_score =
+                            all_blocks.iter().map(|block| block.header.inner.score).max().unwrap();
+                        let random_max_score_block = all_blocks
+                            .iter()
+                            .filter(|block| block.header.inner.score == max_score)
+                            .collect::<Vec<_>>()
+                            .choose(&mut rand::thread_rng())
+                            .unwrap()
+                            .clone()
+                            .clone();
+                        let last_block = all_blocks.last().unwrap().clone();
+                        let prev_block = (0..likelihood_random)
+                            .map(|_| all_blocks.choose(&mut rand::thread_rng()).unwrap())
+                            .chain((0..likelihood_heavy).map(|_| &random_max_score_block))
+                            .chain((0..likelihood_last).map(|_| &last_block))
+                            .collect::<Vec<_>>()
+                            .choose(&mut rand::thread_rng())
+                            .unwrap()
+                            .clone();
+                        let mut last_approvals_entry = last_approvals
+                            .get(&prev_block.hash())
+                            .unwrap_or(&HashMap::new())
+                            .clone();
+                        let mut approvals = vec![];
+
+                        let block_producers = epoch_to_bps
+                            .entry(prev_block.header.inner.epoch_id.0)
+                            .or_insert_with(|| {
+                                vec![block_producers1.clone(), block_producers2.clone()]
+                                    .choose(&mut rand::thread_rng())
+                                    .unwrap()
+                                    .clone()
+                            });
+
+                        for (i, block_producer) in block_producers.iter().enumerate() {
+                            if rand::thread_rng().gen::<bool>() {
+                                continue;
+                            }
+
+                            let reference_hash = if i < 2 && adversaries {
+                                // malicious
+                                let prev_reference = if let Some(prev_approval) =
+                                    last_approvals_entry.get(block_producer)
+                                {
+                                    prev_approval.reference_hash
+                                } else {
+                                    genesis_block.hash().clone()
+                                };
+
+                                let mut possible_references = vec![prev_reference];
+                                {
+                                    let mut prev_block_hash = prev_block.hash();
+                                    for _j in 0..10 {
+                                        if prev_block_hash == prev_reference {
+                                            break;
+                                        }
+                                        possible_references.push(prev_block_hash);
+                                        prev_block_hash = chain
+                                            .mut_store()
+                                            .get_block_header(&prev_block_hash)
+                                            .unwrap()
+                                            .inner
+                                            .prev_hash;
+                                    }
+                                }
+
+                                possible_references.choose(&mut rand::thread_rng()).unwrap().clone()
+                            } else {
+                                // honest
+                                let old_largest_weight =
+                                    *largest_weight.get(block_producer).unwrap_or(&0u128.into());
+                                let old_largest_score =
+                                    *largest_score.get(block_producer).unwrap_or(&0u128.into());
+
+                                match FinalityGadget::get_my_approval_reference_hash_inner(
+                                    prev_block.hash(),
+                                    last_approvals_entry.get(block_producer).cloned(),
+                                    old_largest_weight,
+                                    old_largest_score,
+                                    chain.mut_store(),
+                                ) {
+                                    Some(hash) => hash,
+                                    None => continue,
+                                }
+                            };
+
+                            let approval = apr(
+                                block_producer.clone(),
+                                reference_hash.clone(),
+                                prev_block.hash(),
+                            );
+                            approvals.push(approval.clone());
+                            last_approvals_entry.insert(block_producer.clone(), approval);
+                            largest_weight.insert(
+                                block_producer.clone(),
+                                prev_block.header.inner.total_weight,
+                            );
+                            largest_score
+                                .insert(block_producer.clone(), prev_block.header.inner.score);
+                        }
+
+                        let new_block = create_block(
+                            &mut em,
+                            &prev_block,
+                            prev_block.header.inner.height + 1,
+                            chain.mut_store(),
+                            &*signer,
+                            approvals,
+                            total_block_producers,
+                        );
+
+                        let final_block = new_block.header.inner.last_quorum_pre_commit;
+                        if final_block != CryptoHash::default() {
+                            let new_final_block_height =
+                                chain.get_block_header(&final_block).unwrap().inner.height;
+                            if last_final_block_height != 0 {
+                                if new_final_block_height > last_final_block_height {
+                                    check_safety(
+                                        &mut chain,
+                                        final_block,
+                                        last_final_block_height,
+                                        last_final_block_hash,
+                                    );
+                                } else if new_final_block_height < last_final_block_height {
+                                    check_safety(
+                                        &mut chain,
+                                        last_final_block_hash,
+                                        new_final_block_height,
+                                        final_block,
+                                    );
+                                } else {
+                                    if final_block != last_final_block_hash {
+                                        print_chain(&mut chain, final_block);
+                                        print_chain(&mut chain, last_final_block_hash);
+                                        assert_eq!(final_block, last_final_block_hash);
+                                    }
+                                }
+                            }
+
+                            finalized_hashes.insert(final_block);
+
+                            if new_final_block_height >= last_final_block_height {
+                                last_final_block_hash = final_block;
+                                last_final_block_height = new_final_block_height;
+                            }
+                        }
+
+                        if new_block.header.inner.height > largest_height {
+                            largest_height = new_block.header.inner.height;
+                        }
+
+                        last_approvals.insert(new_block.hash().clone(), last_approvals_entry);
+
+                        all_blocks.push(new_block);
+                    }
+                    println!("Finished iteration {}, largest finalized height: {}, largest height: {}, final blocks: {}", iter, last_final_block_height, largest_height, finalized_hashes.len());
+                    if last_final_block_height > 0 {
+                        good_iters += 1;
+                    }
+                }
+                println!("Good iterations: {}/{}", good_iters, num_iters);
+                if complexity < 100 {
+                    assert!(good_iters >= num_iters / 4);
+                } else if complexity < 500 {
+                    assert!(good_iters >= num_iters / 2);
+                } else {
+                    assert_eq!(good_iters, num_iters);
+                }
+            }
+        }
+    }
+}
diff --git a/chain/jsonrpc/client/src/lib.rs b/chain/jsonrpc/client/src/lib.rs
index be516779558..77c7fbb2b44 100644
--- a/chain/jsonrpc/client/src/lib.rs
+++ b/chain/jsonrpc/client/src/lib.rs
@@ -8,7 +8,8 @@ use serde::Serialize;
 use near_primitives::hash::CryptoHash;
 use near_primitives::types::{BlockIndex, ShardId};
 use near_primitives::views::{
-    BlockView, ChunkView, FinalExecutionOutcomeView, QueryResponse, StatusResponse,
+    BlockView, ChunkView, EpochValidatorInfo, FinalExecutionOutcomeView, QueryResponse,
+    StatusResponse,
 };
 
 use crate::message::{from_slice, Message};
@@ -187,6 +188,7 @@ jsonrpc_client!(pub struct JsonRpcClient {
     pub fn tx(&mut self, hash: String, account_id: String) -> RpcRequest<FinalExecutionOutcomeView>;
     pub fn block(&mut self, id: BlockId) -> RpcRequest<BlockView>;
     pub fn chunk(&mut self, id: ChunkId) -> RpcRequest<ChunkView>;
+    pub fn validators(&mut self, block_hash: String) -> RpcRequest<EpochValidatorInfo>;
 });
 
 fn create_client() -> Client {
diff --git a/chain/jsonrpc/src/lib.rs b/chain/jsonrpc/src/lib.rs
index bfe0ca4eb70..87f23cd4589 100644
--- a/chain/jsonrpc/src/lib.rs
+++ b/chain/jsonrpc/src/lib.rs
@@ -18,7 +18,8 @@ use async_utils::{delay, timeout};
 use message::Message;
 use message::{Request, RpcError};
 use near_client::{
-    ClientActor, GetBlock, GetChunk, GetNetworkInfo, Status, TxStatus, ViewClientActor,
+    ClientActor, GetBlock, GetChunk, GetNetworkInfo, GetValidatorInfo, Status, TxStatus,
+    ViewClientActor,
 };
 pub use near_jsonrpc_client as client;
 use near_jsonrpc_client::{message, BlockId, ChunkId};
@@ -111,6 +112,13 @@ fn parse_tx(params: Option<Value>) -> Result<SignedTransaction, RpcError> {
         .map_err(|e| RpcError::invalid_params(Some(format!("Failed to decode transaction: {}", e))))
 }
 
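+/// Parses a single base-encoded string parameter into a `CryptoHash`.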
+fn parse_hash(params: Option<Value>) -> Result<CryptoHash, RpcError> {
+    let (encoded,) = parse_params::<(String,)>(params)?;
+    from_base_or_parse_err(encoded).and_then(|bytes| {
+        CryptoHash::try_from(bytes).map_err(|err| RpcError::parse_error(err.to_string()))
+    })
+}
+
 fn jsonify_client_response(
     client_response: Result<NetworkClientResponses, MailboxError>,
 ) -> Result<Value, RpcError> {
@@ -161,6 +169,7 @@ impl JsonRpcHandler {
         match request.method.as_ref() {
             "broadcast_tx_async" => self.send_tx_async(request.params).await,
             "broadcast_tx_commit" => self.send_tx_commit(request.params).await,
+            "validators" => self.validators(request.params).await,
             "query" => self.query(request.params).await,
             "health" => self.health().await,
             "status" => self.status().await,
@@ -269,11 +278,19 @@ impl JsonRpcHandler {
     }
 
     async fn health(&self) -> Result<Value, RpcError> {
-        Ok(Value::Null)
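+        // Report healthy only if the client actor responds to `Status` without an error.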
+        match self.client_addr.send(Status {}).compat().await {
+            Ok(Ok(_)) => Ok(Value::Null),
+            Ok(Err(err)) => Err(RpcError::new(-32_001, err, None)),
+            Err(_) => Err(RpcError::server_error::<String>(None)),
+        }
     }
 
     pub async fn status(&self) -> Result<Value, RpcError> {
-        jsonify(self.client_addr.send(Status {}).compat().await)
+        match self.client_addr.send(Status {}).compat().await {
+            Ok(Ok(result)) => jsonify(Ok(Ok(result))),
+            Ok(Err(err)) => Err(RpcError::new(-32_001, err, None)),
+            Err(_) => Err(RpcError::server_error::<String>(None)),
+        }
     }
 
     async fn query(&self, params: Option<Value>) -> Result<Value, RpcError> {
@@ -386,6 +403,16 @@ impl JsonRpcHandler {
 
         String::from_utf8(buffer)
     }
+
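+    /// Handles the `validators` RPC: returns `EpochValidatorInfo` for the epoch determined by the
+    /// given block hash.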
+    async fn validators(&self, params: Option<Value>) -> Result<Value, RpcError> {
+        let block_hash = parse_hash(params)?;
+        jsonify(
+            self.view_client_addr
+                .send(GetValidatorInfo { last_block_hash: block_hash })
+                .compat()
+                .await,
+        )
+    }
 }
 
 fn rpc_handler(
diff --git a/chain/jsonrpc/tests/rpc_query.rs b/chain/jsonrpc/tests/rpc_query.rs
index c118dee99ab..97c4939a716 100644
--- a/chain/jsonrpc/tests/rpc_query.rs
+++ b/chain/jsonrpc/tests/rpc_query.rs
@@ -1,6 +1,6 @@
 use std::convert::TryFrom;
 
-use actix::System;
+use actix::{Actor, System};
 use futures::future;
 use futures::future::Future;
 
@@ -8,6 +8,7 @@ use near_crypto::Signature;
 use near_jsonrpc::client::new_client;
 use near_jsonrpc::test_utils::start_all;
 use near_jsonrpc_client::{BlockId, ChunkId};
+use near_network::test_utils::WaitOrTimeout;
 use near_primitives::hash::CryptoHash;
 use near_primitives::test_utils::init_test_logger;
 use near_primitives::types::ShardId;
@@ -88,8 +89,7 @@ fn test_chunk_by_hash() {
                     assert_eq!(chunk.header.height_included, 0);
                     assert_eq!(chunk.header.outgoing_receipts_root.as_ref().len(), 32);
                     assert_eq!(chunk.header.prev_block_hash.as_ref().len(), 32);
-                    assert_eq!(chunk.header.prev_state_num_parts, 17);
-                    assert_eq!(chunk.header.prev_state_root_hash.as_ref().len(), 32);
+                    assert_eq!(chunk.header.prev_state_root.as_ref().len(), 32);
                     assert_eq!(chunk.header.rent_paid, 0);
                     assert_eq!(chunk.header.shard_id, 0);
                     assert!(if let Signature::ED25519(_) = chunk.header.signature {
@@ -153,6 +153,32 @@ fn test_status() {
     .unwrap();
 }
 
+/// Retrieving client status fails when the node is not producing blocks.
+#[test]
+fn test_status_fail() {
+    init_test_logger();
+
+    System::run(|| {
+        let (_, addr) = start_all(false);
+
+        let mut client = new_client(&format!("http://{}", addr));
+        WaitOrTimeout::new(
+            Box::new(move |_| {
+                actix::spawn(client.status().then(|res| {
+                    if res.is_err() {
+                        System::current().stop();
+                    }
+                    future::result(Ok(()))
+                }));
+            }),
+            100,
+            10000,
+        )
+        .start();
+    })
+    .unwrap();
+}
+
 /// Check health fails when node is absent.
 #[test]
 fn test_health_fail() {
@@ -169,6 +195,32 @@ fn test_health_fail() {
     .unwrap();
 }
 
+/// Health check fails when the node doesn't produce a block for a period of time.
+#[test]
+fn test_health_fail_no_blocks() {
+    init_test_logger();
+
+    System::run(|| {
+        let (_, addr) = start_all(false);
+
+        let mut client = new_client(&format!("http://{}", addr));
+        WaitOrTimeout::new(
+            Box::new(move |_| {
+                actix::spawn(client.health().then(|res| {
+                    if res.is_err() {
+                        System::current().stop();
+                    }
+                    future::result(Ok(()))
+                }));
+            }),
+            300,
+            10000,
+        )
+        .start();
+    })
+    .unwrap();
+}
+
 /// Retrieve client health.
 #[test]
 fn test_health_ok() {
diff --git a/chain/network/src/codec.rs b/chain/network/src/codec.rs
index 9995c07b9a3..f9fbc31a95c 100644
--- a/chain/network/src/codec.rs
+++ b/chain/network/src/codec.rs
@@ -78,7 +78,7 @@ mod test {
     };
 
     use super::*;
-    use near_primitives::block::Approval;
+    use near_primitives::block::{Approval, WeightAndScore};
 
     fn test_codec(msg: PeerMessage) {
         let mut codec = Codec::new();
@@ -98,7 +98,7 @@ mod test {
             chain_info: PeerChainInfo {
                 genesis_id: Default::default(),
                 height: 0,
-                total_weight: 0.into(),
+                weight_and_score: WeightAndScore::from_ints(0, 0),
                 tracked_shards: vec![],
             },
             edge_info: EdgeInfo::default(),
diff --git a/chain/network/src/peer.rs b/chain/network/src/peer.rs
index a9e514bda78..029963555d6 100644
--- a/chain/network/src/peer.rs
+++ b/chain/network/src/peer.rs
@@ -227,13 +227,18 @@ impl Peer {
                 Ok(NetworkClientResponses::ChainInfo {
                     genesis_id,
                     height,
-                    total_weight,
+                    weight_and_score,
                     tracked_shards,
                 }) => {
                     let handshake = Handshake::new(
                         act.node_info.id.clone(),
                         act.node_info.addr_port(),
-                        PeerChainInfo { genesis_id, height, total_weight, tracked_shards },
+                        PeerChainInfo { genesis_id, height, weight_and_score, tracked_shards },
                         act.edge_info.as_ref().unwrap().clone(),
                     );
                     act.send_message(PeerMessage::Handshake(handshake));
@@ -272,16 +277,16 @@ impl Peer {
                 let block_hash = block.hash();
                 self.tracker.push_received(block_hash);
                 self.chain_info.height = max(self.chain_info.height, block.header.inner.height);
-                self.chain_info.total_weight =
-                    max(self.chain_info.total_weight, block.header.inner.total_weight);
+                self.chain_info.weight_and_score =
+                    max(self.chain_info.weight_and_score, block.header.inner.weight_and_score());
                 NetworkClientMessages::Block(block, peer_id, self.tracker.has_request(block_hash))
             }
             PeerMessage::BlockHeaderAnnounce(header) => {
                 let block_hash = header.hash();
                 self.tracker.push_received(block_hash);
                 self.chain_info.height = max(self.chain_info.height, header.inner.height);
-                self.chain_info.total_weight =
-                    max(self.chain_info.total_weight, header.inner.total_weight);
+                self.chain_info.weight_and_score =
+                    max(self.chain_info.weight_and_score, header.inner.weight_and_score());
                 NetworkClientMessages::BlockHeader(header, peer_id)
             }
             PeerMessage::Transaction(transaction) => {
@@ -318,12 +323,12 @@ impl Peer {
                     RoutedMessageBody::QueryResponse { response, id } => {
                         NetworkClientMessages::QueryResponse { response, id }
                     }
-                    RoutedMessageBody::StateRequest(shard_id, hash, need_header, parts_ranges) => {
+                    RoutedMessageBody::StateRequest(shard_id, hash, need_header, parts) => {
                         NetworkClientMessages::StateRequest(
                             shard_id,
                             hash,
                             need_header,
-                            parts_ranges,
+                            parts,
                             msg_hash.clone().unwrap(),
                         )
                     }
diff --git a/chain/network/src/peer_manager.rs b/chain/network/src/peer_manager.rs
index a581669dba9..e7073a29e15 100644
--- a/chain/network/src/peer_manager.rs
+++ b/chain/network/src/peer_manager.rs
@@ -274,7 +274,7 @@ impl PeerManagerActor {
             .values()
             .map(|active_peers| {
                 (
-                    active_peers.full_peer_info.chain_info.total_weight,
+                    active_peers.full_peer_info.chain_info.weight_and_score,
                     active_peers.full_peer_info.chain_info.height,
                 )
             })
@@ -729,12 +729,12 @@ impl Handler<NetworkRequests> for PeerManagerActor {
                 }
                 NetworkResponses::NoResponse
             }
-            NetworkRequests::StateRequest { shard_id, hash, need_header, parts_ranges, target } => {
+            NetworkRequests::StateRequest { shard_id, hash, need_header, parts, target } => {
                 match target {
                     AccountOrPeerIdOrHash::AccountId(account_id) => self.send_message_to_account(
                         ctx,
                         &account_id,
-                        RoutedMessageBody::StateRequest(shard_id, hash, need_header, parts_ranges),
+                        RoutedMessageBody::StateRequest(shard_id, hash, need_header, parts),
                     ),
                     peer_or_hash @ AccountOrPeerIdOrHash::PeerId(_)
                     | peer_or_hash @ AccountOrPeerIdOrHash::Hash(_) => self.send_message_to_peer(
@@ -745,7 +745,7 @@ impl Handler<NetworkRequests> for PeerManagerActor {
                                 shard_id,
                                 hash,
                                 need_header,
-                                parts_ranges,
+                                parts,
                             ),
                         },
                     ),
@@ -829,7 +829,7 @@ impl Handler<NetworkRequests> for PeerManagerActor {
                 let new_accounts = accounts
                     .into_iter()
                     .filter(|announce_account| {
-                        !self.routing_table.contains_account(announce_account.clone())
+                        !self.routing_table.contains_account(&announce_account)
                     })
                     .collect();
 
diff --git a/chain/network/src/routing.rs b/chain/network/src/routing.rs
index ca10dc44f64..2be31cc39c9 100644
--- a/chain/network/src/routing.rs
+++ b/chain/network/src/routing.rs
@@ -344,20 +344,21 @@ impl RoutingTable {
     /// Returns a bool indicating whether this is a new entry or not.
     /// Note: There is at most one peer id per account id.
     pub fn add_account(&mut self, announce_account: AnnounceAccount) -> bool {
-        let account_id = announce_account.account_id.clone();
-        self.account_peers.insert(account_id, announce_account.clone()).map_or_else(
-            || {
-                near_metrics::inc_counter(&metrics::ACCOUNT_KNOWN);
-                true
-            },
-            |old_announce_account| old_announce_account != announce_account,
-        )
+        if !self.contains_account(&announce_account) {
+            let account_id = announce_account.account_id.clone();
+            self.account_peers.insert(account_id, announce_account);
+            near_metrics::inc_counter(&metrics::ACCOUNT_KNOWN);
+            true
+        } else {
+            false
+        }
     }
 
-    pub fn contains_account(&self, announce_account: AnnounceAccount) -> bool {
-        self.account_peers
-            .get(&announce_account.account_id)
-            .map_or(false, |cur_announce_account| *cur_announce_account == announce_account)
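+    /// Returns whether an announcement for this account from the same peer is already known.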
+    pub fn contains_account(&self, announce_account: &AnnounceAccount) -> bool {
+        self.account_peers.get(&announce_account.account_id).map_or(false, |cur_announce_account| {
+            assert_eq!(cur_announce_account.account_id, announce_account.account_id);
+            cur_announce_account.peer_id == announce_account.peer_id
+        })
     }
 
     /// Add this edge to the current view of the network.
diff --git a/chain/network/src/test_utils.rs b/chain/network/src/test_utils.rs
index b4288a27009..939c29525be 100644
--- a/chain/network/src/test_utils.rs
+++ b/chain/network/src/test_utils.rs
@@ -1,14 +1,15 @@
+use std::collections::{HashMap, HashSet};
 use std::net::TcpListener;
 use std::time::{Duration, Instant};
 
 use actix::{Actor, AsyncContext, Context, System};
+use futures::future;
 use futures::future::Future;
 use tokio::timer::Delay;
 
-use crate::types::{NetworkConfig, PeerId, PeerInfo};
-use futures::future;
 use near_crypto::{KeyType, SecretKey};
-use std::collections::{HashMap, HashSet};
+
+use crate::types::{NetworkConfig, PeerId, PeerInfo};
 
 /// Returns available port.
 pub fn open_port() -> u16 {
diff --git a/chain/network/src/types.rs b/chain/network/src/types.rs
index dca0017bb2c..679b308b326 100644
--- a/chain/network/src/types.rs
+++ b/chain/network/src/types.rs
@@ -14,17 +14,17 @@ use chrono::{DateTime, Utc};
 use serde_derive::{Deserialize, Serialize};
 use tokio::net::TcpStream;
 
-use near_chain::types::ShardStateSyncResponse;
-use near_chain::{Block, BlockHeader, Weight};
+use near_chain::types::{ShardStateSyncResponse, StateRequestParts};
+use near_chain::{Block, BlockHeader};
 use near_crypto::{PublicKey, SecretKey, Signature};
 use near_metrics;
-use near_primitives::block::{Approval, ApprovalMessage, GenesisId};
+use near_primitives::block::{Approval, ApprovalMessage, GenesisId, WeightAndScore};
 use near_primitives::challenge::Challenge;
 use near_primitives::errors::InvalidTxError;
 use near_primitives::hash::{hash, CryptoHash};
 use near_primitives::sharding::{ChunkHash, PartialEncodedChunk};
 use near_primitives::transaction::SignedTransaction;
-use near_primitives::types::{AccountId, BlockIndex, EpochId, Range, ShardId};
+use near_primitives::types::{AccountId, BlockIndex, EpochId, ShardId};
 use near_primitives::utils::{from_timestamp, to_timestamp};
 use near_primitives::views::{FinalExecutionOutcomeView, QueryResponse};
 
@@ -173,8 +173,8 @@ pub struct PeerChainInfo {
     pub genesis_id: GenesisId,
     /// Last known chain height of the peer.
     pub height: BlockIndex,
-    /// Last known chain weight of the peer.
-    pub total_weight: Weight,
+    /// Last known chain weight/score of the peer.
+    pub weight_and_score: WeightAndScore,
     /// Shards that the peer is tracking
     pub tracked_shards: Vec<ShardId>,
 }
@@ -307,7 +307,7 @@ pub enum RoutedMessageBody {
         response: QueryResponse,
         id: String,
     },
-    StateRequest(ShardId, CryptoHash, bool, Vec<Range>),
+    StateRequest(ShardId, CryptoHash, bool, StateRequestParts),
     StateResponse(StateResponseInfo),
     PartialEncodedChunkRequest(PartialEncodedChunkRequestMsg),
     PartialEncodedChunk(PartialEncodedChunk),
@@ -917,7 +917,7 @@ pub enum NetworkRequests {
         shard_id: ShardId,
         hash: CryptoHash,
         need_header: bool,
-        parts_ranges: Vec<Range>,
+        parts: StateRequestParts,
         target: AccountOrPeerIdOrHash,
     },
     /// Ban given peer.
@@ -1066,7 +1066,7 @@ pub enum NetworkClientMessages {
     /// Request a block.
     BlockRequest(CryptoHash),
     /// State request.
-    StateRequest(ShardId, CryptoHash, bool, Vec<Range>, CryptoHash),
+    StateRequest(ShardId, CryptoHash, bool, StateRequestParts, CryptoHash),
     /// State response.
     StateResponse(StateResponseInfo),
     /// Account announcements that needs to be validated before being processed.
@@ -1099,7 +1099,7 @@ pub enum NetworkClientResponses {
     ChainInfo {
         genesis_id: GenesisId,
         height: BlockIndex,
-        total_weight: Weight,
+        weight_and_score: WeightAndScore,
         tracked_shards: Vec<ShardId>,
     },
     /// Block response.
diff --git a/chain/network/tests/announce_account.rs b/chain/network/tests/announce_account.rs
index 2a752210c0a..93e440bc878 100644
--- a/chain/network/tests/announce_account.rs
+++ b/chain/network/tests/announce_account.rs
@@ -1,6 +1,8 @@
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Arc;
+use std::time::Instant;
 
+use actix::actors::mocker::Mocker;
 use actix::{Actor, Addr, AsyncContext, System};
 use chrono::{DateTime, Utc};
 use futures::{future, Future};
@@ -10,9 +12,15 @@ use near_chain::ChainGenesis;
 use near_client::{BlockProducer, ClientActor, ClientConfig};
 use near_crypto::{InMemorySigner, KeyType};
 use near_network::test_utils::{convert_boot_nodes, open_port, vec_ref_to_str, WaitOrTimeout};
-use near_network::types::NetworkInfo;
-use near_network::{NetworkConfig, NetworkRequests, NetworkResponses, PeerManagerActor};
+use near_network::types::{AnnounceAccount, NetworkInfo, PeerId, SyncData};
+use near_network::{
+    NetworkClientMessages, NetworkClientResponses, NetworkConfig, NetworkRequests,
+    NetworkResponses, PeerManagerActor,
+};
+use near_primitives::block::{GenesisId, WeightAndScore};
+use near_primitives::hash::hash;
 use near_primitives::test_utils::init_integration_logger;
+use near_primitives::types::EpochId;
 use near_store::test_utils::create_test_store;
 use near_telemetry::{TelemetryActor, TelemetryConfig};
 use testlib::test_helpers::heavy_test;
@@ -55,7 +63,7 @@ pub fn setup_network_node(
 
     let peer_manager = PeerManagerActor::create(move |ctx| {
         let client_actor = ClientActor::new(
-            ClientConfig::test(false, 100, num_validators),
+            ClientConfig::test(false, 100, 200, num_validators),
             store.clone(),
             chain_genesis,
             runtime,
@@ -115,10 +123,9 @@ fn check_account_id_propagation(
 
         WaitOrTimeout::new(
             Box::new(move |_| {
-                for (i, (pm, count)) in peer_managers.iter().enumerate() {
+                for (pm, count) in peer_managers.iter() {
                     let pm = pm.clone();
                     let count = count.clone();
-                    let account_ids_copy = accounts_id.clone();
 
                     let counters: Vec<_> =
                         peer_managers.iter().map(|(_, counter)| counter.clone()).collect();
@@ -128,10 +135,6 @@ fn check_account_id_propagation(
                             if let NetworkResponses::Info(NetworkInfo { known_producers, .. }) =
                                 res.unwrap()
                             {
-                                println!(
-                                    "Known producers of {}: {:?}",
-                                    account_ids_copy[i], known_producers
-                                );
                                 if known_producers.len() == total_nodes {
                                     count.fetch_add(1, Ordering::Relaxed);
 
@@ -156,6 +159,45 @@ fn check_account_id_propagation(
     .unwrap();
 }
 
+/// Makes a peer manager with a mocked client that accepts any announce account.
+/// Used for `test_infinite_loop`.
+pub fn make_peer_manager(
+    seed: &str,
+    port: u16,
+    boot_nodes: Vec<(&str, u16)>,
+    peer_max_count: u32,
+) -> (PeerManagerActor, PeerId, Arc<AtomicUsize>) {
+    let store = create_test_store();
+    let mut config = NetworkConfig::from_seed(seed, port);
+    config.boot_nodes = convert_boot_nodes(boot_nodes);
+    config.peer_max_count = peer_max_count;
+    let counter = Arc::new(AtomicUsize::new(0));
+    let counter1 = counter.clone();
+    let client_addr = ClientMock::mock(Box::new(move |msg, _ctx| {
+        let msg = msg.downcast_ref::<NetworkClientMessages>().unwrap();
+        match msg {
+            NetworkClientMessages::AnnounceAccount(accounts) => {
+                if !accounts.is_empty() {
+                    counter1.fetch_add(1, Ordering::SeqCst);
+                }
+                Box::new(Some(NetworkClientResponses::AnnounceAccount(accounts.clone())))
+            }
+            NetworkClientMessages::GetChainInfo => {
+                Box::new(Some(NetworkClientResponses::ChainInfo {
+                    genesis_id: GenesisId::default(),
+                    height: 1,
+                    weight_and_score: WeightAndScore::from_ints(0, 0),
+                    tracked_shards: vec![],
+                }))
+            }
+            _ => Box::new(Some(NetworkClientResponses::NoResponse)),
+        }
+    }))
+    .start();
+    let peer_id = config.public_key.clone().into();
+    (PeerManagerActor::new(store, config, client_addr.recipient()).unwrap(), peer_id, counter)
+}
+
 #[test]
 fn two_nodes() {
     heavy_test(|| {
@@ -288,3 +330,88 @@ fn circle_extra_connection() {
         check_account_id_propagation(accounts_id, adjacency_list, max_peer_connections, 5000);
     });
 }
+
+type ClientMock = Mocker<ClientActor>;
+
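+/// Checks that re-broadcasting known account announcements does not make them loop between the
+/// two peer managers: each mocked client should observe a non-empty announcement exactly once.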
+#[test]
+fn test_infinite_loop() {
+    init_integration_logger();
+    System::run(|| {
+        let (port1, port2) = (open_port(), open_port());
+        let (pm1, peer_id1, counter1) = make_peer_manager("test1", port1, vec![], 10);
+        let (pm2, _, counter2) = make_peer_manager("test2", port2, vec![("test1", port1)], 10);
+        let pm1 = pm1.start();
+        let pm2 = pm2.start();
+        let peer_id = peer_id1.clone();
+        let request1 = NetworkRequests::Sync {
+            peer_id: peer_id.clone(),
+            sync_data: SyncData::account(AnnounceAccount {
+                account_id: "near".to_string(),
+                peer_id: peer_id.clone(),
+                epoch_id: Default::default(),
+                signature: Default::default(),
+            }),
+        };
+        let request2 = NetworkRequests::Sync {
+            peer_id: peer_id.clone(),
+            sync_data: SyncData::account(AnnounceAccount {
+                account_id: "near".to_string(),
+                peer_id: peer_id.clone(),
+                epoch_id: EpochId(hash(&[1])),
+                signature: Default::default(),
+            }),
+        };
+
+        let state = Arc::new(AtomicUsize::new(0));
+        let start = Instant::now();
+
+        WaitOrTimeout::new(
+            Box::new(move |_| {
+                let state_value = state.load(Ordering::SeqCst);
+
+                let state1 = state.clone();
+                if state_value == 0 {
+                    actix::spawn(pm2.clone().send(NetworkRequests::FetchInfo).then(move |res| {
+                        if let Ok(NetworkResponses::Info(info)) = res {
+                            if !info.active_peers.is_empty() {
+                                state1.store(1, Ordering::SeqCst);
+                            }
+                        }
+                        future::ok(())
+                    }));
+                } else if state_value == 1 {
+                    actix::spawn(pm1.clone().send(request1.clone()).then(move |res| {
+                        assert!(res.is_ok());
+                        state1.store(2, Ordering::SeqCst);
+                        future::ok(())
+                    }));
+                } else if state_value == 2 {
+                    if counter1.load(Ordering::SeqCst) == 1 && counter2.load(Ordering::SeqCst) == 1
+                    {
+                        state.store(3, Ordering::SeqCst);
+                    }
+                } else if state_value == 3 {
+                    actix::spawn(pm1.clone().send(request1.clone()).then(move |res| {
+                        assert!(res.is_ok());
+                        future::ok(())
+                    }));
+                    actix::spawn(pm2.clone().send(request2.clone()).then(move |res| {
+                        assert!(res.is_ok());
+                        future::ok(())
+                    }));
+                    state.store(4, Ordering::SeqCst);
+                } else if state_value == 4 {
+                    assert_eq!(counter1.load(Ordering::SeqCst), 1);
+                    assert_eq!(counter2.load(Ordering::SeqCst), 1);
+                    if Instant::now().duration_since(start).as_millis() > 800 {
+                        System::current().stop();
+                    }
+                }
+            }),
+            100,
+            10000,
+        )
+        .start();
+    })
+    .unwrap();
+}
diff --git a/chain/network/tests/peer_handshake.rs b/chain/network/tests/peer_handshake.rs
index 5b28e8e7e75..0bad002471e 100644
--- a/chain/network/tests/peer_handshake.rs
+++ b/chain/network/tests/peer_handshake.rs
@@ -14,6 +14,7 @@ use near_network::{
     NetworkClientMessages, NetworkClientResponses, NetworkConfig, NetworkRequests,
     NetworkResponses, PeerManagerActor,
 };
+use near_primitives::block::WeightAndScore;
 use near_primitives::test_utils::init_test_logger;
 use near_store::test_utils::create_test_store;
 
@@ -36,7 +37,7 @@ fn make_peer_manager(
                 Box::new(Some(NetworkClientResponses::ChainInfo {
                     genesis_id: Default::default(),
                     height: 1,
-                    total_weight: 1.into(),
+                    weight_and_score: WeightAndScore::from_ints(1, 0),
                     tracked_shards: vec![],
                 }))
             }
diff --git a/chain/network/tests/routing.rs b/chain/network/tests/routing.rs
index 765a68c6074..cf55acbe602 100644
--- a/chain/network/tests/routing.rs
+++ b/chain/network/tests/routing.rs
@@ -60,7 +60,7 @@ pub fn setup_network_node(
         ChainGenesis::new(genesis_time, 1_000_000, 100, 1_000_000_000, 0, 0, 1000, 5);
 
     let peer_manager = PeerManagerActor::create(move |ctx| {
-        let mut client_config = ClientConfig::test(false, 100, num_validators);
+        let mut client_config = ClientConfig::test(false, 100, 200, num_validators);
         client_config.ttl_account_id_router = ttl_account_id_router;
         let client_actor = ClientActor::new(
             client_config,
diff --git a/core/primitives/benches/serialization.rs b/core/primitives/benches/serialization.rs
index 6c5e1e59038..6200c1a217c 100644
--- a/core/primitives/benches/serialization.rs
+++ b/core/primitives/benches/serialization.rs
@@ -31,17 +31,12 @@ fn create_transaction() -> SignedTransaction {
 }
 
 fn create_block() -> Block {
-    let genesis_chunks = genesis_chunks(
-        vec![StateRoot { hash: CryptoHash::default(), num_parts: 1 /* TODO MOO */ }],
-        1,
-        1_000,
-    );
+    let genesis_chunks = genesis_chunks(vec![StateRoot::default()], 1, 1_000);
     let genesis = Block::genesis(
         genesis_chunks.into_iter().map(|chunk| chunk.header).collect(),
         Utc::now(),
         1_000,
         1_000,
-        1_000,
     );
     let signer = InMemorySigner::from_random("".to_string(), KeyType::ED25519);
     Block::produce(
diff --git a/core/primitives/src/block.rs b/core/primitives/src/block.rs
index acc7556533a..d903ccccd0f 100644
--- a/core/primitives/src/block.rs
+++ b/core/primitives/src/block.rs
@@ -11,6 +11,7 @@ use crate::types::{
     AccountId, Balance, BlockIndex, EpochId, Gas, MerkleHash, ShardId, StateRoot, ValidatorStake,
 };
 use crate::utils::{from_timestamp, to_timestamp};
+use std::cmp::Ordering;
 
 #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Eq, PartialEq)]
 pub struct BlockHeaderInner {
@@ -43,10 +44,6 @@ pub struct BlockHeaderInner {
     pub validator_proposals: Vec<ValidatorStake>,
     /// Mask for new chunks included in the block
     pub chunk_mask: Vec<bool>,
-    /// Sum of gas used across all chunks.
-    pub gas_used: Gas,
-    /// Gas limit. Same for all chunks.
-    pub gas_limit: Gas,
     /// Gas price. Same for all chunks
     pub gas_price: Balance,
     /// Sum of all storage rent paid across all chunks.
@@ -83,8 +80,6 @@ impl BlockHeaderInner {
         score: Weight,
         validator_proposals: Vec<ValidatorStake>,
         chunk_mask: Vec<bool>,
-        gas_used: Gas,
-        gas_limit: Gas,
         gas_price: Balance,
         rent_paid: Balance,
         validator_reward: Balance,
@@ -109,8 +104,6 @@ impl BlockHeaderInner {
             score,
             validator_proposals,
             chunk_mask,
-            gas_used,
-            gas_limit,
             gas_price,
             rent_paid,
             validator_reward,
@@ -121,6 +114,10 @@ impl BlockHeaderInner {
             approvals,
         }
     }
+
+    pub fn weight_and_score(&self) -> WeightAndScore {
+        WeightAndScore { weight: self.total_weight, score: self.score }
+    }
 }
 
 /// Block approval by other block producers.
@@ -206,8 +203,6 @@ impl BlockHeader {
         validator_proposals: Vec<ValidatorStake>,
         chunk_mask: Vec<bool>,
         epoch_id: EpochId,
-        gas_used: Gas,
-        gas_limit: Gas,
         gas_price: Balance,
         rent_paid: Balance,
         validator_reward: Balance,
@@ -233,8 +228,6 @@ impl BlockHeader {
             score,
             validator_proposals,
             chunk_mask,
-            gas_used,
-            gas_limit,
             gas_price,
             rent_paid,
             validator_reward,
@@ -255,7 +248,6 @@ impl BlockHeader {
         chunk_tx_root: MerkleHash,
         chunks_included: u64,
         timestamp: DateTime<Utc>,
-        initial_gas_limit: Gas,
         initial_gas_price: Balance,
         initial_total_supply: Balance,
     ) -> Self {
@@ -274,8 +266,6 @@ impl BlockHeader {
             0.into(),
             vec![],
             vec![],
-            0,
-            initial_gas_limit,
             initial_gas_price,
             0,
             0,
@@ -353,7 +343,6 @@ impl Block {
     pub fn genesis(
         chunks: Vec<ShardChunkHeader>,
         timestamp: DateTime<Utc>,
-        initial_gas_limit: Gas,
         initial_gas_price: Balance,
         initial_total_supply: Balance,
     ) -> Self {
@@ -365,7 +354,6 @@ impl Block {
                 Block::compute_chunk_tx_root(&chunks),
                 Block::compute_chunks_included(&chunks, 0),
                 timestamp,
-                initial_gas_limit,
                 initial_gas_price,
                 initial_total_supply,
             ),
@@ -393,12 +381,12 @@ impl Block {
         // Collect aggregate of validators and gas usage/limits from chunks.
         let mut validator_proposals = vec![];
         let mut gas_used = 0;
-        let mut gas_limit = 0;
         // This computation of chunk_mask relies on the fact that chunks are ordered by shard_id.
         let mut chunk_mask = vec![];
         let mut storage_rent = 0;
         let mut validator_reward = 0;
         let mut balance_burnt = 0;
+        let mut gas_limit = 0;
         for chunk in chunks.iter() {
             if chunk.height_included == height {
                 validator_proposals.extend_from_slice(&chunk.inner.validator_proposals);
@@ -412,16 +400,13 @@ impl Block {
                 chunk_mask.push(false);
             }
         }
+        let new_gas_price = Self::compute_new_gas_price(
+            prev.inner.gas_price,
+            gas_used,
+            gas_limit,
+            gas_price_adjustment_rate,
+        );
 
-        let new_gas_price = if gas_limit > 0 {
-            (2 * u128::from(gas_limit) + 2 * u128::from(gas_price_adjustment_rate)
-                - u128::from(gas_limit) * u128::from(gas_price_adjustment_rate))
-                * prev.inner.gas_price
-                / (2 * u128::from(gas_limit) * 100)
-        } else {
-            // If there are no new chunks included in this block, use previous price.
-            prev.inner.gas_price
-        };
         let new_total_supply = prev.inner.total_supply + inflation.unwrap_or(0) - balance_burnt;
 
         let num_approvals: u128 = approvals.len() as u128;
@@ -445,9 +430,6 @@ impl Block {
                 validator_proposals,
                 chunk_mask,
                 epoch_id,
-                gas_used,
-                gas_limit,
-                // TODO: calculate this correctly
                 new_gas_price,
                 storage_rent,
                 validator_reward,
@@ -463,12 +445,38 @@ impl Block {
         }
     }
 
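+    /// Checks that the gas price recorded in this block's header matches the price recomputed
+    /// from `prev_gas_price` and the gas used/limit of the chunks included at this height.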
+    pub fn verify_gas_price(&self, prev_gas_price: Balance, gas_price_adjustment_rate: u8) -> bool {
+        let gas_used = Self::compute_gas_used(&self.chunks, self.header.inner.height);
+        let gas_limit = Self::compute_gas_limit(&self.chunks, self.header.inner.height);
+        let expected_price = Self::compute_new_gas_price(
+            prev_gas_price,
+            gas_used,
+            gas_limit,
+            gas_price_adjustment_rate,
+        );
+        expected_price == self.header.inner.gas_price
+    }
+
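+    /// Computes the gas price for the next block from the previous price and this block's gas
+    /// usage. Written with integer arithmetic, this is equivalent to
+    /// `prev_gas_price * (1 + adjustment_rate/100 * (gas_used/gas_limit - 1/2))`;
+    /// if no new chunks are included (`gas_limit == 0`), the previous price is kept.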
+    pub fn compute_new_gas_price(
+        prev_gas_price: Balance,
+        gas_used: Gas,
+        gas_limit: Gas,
+        gas_price_adjustment_rate: u8,
+    ) -> Balance {
+        if gas_limit == 0 {
+            prev_gas_price
+        } else {
+            let numerator = 2 * 100 * u128::from(gas_limit)
+                - u128::from(gas_price_adjustment_rate) * u128::from(gas_limit)
+                + 2 * u128::from(gas_price_adjustment_rate) * u128::from(gas_used);
+            let denominator = 2 * 100 * u128::from(gas_limit);
+            prev_gas_price * numerator / denominator
+        }
+    }
+
     pub fn compute_state_root(chunks: &Vec<ShardChunkHeader>) -> CryptoHash {
         merklize(
-            &chunks
-                .iter()
-                .map(|chunk| chunk.inner.prev_state_root.hash)
-                .collect::<Vec<CryptoHash>>(),
+            &chunks.iter().map(|chunk| chunk.inner.prev_state_root).collect::<Vec<CryptoHash>>(),
         )
         .0
     }
@@ -507,6 +515,26 @@ impl Block {
             .0
     }
 
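+    /// Sums `gas_used` over the chunks included at `block_height`.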
+    pub fn compute_gas_used(chunks: &[ShardChunkHeader], block_height: BlockIndex) -> Gas {
+        chunks.iter().fold(0, |acc, chunk| {
+            if chunk.height_included == block_height {
+                acc + chunk.inner.gas_used
+            } else {
+                acc
+            }
+        })
+    }
+
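+    /// Sums `gas_limit` over the chunks that were actually included at `block_height`.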
+    pub fn compute_gas_limit(chunks: &[ShardChunkHeader], block_height: BlockIndex) -> Gas {
+        chunks.iter().fold(0, |acc, chunk| {
+            if chunk.height_included == block_height {
+                acc + chunk.inner.gas_limit
+            } else {
+                acc
+            }
+        })
+    }
+
     pub fn validate_chunk_header_proof(
         chunk: &ShardChunkHeader,
         chunk_root: &CryptoHash,
@@ -567,6 +595,12 @@ pub struct Weight {
     num: u128,
 }
 
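+/// Accumulated chain weight paired with the finality-gadget score, used together to compare forks.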
+#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Copy, PartialEq, Eq, Default)]
+pub struct WeightAndScore {
+    pub weight: Weight,
+    pub score: Weight,
+}
+
 impl Weight {
     pub fn to_num(self) -> u128 {
         self.num
@@ -589,6 +623,40 @@ impl std::fmt::Display for Weight {
     }
 }
 
+impl WeightAndScore {
+    pub fn from_ints(weight: u128, score: u128) -> Self {
+        Self { weight: weight.into(), score: score.into() }
+    }
+
+    /// Returns whether one chain is `threshold` weight ahead of the other, where "ahead" is loosely
+    /// defined as either having a score that exceeds the other's by `threshold` (the finality gadget
+    /// is working fine, and the last reported final block is way ahead of the last one known to us),
+    /// or having the same score but a weight that exceeds the other's by `threshold` (the finality
+    /// gadget is down, and the canonical chain has significantly higher weight).
+    pub fn beyond_threshold(&self, other: &WeightAndScore, threshold: u128) -> bool {
+        if self.score == other.score {
+            self.weight.to_num() > other.weight.to_num() + threshold
+        } else {
+            self.score.to_num() > other.score.to_num() + threshold
+        }
+    }
+}
+
+impl PartialOrd for WeightAndScore {
+    fn partial_cmp(&self, other: &WeightAndScore) -> Option<Ordering> {
+        Some(self.cmp(other))
+    }
+}
+
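+// Order by score first; weight only breaks ties between equal scores.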
+impl Ord for WeightAndScore {
+    fn cmp(&self, other: &WeightAndScore) -> Ordering {
+        match self.score.cmp(&other.score) {
+            v @ Ordering::Less | v @ Ordering::Greater => v,
+            Ordering::Equal => self.weight.cmp(&other.weight),
+        }
+    }
+}
+
 #[derive(BorshSerialize, BorshDeserialize, Clone, Debug, Eq, PartialEq, Default)]
 pub struct GenesisId {
     /// Chain Id
diff --git a/core/primitives/src/challenge.rs b/core/primitives/src/challenge.rs
index 13a67d53561..4ac3b45f1bb 100644
--- a/core/primitives/src/challenge.rs
+++ b/core/primitives/src/challenge.rs
@@ -10,7 +10,8 @@ use crate::types::AccountId;
 /// Serialized TrieNodeWithSize
 pub type StateItem = Vec<u8>;
 
-pub type PartialState = Vec<StateItem>;
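+/// Set of serialized trie nodes that make up a state part or a partial storage proof.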
+#[derive(BorshSerialize, BorshDeserialize, Debug, Clone, Eq, PartialEq)]
+pub struct PartialState(pub Vec<StateItem>);
 
 /// Double signed block.
 #[derive(BorshSerialize, BorshDeserialize, PartialEq, Eq, Clone, Debug)]
diff --git a/core/primitives/src/errors.rs b/core/primitives/src/errors.rs
index 3400494852c..dc8aeb1f88c 100644
--- a/core/primitives/src/errors.rs
+++ b/core/primitives/src/errors.rs
@@ -173,10 +173,12 @@ pub struct BalanceMismatchError {
     pub incoming_validator_rewards: Balance,
     pub initial_accounts_balance: Balance,
     pub incoming_receipts_balance: Balance,
+    pub processed_delayed_receipts_balance: Balance,
     pub initial_postponed_receipts_balance: Balance,
     // Output balances
     pub final_accounts_balance: Balance,
     pub outgoing_receipts_balance: Balance,
+    pub new_delayed_receipts_balance: Balance,
     pub final_postponed_receipts_balance: Balance,
     pub total_rent_paid: Balance,
     pub total_validator_reward: Balance,
@@ -191,10 +193,12 @@ impl Display for BalanceMismatchError {
             .incoming_validator_rewards
             .saturating_add(self.initial_accounts_balance)
             .saturating_add(self.incoming_receipts_balance)
+            .saturating_add(self.processed_delayed_receipts_balance)
             .saturating_add(self.initial_postponed_receipts_balance);
         let final_balance = self
             .final_accounts_balance
             .saturating_add(self.outgoing_receipts_balance)
+            .saturating_add(self.new_delayed_receipts_balance)
             .saturating_add(self.final_postponed_receipts_balance)
             .saturating_add(self.total_rent_paid)
             .saturating_add(self.total_validator_reward)
@@ -207,10 +211,12 @@ impl Display for BalanceMismatchError {
              \tIncoming validator rewards sum: {}\n\
              \tInitial accounts balance sum: {}\n\
              \tIncoming receipts balance sum: {}\n\
+             \tProcessed delayed receipts balance sum: {}\n\
              \tInitial postponed receipts balance sum: {}\n\
              Outputs:\n\
              \tFinal accounts balance sum: {}\n\
              \tOutgoing receipts balance sum: {}\n\
+             \tNew delayed receipts balance sum: {}\n\
              \tFinal postponed receipts balance sum: {}\n\
              \tTotal rent paid: {}\n\
              \tTotal validators reward: {}\n\
@@ -221,9 +227,11 @@ impl Display for BalanceMismatchError {
             self.incoming_validator_rewards,
             self.initial_accounts_balance,
             self.incoming_receipts_balance,
+            self.processed_delayed_receipts_balance,
             self.initial_postponed_receipts_balance,
             self.final_accounts_balance,
             self.outgoing_receipts_balance,
+            self.new_delayed_receipts_balance,
             self.final_postponed_receipts_balance,
             self.total_rent_paid,
             self.total_validator_reward,
diff --git a/core/primitives/src/types.rs b/core/primitives/src/types.rs
index da5bce207ed..03abe317e00 100644
--- a/core/primitives/src/types.rs
+++ b/core/primitives/src/types.rs
@@ -30,11 +30,15 @@ pub type Gas = u64;
 pub type ReceiptIndex = usize;
 pub type PromiseId = Vec<ReceiptIndex>;
 
-/// Hash used by to store state root and the number of parts the state is divided.
-#[derive(Hash, Eq, PartialEq, Clone, Debug, BorshSerialize, BorshDeserialize, Default)]
-pub struct StateRoot {
-    pub hash: CryptoHash,
-    pub num_parts: u64,
+/// Hash used to store the state root.
+pub type StateRoot = CryptoHash;
+
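+/// The root trie node in serialized form, together with the memory usage recorded in it.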
+#[derive(PartialEq, Eq, Clone, Debug, BorshSerialize, BorshDeserialize)]
+pub struct StateRootNode {
+    /// In Nightshade, `data` is the serialized `TrieNodeWithSize`.
+    pub data: Vec<u8>,
+    /// In Nightshade, `memory_usage` is a field of `TrieNodeWithSize`.
+    pub memory_usage: u64,
 }
 
 /// Epoch identifier -- wrapped hash, to make it easier to distinguish.
@@ -49,9 +53,6 @@ impl AsRef<[u8]> for EpochId {
     }
 }
 
-#[derive(Hash, Eq, PartialEq, Clone, Debug, BorshSerialize, BorshDeserialize, Default)]
-pub struct Range(pub u64, pub u64);
-
 /// Stores validator and its stake.
 #[derive(BorshSerialize, BorshDeserialize, Debug, Clone, PartialEq, Eq)]
 pub struct ValidatorStake {
diff --git a/core/primitives/src/utils.rs b/core/primitives/src/utils.rs
index a29abec04aa..074192ec172 100644
--- a/core/primitives/src/utils.rs
+++ b/core/primitives/src/utils.rs
@@ -30,6 +30,8 @@ pub mod col {
     pub const POSTPONED_RECEIPT_ID: &[u8] = &[4];
     pub const PENDING_DATA_COUNT: &[u8] = &[5];
     pub const POSTPONED_RECEIPT: &[u8] = &[6];
+    pub const DELAYED_RECEIPT_INDICES: &[u8] = &[7];
+    pub const DELAYED_RECEIPT: &[u8] = &[8];
 }
 
 fn key_for_column_account_id(column: &[u8], account_key: &AccountId) -> Vec<u8> {
@@ -100,6 +102,12 @@ pub fn key_for_postponed_receipt(account_id: &AccountId, receipt_id: &CryptoHash
     key
 }
 
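+/// Builds the storage key for the delayed receipt at `index`: the `DELAYED_RECEIPT` column
+/// prefix followed by the index encoded as little-endian bytes.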
+pub fn key_for_delayed_receipt(index: u64) -> Vec<u8> {
+    let mut key = col::DELAYED_RECEIPT.to_vec();
+    key.extend_from_slice(&index.to_le_bytes());
+    key
+}
+
 pub fn create_nonce_with_nonce(base: &CryptoHash, salt: u64) -> CryptoHash {
     let mut nonce: Vec<u8> = base.as_ref().to_owned();
     nonce.append(&mut index_to_bytes(salt));
diff --git a/core/primitives/src/views.rs b/core/primitives/src/views.rs
index e7278308b5b..0a5a632848a 100644
--- a/core/primitives/src/views.rs
+++ b/core/primitives/src/views.rs
@@ -251,8 +251,6 @@ pub struct BlockHeaderView {
     pub score: u128,
     pub validator_proposals: Vec<ValidatorStakeView>,
     pub chunk_mask: Vec<bool>,
-    pub gas_used: Gas,
-    pub gas_limit: Gas,
     #[serde(with = "u128_dec_format")]
     pub gas_price: Balance,
     #[serde(with = "u128_dec_format")]
@@ -291,8 +289,6 @@ impl From<BlockHeader> for BlockHeaderView {
                 .map(|v| v.into())
                 .collect(),
             chunk_mask: header.inner.chunk_mask,
-            gas_used: header.inner.gas_used,
-            gas_limit: header.inner.gas_limit,
             gas_price: header.inner.gas_price,
             rent_paid: header.inner.rent_paid,
             validator_reward: header.inner.validator_reward,
@@ -333,9 +329,7 @@ impl From<BlockHeaderView> for BlockHeader {
                     .map(|v| v.into())
                     .collect(),
                 chunk_mask: view.chunk_mask,
-                gas_limit: view.gas_limit,
                 gas_price: view.gas_price,
-                gas_used: view.gas_used,
                 total_supply: view.total_supply,
                 challenges_result: view.challenges_result,
                 rent_paid: view.rent_paid,
@@ -366,8 +360,7 @@ pub struct ChunkHeaderView {
     pub chunk_hash: CryptoHash,
     pub prev_block_hash: CryptoHash,
     pub outcome_root: CryptoHash,
-    pub prev_state_root_hash: CryptoHash,
-    pub prev_state_num_parts: u64,
+    pub prev_state_root: StateRoot,
     pub encoded_merkle_root: CryptoHash,
     pub encoded_length: u64,
     pub height_created: BlockIndex,
@@ -393,8 +386,7 @@ impl From<ShardChunkHeader> for ChunkHeaderView {
             chunk_hash: chunk.hash.0,
             prev_block_hash: chunk.inner.prev_block_hash,
             outcome_root: chunk.inner.outcome_root,
-            prev_state_root_hash: chunk.inner.prev_state_root.hash,
-            prev_state_num_parts: chunk.inner.prev_state_root.num_parts,
+            prev_state_root: chunk.inner.prev_state_root,
             encoded_merkle_root: chunk.inner.encoded_merkle_root,
             encoded_length: chunk.inner.encoded_length,
             height_created: chunk.inner.height_created,
@@ -423,10 +415,7 @@ impl From<ChunkHeaderView> for ShardChunkHeader {
         let mut header = ShardChunkHeader {
             inner: ShardChunkHeaderInner {
                 prev_block_hash: view.prev_block_hash,
-                prev_state_root: StateRoot {
-                    hash: view.prev_state_root_hash,
-                    num_parts: view.prev_state_num_parts,
-                },
+                prev_state_root: view.prev_state_root,
                 outcome_root: view.outcome_root,
                 encoded_merkle_root: view.encoded_merkle_root,
                 encoded_length: view.encoded_length,
@@ -976,9 +965,18 @@ impl TryFrom<ReceiptView> for Receipt {
 #[derive(BorshSerialize, BorshDeserialize, Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
 pub struct EpochValidatorInfo {
     /// Validators for the current epoch
-    pub current_validators: Vec<ValidatorStakeView>,
+    pub current_validators: Vec<CurrentEpochValidatorInfo>,
     /// Validators for the next epoch
     pub next_validators: Vec<ValidatorStakeView>,
     /// Proposals in the current epoch
     pub current_proposals: Vec<ValidatorStakeView>,
 }
+
+#[derive(BorshSerialize, BorshDeserialize, Serialize, Deserialize, Debug, PartialEq, Eq, Clone)]
+pub struct CurrentEpochValidatorInfo {
+    pub account_id: AccountId,
+    pub is_slashed: bool,
+    #[serde(with = "u128_dec_format")]
+    pub stake: Balance,
+    pub num_missing_blocks: BlockIndex,
+}
diff --git a/core/store/src/lib.rs b/core/store/src/lib.rs
index efa6d36b535..dc8cc812a87 100644
--- a/core/store/src/lib.rs
+++ b/core/store/src/lib.rs
@@ -58,7 +58,8 @@ pub const COL_BLOCK_EXTRA: Option<u32> = Some(19);
 pub const COL_BLOCK_PER_HEIGHT: Option<u32> = Some(20);
 pub const COL_LAST_APPROVALS_PER_ACCOUNT: Option<u32> = Some(21);
 pub const COL_MY_LAST_APPROVALS_PER_CHAIN: Option<u32> = Some(22);
-const NUM_COLS: u32 = 23;
+pub const COL_STATE_PARTS: Option<u32> = Some(23);
+const NUM_COLS: u32 = 24;
 
 pub struct Store {
     storage: Arc<dyn KeyValueDB>,
diff --git a/core/store/src/trie/mod.rs b/core/store/src/trie/mod.rs
index afb1b0c1bf1..eb049c56ca8 100644
--- a/core/store/src/trie/mod.rs
+++ b/core/store/src/trie/mod.rs
@@ -12,12 +12,14 @@ use kvdb::{DBOp, DBTransaction};
 
 use near_primitives::challenge::PartialState;
 use near_primitives::hash::{hash, CryptoHash};
+use near_primitives::types::{StateRoot, StateRootNode};
 
 use crate::trie::insert_delete::NodesStorage;
 use crate::trie::iterator::TrieIterator;
 use crate::trie::nibble_slice::NibbleSlice;
 use crate::trie::trie_storage::{
-    TrieCachingStorage, TrieMemoryPartialStorage, TrieRecordingStorage, TrieStorage,
+    TouchedNodesCounter, TrieCachingStorage, TrieMemoryPartialStorage, TrieRecordingStorage,
+    TrieStorage,
 };
 use crate::{StorageError, Store, StoreUpdate, COL_STATE};
 
@@ -66,9 +68,9 @@ enum TrieNode {
 }
 
 #[derive(Clone, Debug)]
-pub(crate) struct TrieNodeWithSize {
+pub struct TrieNodeWithSize {
     node: TrieNode,
-    memory_usage: u64,
+    pub memory_usage: u64,
 }
 
 impl TrieNodeWithSize {
@@ -371,6 +373,7 @@ impl RcTrieNode {
 
 pub struct Trie {
     storage: Box<dyn TrieStorage>,
+    pub counter: TouchedNodesCounter,
 }
 
 ///
@@ -398,14 +401,14 @@ pub struct Trie {
 /// StoreUpdate are the changes from current state refcount to refcount + delta.
 pub struct TrieChanges {
     #[allow(dead_code)]
-    old_root: CryptoHash,
-    pub new_root: CryptoHash,
+    old_root: StateRoot,
+    pub new_root: StateRoot,
     insertions: Vec<(CryptoHash, Vec<u8>, u32)>, // key, value, rc
     deletions: Vec<(CryptoHash, Vec<u8>, u32)>,  // key, value, rc
 }
 
 impl TrieChanges {
-    pub fn empty(old_root: CryptoHash) -> Self {
+    pub fn empty(old_root: StateRoot) -> Self {
         TrieChanges { old_root, new_root: old_root, insertions: vec![], deletions: vec![] }
     }
     pub fn insertions_into(
@@ -454,7 +457,7 @@ impl TrieChanges {
     pub fn into(
         self,
         trie: Arc<Trie>,
-    ) -> Result<(StoreUpdate, CryptoHash), Box<dyn std::error::Error>> {
+    ) -> Result<(StoreUpdate, StateRoot), Box<dyn std::error::Error>> {
         let mut store_update = StoreUpdate::new_with_trie(
             trie.storage
                 .as_caching_storage()
@@ -497,7 +500,10 @@ impl WrappedTrieChanges {
 
 impl Trie {
     pub fn new(store: Arc<Store>) -> Self {
-        Trie { storage: Box::new(TrieCachingStorage::new(store)) }
+        Trie {
+            storage: Box::new(TrieCachingStorage::new(store)),
+            counter: TouchedNodesCounter::default(),
+        }
     }
 
     pub fn recording_reads(&self) -> Self {
@@ -510,11 +516,11 @@ impl Trie {
             },
             recorded: Arc::new(Mutex::new(Default::default())),
         };
-        Trie { storage: Box::new(storage) }
+        Trie { storage: Box::new(storage), counter: TouchedNodesCounter::default() }
     }
 
-    pub fn empty_root() -> CryptoHash {
-        CryptoHash::default()
+    pub fn empty_root() -> StateRoot {
+        StateRoot::default()
     }
 
     pub fn recorded_storage(&self) -> Option<PartialStorage> {
@@ -522,17 +528,18 @@ impl Trie {
         let mut guard = storage.recorded.lock().expect(POISONED_LOCK_ERR);
         let mut nodes: Vec<_> = guard.drain().map(|(_key, value)| value).collect();
         nodes.sort();
-        Some(PartialStorage { nodes })
+        Some(PartialStorage { nodes: PartialState(nodes) })
     }
 
     pub fn from_recorded_storage(partial_storage: PartialStorage) -> Self {
         let recorded_storage =
-            partial_storage.nodes.into_iter().map(|value| (hash(&value), value)).collect();
+            partial_storage.nodes.0.into_iter().map(|value| (hash(&value), value)).collect();
         Trie {
             storage: Box::new(TrieMemoryPartialStorage {
                 recorded_storage,
                 visited_nodes: Default::default(),
             }),
+            counter: TouchedNodesCounter::default(),
         }
     }
 
@@ -587,6 +594,7 @@ impl Trie {
         if *hash == Trie::empty_root() {
             Ok(memory.store(TrieNodeWithSize::empty()))
         } else {
+            self.counter.increment();
             let bytes = self.storage.retrieve_raw_bytes(hash)?;
             match RawTrieNodeWithSize::decode(&bytes) {
                 Ok(value) => {
@@ -610,6 +618,7 @@ impl Trie {
         if *hash == Trie::empty_root() {
             return Ok(TrieNodeWithSize::empty());
         }
+        self.counter.increment();
         let bytes = self.storage.retrieve_raw_bytes(hash)?;
         match RawTrieNodeWithSize::decode(&bytes) {
             Ok(value) => Ok(TrieNodeWithSize::from_raw(value)),
@@ -620,6 +629,27 @@ impl Trie {
         }
     }
 
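+    /// Retrieves the raw serialized root node together with the memory usage recorded in it,
+    /// packaged as a `StateRootNode`; the empty root is reported as an error.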
+    pub fn retrieve_root_node(&self, root: &StateRoot) -> Result<StateRootNode, StorageError> {
+        if *root == Trie::empty_root() {
+            return Err(StorageError::StorageInconsistentState(format!(
+                "Failed to retrieve root node {}",
+                root
+            )));
+        }
+        self.counter.increment();
+        let data = self.storage.retrieve_raw_bytes(root)?;
+        match RawTrieNodeWithSize::decode(&data) {
+            Ok(value) => {
+                let memory_usage = TrieNodeWithSize::from_raw(value).memory_usage;
+                Ok(StateRootNode { data, memory_usage })
+            }
+            Err(_) => Err(StorageError::StorageInconsistentState(format!(
+                "Failed to decode node {}",
+                root
+            ))),
+        }
+    }
+
     fn lookup(
         &self,
         root: &CryptoHash,
@@ -631,6 +661,7 @@ impl Trie {
             if hash == Trie::empty_root() {
                 return Ok(None);
             }
+            self.counter.increment();
             let bytes = self.storage.retrieve_raw_bytes(&hash)?;
             let node = RawTrieNodeWithSize::decode(&bytes).map_err(|_| {
                 StorageError::StorageInconsistentState("RawTrieNode decode failed".to_string())
@@ -1040,8 +1071,8 @@ mod tests {
             if root1 != root2 {
                 eprintln!("{:?}", trie_changes);
                 eprintln!("{:?}", simplified_changes);
-                eprintln!("root1: {}", root1);
-                eprintln!("root2: {}", root2);
+                eprintln!("root1: {:?}", root1);
+                eprintln!("root2: {:?}", root2);
                 panic!("MISMATCH!");
             }
             // TODO: compare state updates?
@@ -1110,7 +1141,7 @@ mod tests {
             let trie2 = Trie::new(Arc::clone(&store)).recording_reads();
             trie2.get(&root, b"doge").unwrap();
             // record extension, branch and one leaf, but not the other
-            assert_eq!(trie2.recorded_storage().unwrap().nodes.len(), 3);
+            assert_eq!(trie2.recorded_storage().unwrap().nodes.0.len(), 3);
         }
 
         {
@@ -1118,7 +1149,7 @@ mod tests {
             let updates = vec![(b"doge".to_vec(), None)];
             trie2.update(&root, updates.into_iter()).unwrap();
             // record extension, branch and both leaves
-            assert_eq!(trie2.recorded_storage().unwrap().nodes.len(), 4);
+            assert_eq!(trie2.recorded_storage().unwrap().nodes.0.len(), 4);
         }
 
         {
@@ -1126,7 +1157,7 @@ mod tests {
             let updates = vec![(b"dodo".to_vec(), Some(b"asdf".to_vec()))];
             trie2.update(&root, updates.into_iter()).unwrap();
             // record extension and branch, but not leaves
-            assert_eq!(trie2.recorded_storage().unwrap().nodes.len(), 2);
+            assert_eq!(trie2.recorded_storage().unwrap().nodes.0.len(), 2);
         }
     }
 
diff --git a/core/store/src/trie/state_parts.rs b/core/store/src/trie/state_parts.rs
index 2e579b6f411..b29e421a8f9 100644
--- a/core/store/src/trie/state_parts.rs
+++ b/core/store/src/trie/state_parts.rs
@@ -1,12 +1,13 @@
 use std::cmp::min;
 use std::collections::HashMap;
 
+use near_primitives::challenge::PartialState;
 use near_primitives::hash::CryptoHash;
 use near_primitives::types::StateRoot;
 
 use crate::trie::iterator::CrumbStatus;
 use crate::trie::nibble_slice::NibbleSlice;
-use crate::trie::{NodeHandle, TrieNode, TrieNodeWithSize, POISONED_LOCK_ERR};
+use crate::trie::{NodeHandle, RawTrieNodeWithSize, TrieNode, TrieNodeWithSize, POISONED_LOCK_ERR};
 use crate::{PartialStorage, StorageError, Trie, TrieChanges, TrieIterator};
 
 impl Trie {
@@ -21,18 +22,18 @@ impl Trie {
     pub fn get_trie_nodes_for_part(
         &self,
         part_id: u64,
+        num_parts: u64,
         state_root: &StateRoot,
-    ) -> Result<Vec<Vec<u8>>, StorageError> {
-        assert!(part_id < state_root.num_parts);
+    ) -> Result<PartialState, StorageError> {
+        assert!(part_id < num_parts);
         assert!(self.storage.as_caching_storage().is_some());
-        let root_node = self.retrieve_node(&state_root.hash)?;
-        let num_parts = state_root.num_parts;
+        let root_node = self.retrieve_node(&state_root)?;
         let total_size = root_node.memory_usage;
         let size_start = (total_size + num_parts - 1) / num_parts * part_id;
         let size_end = min((total_size + num_parts - 1) / num_parts * (part_id + 1), total_size);
 
         let with_recording = self.recording_reads();
-        with_recording.visit_nodes_for_size_range(&state_root.hash, size_start, size_end)?;
+        with_recording.visit_nodes_for_size_range(&state_root, size_start, size_end)?;
         let recorded = with_recording.recorded_storage().unwrap();
 
         let trie_nodes = recorded.nodes;
@@ -141,28 +142,28 @@ impl Trie {
     /// Validate state part
     ///
     /// # Panics
-    /// part_id must be in [0..state_root.num_parts)
+    /// part_id must be in [0..num_parts)
     ///
     /// # Errors
     /// StorageError::TrieNodeWithMissing if some nodes are missing
     pub fn validate_trie_nodes_for_part(
         state_root: &StateRoot,
         part_id: u64,
-        trie_nodes: &Vec<Vec<u8>>,
+        num_parts: u64,
+        trie_nodes: &PartialState,
     ) -> Result<(), StorageError> {
-        assert!(part_id < state_root.num_parts);
-        let trie = Trie::from_recorded_storage(PartialStorage { nodes: trie_nodes.to_vec() });
+        assert!(part_id < num_parts);
+        let trie = Trie::from_recorded_storage(PartialStorage { nodes: trie_nodes.clone() });
 
-        let root_node = trie.retrieve_node(&state_root.hash)?;
-        let num_parts = state_root.num_parts;
+        let root_node = trie.retrieve_node(&state_root)?;
         let total_size = root_node.memory_usage;
         let size_start = (total_size + num_parts - 1) / num_parts * part_id;
         let size_end = min((total_size + num_parts - 1) / num_parts * (part_id + 1), total_size);
 
-        trie.visit_nodes_for_size_range(&state_root.hash, size_start, size_end)?;
+        trie.visit_nodes_for_size_range(&state_root, size_start, size_end)?;
         let storage = trie.storage.as_partial_storage().unwrap();
 
-        if storage.visited_nodes.lock().expect(POISONED_LOCK_ERR).len() != trie_nodes.len() {
+        if storage.visited_nodes.lock().expect(POISONED_LOCK_ERR).len() != trie_nodes.0.len() {
             // TODO #1603 not actually TrieNodeMissing.
             // The error is that the proof has more nodes than needed.
             return Err(StorageError::TrieNodeMissing);
@@ -257,9 +258,9 @@ impl Trie {
             .flatten()
             .map(|data| data.to_vec())
             .collect::<Vec<_>>();
-        let trie = Trie::from_recorded_storage(PartialStorage { nodes });
+        let trie = Trie::from_recorded_storage(PartialStorage { nodes: PartialState(nodes) });
         let mut insertions = <HashMap<CryptoHash, (Vec<u8>, u32)>>::new();
-        trie.traverse_all_nodes(&state_root.hash, |hash| {
+        trie.traverse_all_nodes(&state_root, |hash| {
             if let Some((_bytes, rc)) = insertions.get_mut(hash) {
                 *rc += 1;
             } else {
@@ -273,11 +274,20 @@ impl Trie {
         insertions.sort();
         Ok(TrieChanges {
             old_root: Default::default(),
-            new_root: state_root.hash,
+            new_root: *state_root,
             insertions,
             deletions: vec![],
         })
     }
+
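+    /// Decodes a serialized `RawTrieNodeWithSize` and returns only the memory usage recorded in it.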
+    pub fn get_memory_usage_from_serialized(bytes: &Vec<u8>) -> Result<u64, StorageError> {
+        match RawTrieNodeWithSize::decode(&bytes) {
+            Ok(value) => Ok(TrieNodeWithSize::from_raw(value).memory_usage),
+            Err(_) => {
+                Err(StorageError::StorageInconsistentState("Failed to decode node".to_string()))
+            }
+        }
+    }
 }
 
 #[cfg(test)]
@@ -295,7 +305,7 @@ mod tests {
 
     #[test]
     fn test_combine_empty_trie_parts() {
-        let state_root = StateRoot { hash: CryptoHash::default(), num_parts: 0 };
+        let state_root = StateRoot::default();
         let _ = Trie::combine_state_parts(&state_root, &vec![]).unwrap();
     }
 
@@ -306,7 +316,7 @@ mod tests {
             let trie = create_trie();
             let trie_changes = gen_changes(&mut rng, 500);
 
-            let (store_update, root) = trie
+            let (store_update, state_root) = trie
                 .update(&Trie::empty_root(), trie_changes.iter().cloned())
                 .unwrap()
                 .into(trie.clone())
@@ -316,18 +326,19 @@ mod tests {
                 // Test that creating and validating are consistent
                 let num_parts = rng.gen_range(1, 10);
                 let part_id = rng.gen_range(0, num_parts);
-                let state_root = StateRoot { hash: root, num_parts };
-                let trie_nodes = trie.get_trie_nodes_for_part(part_id, &state_root).unwrap();
-                Trie::validate_trie_nodes_for_part(&state_root, part_id, &trie_nodes)
+                let trie_nodes =
+                    trie.get_trie_nodes_for_part(part_id, num_parts, &state_root).unwrap();
+                Trie::validate_trie_nodes_for_part(&state_root, part_id, num_parts, &trie_nodes)
                     .expect("validate ok");
             }
 
             {
                 // Test that combining all parts gets all nodes
                 let num_parts = rng.gen_range(2, 10);
-                let state_root = StateRoot { hash: root, num_parts };
                 let parts = (0..num_parts)
-                    .map(|part_id| trie.get_trie_nodes_for_part(part_id, &state_root).unwrap())
+                    .map(|part_id| {
+                        trie.get_trie_nodes_for_part(part_id, num_parts, &state_root).unwrap().0
+                    })
                     .collect::<Vec<_>>();
 
                 let trie_changes = Trie::combine_state_parts(&state_root, &parts).unwrap();
@@ -346,9 +357,10 @@ mod tests {
                 assert_eq!(all_nodes.len(), trie_changes.insertions.len());
                 let size_of_all = all_nodes.iter().map(|node| node.len()).sum::<usize>();
                 Trie::validate_trie_nodes_for_part(
-                    &StateRoot { hash: root, num_parts: 1 },
+                    &state_root,
                     0,
-                    &all_nodes,
+                    1,
+                    &PartialState(all_nodes.clone()),
                 )
                 .expect("validate ok");
 
diff --git a/core/store/src/trie/trie_storage.rs b/core/store/src/trie/trie_storage.rs
index 0f7c971025b..abec3caba66 100644
--- a/core/store/src/trie/trie_storage.rs
+++ b/core/store/src/trie/trie_storage.rs
@@ -1,4 +1,5 @@
 use std::collections::{HashMap, HashSet};
+use std::sync::atomic::{AtomicU64, Ordering};
 use std::sync::{Arc, Mutex};
 
 use cached::{Cached, SizedCache};
@@ -143,3 +144,24 @@ impl TrieStorage for TrieCachingStorage {
         Some(self)
     }
 }
+
+/// Runtime counts the number of touched trie nodes for the purpose of gas calculation.
+/// Trie increments it on every call to TrieStorage::retrieve_raw_bytes()
+#[derive(Default)]
+pub struct TouchedNodesCounter {
+    counter: AtomicU64,
+}
+
+impl TouchedNodesCounter {
+    pub fn increment(&self) {
+        self.counter.fetch_add(1, Ordering::SeqCst);
+    }
+
+    pub fn reset(&self) {
+        self.counter.store(0, Ordering::SeqCst);
+    }
+
+    pub fn get(&self) -> u64 {
+        self.counter.load(Ordering::SeqCst)
+    }
+}
diff --git a/core/store/src/trie/update.rs b/core/store/src/trie/update.rs
index a394d7b58e6..1dc99f49029 100644
--- a/core/store/src/trie/update.rs
+++ b/core/store/src/trie/update.rs
@@ -5,7 +5,7 @@ use std::sync::Arc;
 use kvdb::DBValue;
 use log::debug;
 
-use near_primitives::types::MerkleHash;
+use near_primitives::hash::CryptoHash;
 
 use crate::trie::TrieChanges;
 
@@ -15,7 +15,7 @@ use crate::StorageError;
 /// Provides a way to access Storage and record changes with future commit.
 pub struct TrieUpdate {
     pub trie: Arc<Trie>,
-    root: MerkleHash,
+    root: CryptoHash,
     committed: BTreeMap<Vec<u8>, Option<Vec<u8>>>,
     prospective: BTreeMap<Vec<u8>, Option<Vec<u8>>>,
 }
@@ -25,7 +25,7 @@ pub struct TrieUpdate {
 pub type PrefixKeyValueChanges = HashMap<Vec<u8>, HashMap<Vec<u8>, Option<Vec<u8>>>>;
 
 impl TrieUpdate {
-    pub fn new(trie: Arc<Trie>, root: MerkleHash) -> Self {
+    pub fn new(trie: Arc<Trie>, root: CryptoHash) -> Self {
         TrieUpdate { trie, root, committed: BTreeMap::default(), prospective: BTreeMap::default() }
     }
     pub fn get(&self, key: &[u8]) -> Result<Option<DBValue>, StorageError> {
@@ -130,7 +130,7 @@ impl TrieUpdate {
         TrieUpdateIterator::new(self, prefix, start, Some(end))
     }
 
-    pub fn get_root(&self) -> MerkleHash {
+    pub fn get_root(&self) -> CryptoHash {
         self.root
     }
 }
@@ -300,7 +300,7 @@ mod tests {
     #[test]
     fn trie() {
         let trie = create_trie();
-        let root = MerkleHash::default();
+        let root = CryptoHash::default();
         let mut trie_update = TrieUpdate::new(trie.clone(), root);
         trie_update.set(b"dog".to_vec(), DBValue::from_slice(b"puppy"));
         trie_update.set(b"dog2".to_vec(), DBValue::from_slice(b"puppy"));
@@ -319,37 +319,37 @@ mod tests {
         let trie = create_trie();
 
         // Delete non-existing element.
-        let mut trie_update = TrieUpdate::new(trie.clone(), MerkleHash::default());
+        let mut trie_update = TrieUpdate::new(trie.clone(), CryptoHash::default());
         trie_update.remove(b"dog");
         let (store_update, new_root) = trie_update.finalize().unwrap().into(trie.clone()).unwrap();
         store_update.commit().ok();
-        assert_eq!(new_root, MerkleHash::default());
+        assert_eq!(new_root, CryptoHash::default());
 
         // Add and right away delete element.
-        let mut trie_update = TrieUpdate::new(trie.clone(), MerkleHash::default());
+        let mut trie_update = TrieUpdate::new(trie.clone(), CryptoHash::default());
         trie_update.set(b"dog".to_vec(), DBValue::from_slice(b"puppy"));
         trie_update.remove(b"dog");
         let (store_update, new_root) = trie_update.finalize().unwrap().into(trie.clone()).unwrap();
         store_update.commit().ok();
-        assert_eq!(new_root, MerkleHash::default());
+        assert_eq!(new_root, CryptoHash::default());
 
         // Add, apply changes and then delete element.
-        let mut trie_update = TrieUpdate::new(trie.clone(), MerkleHash::default());
+        let mut trie_update = TrieUpdate::new(trie.clone(), CryptoHash::default());
         trie_update.set(b"dog".to_vec(), DBValue::from_slice(b"puppy"));
         let (store_update, new_root) = trie_update.finalize().unwrap().into(trie.clone()).unwrap();
         store_update.commit().ok();
-        assert_ne!(new_root, MerkleHash::default());
+        assert_ne!(new_root, CryptoHash::default());
         let mut trie_update = TrieUpdate::new(trie.clone(), new_root);
         trie_update.remove(b"dog");
         let (store_update, new_root) = trie_update.finalize().unwrap().into(trie.clone()).unwrap();
         store_update.commit().ok();
-        assert_eq!(new_root, MerkleHash::default());
+        assert_eq!(new_root, CryptoHash::default());
     }
 
     #[test]
     fn trie_iter() {
         let trie = create_trie();
-        let mut trie_update = TrieUpdate::new(trie.clone(), MerkleHash::default());
+        let mut trie_update = TrieUpdate::new(trie.clone(), CryptoHash::default());
         trie_update.set(b"dog".to_vec(), DBValue::from_slice(b"puppy"));
         trie_update.set(b"aaa".to_vec(), DBValue::from_slice(b"puppy"));
         let (store_update, new_root) = trie_update.finalize().unwrap().into(trie.clone()).unwrap();
diff --git a/genesis-tools/genesis-populate/src/lib.rs b/genesis-tools/genesis-populate/src/lib.rs
index 91b4dd4588f..bf61fb2e90a 100644
--- a/genesis-tools/genesis-populate/src/lib.rs
+++ b/genesis-tools/genesis-populate/src/lib.rs
@@ -116,7 +116,7 @@ impl GenesisBuilder {
             .roots
             .iter()
             .map(|(shard_idx, root)| {
-                (*shard_idx, TrieUpdate::new(self.runtime.trie.clone(), root.hash))
+                (*shard_idx, TrieUpdate::new(self.runtime.trie.clone(), *root))
             })
             .collect();
         self.unflushed_records =
@@ -176,7 +176,7 @@ impl GenesisBuilder {
         let (store_update, root) = state_update.finalize()?.into(trie)?;
         store_update.commit()?;
 
-        self.roots.insert(shard_idx, StateRoot { hash: root, num_parts: 9 /* TODO MOO */ });
+        self.roots.insert(shard_idx, root.clone());
         self.state_updates.insert(shard_idx, TrieUpdate::new(self.runtime.trie.clone(), root));
         Ok(())
     }
@@ -190,7 +190,6 @@ impl GenesisBuilder {
         let genesis = Block::genesis(
             genesis_chunks.into_iter().map(|chunk| chunk.header).collect(),
             self.config.genesis_time,
-            self.config.gas_limit,
             self.config.gas_price,
             self.config.total_supply,
         );
@@ -203,6 +202,7 @@ impl GenesisBuilder {
                 CryptoHash::default(),
                 genesis.hash(),
                 genesis.header.inner.height,
+                0,
                 vec![],
                 vec![],
                 vec![],
diff --git a/near/Cargo.toml b/near/Cargo.toml
index 0f191a841aa..efe2ad4200d 100644
--- a/near/Cargo.toml
+++ b/near/Cargo.toml
@@ -1,6 +1,6 @@
 [package]
 name = "near"
-version = "0.4.5"
+version = "0.4.6"
 authors = ["Near Inc <hello@nearprotocol.com>"]
 edition = "2018"
 
diff --git a/near/res/testnet.json b/near/res/testnet.json
index 6df8d89daca..04e6a02b6d1 100644
--- a/near/res/testnet.json
+++ b/near/res/testnet.json
@@ -121468,172 +121468,9 @@
   "chunk_producer_kickout_threshold": 60,
   "gas_price_adjustment_rate": 1, 
   "runtime_config": {
-    "account_length_baseline_cost_per_block": "6561", 
-    "transaction_costs": {
-      "storage_usage_config": {
-        "value_cost_per_byte": 1, 
-        "account_cost": 100, 
-        "data_record_cost": 40, 
-        "key_cost_per_byte": 1, 
-        "code_cost_per_byte": 1
-      }, 
-      "action_creation_config": {
-        "delete_account_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "deploy_contract_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "function_call_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "function_call_cost_per_byte": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "deploy_contract_cost_per_byte": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "create_account_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "delete_key_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "stake_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "transfer_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "add_key_cost": {
-          "full_access_cost": {
-            "send_sir": 1, 
-            "execution": 1, 
-            "send_not_sir": 1
-          }, 
-          "function_call_cost_per_byte": {
-            "send_sir": 1, 
-            "execution": 1, 
-            "send_not_sir": 1
-          }, 
-          "function_call_cost": {
-            "send_sir": 1, 
-            "execution": 1, 
-            "send_not_sir": 1
-          }
-        }
-      }, 
-      "action_receipt_creation_config": {
-        "send_sir": 1, 
-        "execution": 1, 
-        "send_not_sir": 1
-      }, 
-      "data_receipt_creation_config": {
-        "cost_per_byte": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }, 
-        "base_cost": {
-          "send_sir": 1, 
-          "execution": 1, 
-          "send_not_sir": 1
-        }
-      }, 
-      "burnt_gas_reward": {
-        "denominator": 10, 
-        "numerator": 3
-      }
-    }, 
-    "storage_cost_byte_per_block": "1", 
-    "poke_threshold": 60, 
-    "wasm_config": {
-      "max_log_len": 500, 
-      "max_number_logs": 100, 
-      "max_memory_pages": 32, 
-      "max_register_size": 104857600, 
-      "grow_mem_cost": 1, 
-      "ext_costs": {
-        "storage_iter_next_base": 1, 
-        "read_register_byte": 1, 
-        "write_register_base": 1, 
-        "signer_account_id": 1, 
-        "write_memory_base": 1, 
-        "prepaid_gas": 1, 
-        "block_index": 1, 
-        "input_per_byte": 1, 
-        "signer_account_pk": 1, 
-        "storage_read_key_byte": 1, 
-        "storage_iter_next_value_byte": 1, 
-        "storage_remove_key_byte": 1, 
-        "storage_iter_create_range_base": 1, 
-        "storage_write_key_byte": 1, 
-        "sha256_byte": 1, 
-        "storage_has_key_byte": 1, 
-        "promise_and_per_promise": 1, 
-        "signer_account_id_byte": 1, 
-        "write_memory_byte": 1, 
-        "storage_remove_ret_value_byte": 1, 
-        "storage_has_key_base": 1, 
-        "log_per_byte": 1, 
-        "promise_result_base": 1, 
-        "storage_iter_create_key_byte": 1, 
-        "storage_write_value_byte": 1, 
-        "random_seed_base": 1, 
-        "storage_read_base": 1, 
-        "account_balance": 1, 
-        "storage_read_value_byte": 1, 
-        "signer_account_pk_byte": 1, 
-        "promise_result_byte": 1, 
-        "storage_usage": 1, 
-        "attached_deposit": 1, 
-        "promise_and_base": 1, 
-        "current_account_id": 1, 
-        "random_seed_per_byte": 1, 
-        "storage_remove_base": 1, 
-        "promise_results_count": 1, 
-        "predecessor_account_id_byte": 1, 
-        "read_register_base": 1, 
-        "current_account_id_byte": 1, 
-        "sha256": 1, 
-        "used_gas": 1, 
-        "log_base": 1, 
-        "predecessor_account_id": 1, 
-        "block_timestamp": 1, 
-        "write_register_byte": 1, 
-        "input_base": 1, 
-        "storage_iter_next_key_byte": 1, 
-        "read_memory_byte": 1, 
-        "storage_write_base": 1, 
-        "read_memory_base": 1, 
-        "promise_return": 1, 
-        "storage_iter_create_prefix_base": 1
-      }, 
-      "initial_memory_pages": 17, 
-      "max_gas_burnt": 1000000000, 
-      "max_stack_height": 65536, 
-      "regular_op_cost": 1, 
-      "max_number_registers": 100, 
-      "registers_memory_limit": 1073741824
-    }
+    "storage_cost_byte_per_block": "1",
+    "poke_threshold": 60,
+    "account_length_baseline_cost_per_block": "6561"
   }, 
   "total_supply": 1630229887743533121883554
-}
\ No newline at end of file
+}
diff --git a/near/src/runtime.rs b/near/src/runtime.rs
index 41776448d00..0fc87104786 100644
--- a/near/src/runtime.rs
+++ b/near/src/runtime.rs
@@ -1,33 +1,33 @@
 use std::collections::{HashMap, HashSet};
 use std::convert::{TryFrom, TryInto};
 use std::fs::File;
-use std::io::{Cursor, Read, Write};
+use std::io::Read;
 use std::path::{Path, PathBuf};
 use std::sync::{Arc, RwLock};
 
+use borsh::ser::BorshSerialize;
 use borsh::BorshDeserialize;
-use byteorder::{LittleEndian, ReadBytesExt, WriteBytesExt};
-use kvdb::DBValue;
 use log::debug;
 
-use near_chain::types::{ApplyTransactionResult, StatePart, ValidatorSignatureVerificationResult};
+use near_chain::types::{ApplyTransactionResult, ValidatorSignatureVerificationResult};
 use near_chain::{BlockHeader, Error, ErrorKind, RuntimeAdapter, ValidTransaction, Weight};
 use near_crypto::{PublicKey, Signature};
 use near_epoch_manager::{BlockInfo, EpochConfig, EpochManager, RewardCalculator};
 use near_primitives::account::{AccessKey, Account};
 use near_primitives::challenge::ChallengesResult;
 use near_primitives::errors::RuntimeError;
-use near_primitives::hash::CryptoHash;
+use near_primitives::hash::{hash, CryptoHash};
 use near_primitives::receipt::Receipt;
 use near_primitives::serialize::from_base64;
 use near_primitives::sharding::ShardChunkHeader;
 use near_primitives::transaction::SignedTransaction;
 use near_primitives::types::{
-    AccountId, Balance, BlockIndex, EpochId, MerkleHash, ShardId, StateRoot, ValidatorStake,
+    AccountId, Balance, BlockIndex, EpochId, Gas, MerkleHash, ShardId, StateRoot, StateRootNode,
+    ValidatorStake,
 };
 use near_primitives::utils::{prefix_for_access_key, ACCOUNT_DATA_SEPARATOR};
 use near_primitives::views::{
-    AccessKeyInfoView, CallResult, QueryError, QueryResponse, ViewStateResult,
+    AccessKeyInfoView, CallResult, EpochValidatorInfo, QueryError, QueryResponse, ViewStateResult,
 };
 use near_store::{
     get_access_key_raw, PartialStorage, Store, StoreUpdate, Trie, TrieUpdate, WrappedTrieChanges,
@@ -143,13 +143,8 @@ impl NightshadeRuntime {
         let mut file = File::open(roots_files).expect("Failed to open genesis roots file.");
         let mut data = vec![];
         file.read_to_end(&mut data).expect("Failed to read genesis roots file.");
-        let state_root_hashes: Vec<MerkleHash> =
+        let state_roots: Vec<StateRoot> =
             BorshDeserialize::try_from_slice(&data).expect("Failed to deserialize genesis roots");
-        // TODO MOO read new_state_num_parts
-        let mut state_roots = vec![];
-        for hash in state_root_hashes {
-            state_roots.push(StateRoot { hash, num_parts: 1 /* TODO MOO */ })
-        }
         (store_update, state_roots)
     }
 
@@ -211,6 +206,7 @@ impl NightshadeRuntime {
         transactions: &[SignedTransaction],
         last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        gas_limit: Gas,
         challenges_result: &ChallengesResult,
     ) -> Result<ApplyTransactionResult, Error> {
         let validator_accounts_update = {
@@ -278,6 +274,7 @@ impl NightshadeRuntime {
             epoch_length: self.genesis_config.epoch_length,
             gas_price,
             block_timestamp,
+            gas_limit: Some(gas_limit),
         };
 
         let apply_result = self
@@ -366,7 +363,7 @@ impl RuntimeAdapter for NightshadeRuntime {
         } else if has_records {
             self.genesis_state_from_records()
         } else {
-            panic!("Found neither records in the confign nor the state dump file. Either one should be present")
+            panic!("Found neither records in the config nor the state dump file. Either one should be present")
         }
     }
 
@@ -621,6 +618,15 @@ impl RuntimeAdapter for NightshadeRuntime {
         Ok(epoch_manager.get_epoch_inflation(epoch_id)?)
     }
 
+    fn push_final_block_back_if_needed(
+        &self,
+        parent_hash: CryptoHash,
+        last_final_hash: CryptoHash,
+    ) -> Result<CryptoHash, Error> {
+        let mut epoch_manager = self.epoch_manager.write().expect(POISONED_LOCK_ERR);
+        Ok(epoch_manager.push_final_block_back_if_needed(parent_hash, last_final_hash)?)
+    }
+
     fn validate_tx(
         &self,
         block_index: BlockIndex,
@@ -629,12 +635,14 @@ impl RuntimeAdapter for NightshadeRuntime {
         state_root: StateRoot,
         transaction: SignedTransaction,
     ) -> Result<ValidTransaction, RuntimeError> {
-        let mut state_update = TrieUpdate::new(self.trie.clone(), state_root.hash);
+        let mut state_update = TrieUpdate::new(self.trie.clone(), state_root);
         let apply_state = ApplyState {
             block_index,
             epoch_length: self.genesis_config.epoch_length,
             gas_price,
             block_timestamp,
+            // NOTE: transaction verification doesn't use the gas limit
+            gas_limit: None,
         };
 
         if let Err(err) = self.runtime.verify_and_charge_transaction(
@@ -653,15 +661,17 @@ impl RuntimeAdapter for NightshadeRuntime {
         block_index: BlockIndex,
         block_timestamp: u64,
         gas_price: Balance,
+        gas_limit: Gas,
         state_root: StateRoot,
         transactions: Vec<SignedTransaction>,
     ) -> Vec<SignedTransaction> {
-        let mut state_update = TrieUpdate::new(self.trie.clone(), state_root.hash);
+        let mut state_update = TrieUpdate::new(self.trie.clone(), state_root);
         let apply_state = ApplyState {
             block_index,
             epoch_length: self.genesis_config.epoch_length,
             gas_price,
             block_timestamp,
+            gas_limit: Some(gas_limit),
         };
         transactions
             .into_iter()
@@ -678,6 +688,7 @@ impl RuntimeAdapter for NightshadeRuntime {
         parent_hash: CryptoHash,
         current_hash: CryptoHash,
         block_index: BlockIndex,
+        last_finalized_height: BlockIndex,
         proposals: Vec<ValidatorStake>,
         slashed_validators: Vec<AccountId>,
         chunk_mask: Vec<bool>,
@@ -696,6 +707,7 @@ impl RuntimeAdapter for NightshadeRuntime {
         }
         let block_info = BlockInfo::new(
             block_index,
+            last_finalized_height,
             parent_hash,
             proposals,
             chunk_mask,
@@ -725,6 +737,7 @@ impl RuntimeAdapter for NightshadeRuntime {
         transactions: &[SignedTransaction],
         last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        gas_limit: Gas,
         challenges: &ChallengesResult,
         generate_storage_proof: bool,
     ) -> Result<ApplyTransactionResult, Error> {
@@ -735,7 +748,7 @@ impl RuntimeAdapter for NightshadeRuntime {
         };
         match self.process_state_update(
             trie,
-            state_root.hash,
+            *state_root,
             shard_id,
             block_index,
             block_timestamp,
@@ -744,6 +757,7 @@ impl RuntimeAdapter for NightshadeRuntime {
             transactions,
             last_validator_proposals,
             gas_price,
+            gas_limit,
             challenges,
         ) {
             Ok(result) => Ok(result),
@@ -769,12 +783,13 @@ impl RuntimeAdapter for NightshadeRuntime {
         transactions: &[SignedTransaction],
         last_validator_proposals: &[ValidatorStake],
         gas_price: Balance,
+        gas_limit: Gas,
         challenges: &ChallengesResult,
     ) -> Result<ApplyTransactionResult, Error> {
         let trie = Arc::new(Trie::from_recorded_storage(partial_storage));
         self.process_state_update(
             trie.clone(),
-            state_root.hash,
+            *state_root,
             shard_id,
             block_index,
             block_timestamp,
@@ -783,6 +798,7 @@ impl RuntimeAdapter for NightshadeRuntime {
             transactions,
             last_validator_proposals,
             gas_price,
+            gas_limit,
             challenges,
         )
     }
@@ -792,7 +808,7 @@ impl RuntimeAdapter for NightshadeRuntime {
         state_root: &StateRoot,
         height: BlockIndex,
         block_timestamp: u64,
-        block_hash: &CryptoHash,
+        _block_hash: &CryptoHash,
         path_parts: Vec<&str>,
         data: &[u8],
     ) -> Result<QueryResponse, Box<dyn std::error::Error>> {
@@ -800,16 +816,14 @@ impl RuntimeAdapter for NightshadeRuntime {
             return Err("Path must contain at least single token".into());
         }
         match path_parts[0] {
-            "account" => {
-                match self.view_account(state_root.hash, &AccountId::from(path_parts[1])) {
-                    Ok(r) => Ok(QueryResponse::ViewAccount(r.into())),
-                    Err(e) => Err(e),
-                }
-            }
+            "account" => match self.view_account(*state_root, &AccountId::from(path_parts[1])) {
+                Ok(r) => Ok(QueryResponse::ViewAccount(r.into())),
+                Err(e) => Err(e),
+            },
             "call" => {
                 let mut logs = vec![];
                 match self.call_function(
-                    state_root.hash,
+                    *state_root,
                     height,
                     block_timestamp,
                     &AccountId::from(path_parts[1]),
@@ -824,7 +838,7 @@ impl RuntimeAdapter for NightshadeRuntime {
                 }
             }
             "contract" => {
-                match self.view_state(state_root.hash, &AccountId::from(path_parts[1]), data) {
+                match self.view_state(*state_root, &AccountId::from(path_parts[1]), data) {
                     Ok(result) => Ok(QueryResponse::ViewState(result)),
                     Err(err) => Ok(QueryResponse::Error(QueryError {
                         error: err.to_string(),
@@ -834,21 +848,19 @@ impl RuntimeAdapter for NightshadeRuntime {
             }
             "access_key" => {
                 let result = if path_parts.len() == 2 {
-                    self.view_access_keys(state_root.hash, &AccountId::from(path_parts[1])).map(
-                        |r| {
-                            QueryResponse::AccessKeyList(
-                                r.into_iter()
-                                    .map(|(public_key, access_key)| AccessKeyInfoView {
-                                        public_key,
-                                        access_key: access_key.into(),
-                                    })
-                                    .collect(),
-                            )
-                        },
-                    )
+                    self.view_access_keys(*state_root, &AccountId::from(path_parts[1])).map(|r| {
+                        QueryResponse::AccessKeyList(
+                            r.into_iter()
+                                .map(|(public_key, access_key)| AccessKeyInfoView {
+                                    public_key,
+                                    access_key: access_key.into(),
+                                })
+                                .collect(),
+                        )
+                    })
                 } else {
                     self.view_access_key(
-                        state_root.hash,
+                        *state_root,
                         &AccountId::from(path_parts[1]),
                         &PublicKey::try_from(path_parts[2])?,
                     )
@@ -862,81 +874,89 @@ impl RuntimeAdapter for NightshadeRuntime {
                     })),
                 }
             }
-            "validators" => {
-                let mut epoch_manager = self.epoch_manager.write().expect(POISONED_LOCK_ERR);
-                match epoch_manager.get_validator_info(block_hash) {
-                    Ok(info) => Ok(QueryResponse::Validators(info)),
-                    Err(e) => {
-                        Ok(QueryResponse::Error(QueryError { error: e.to_string(), logs: vec![] }))
-                    }
-                }
-            }
             _ => Err(format!("Unknown path {}", path_parts[0]).into()),
         }
     }
 
-    fn obtain_state_part(
+    fn get_validator_info(&self, block_hash: &CryptoHash) -> Result<EpochValidatorInfo, Error> {
+        let mut epoch_manager = self.epoch_manager.write().expect(POISONED_LOCK_ERR);
+        epoch_manager.get_validator_info(block_hash).map_err(|e| e.into())
+    }
+
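+    /// Borsh-serializes the `PartialState` covering the requested part of the trie at `state_root`.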
+    fn obtain_state_part(&self, state_root: &StateRoot, part_id: u64, num_parts: u64) -> Vec<u8> {
+        assert!(part_id < num_parts);
+        self.trie
+            .get_trie_nodes_for_part(part_id, num_parts, state_root)
+            .expect("storage should not fail")
+            .try_to_vec()
+            .expect("serializer should not fail")
+    }
+
+    fn validate_state_part(
         &self,
-        shard_id: ShardId,
-        part_id: u64,
         state_root: &StateRoot,
-    ) -> Result<(StatePart, Vec<u8>), Box<dyn std::error::Error>> {
-        if part_id > 0 {
-            /* TODO MOO */
-            return Ok((StatePart { shard_id, part_id, data: vec![] }, vec![]));
+        part_id: u64,
+        num_parts: u64,
+        data: &Vec<u8>,
+    ) -> bool {
+        assert!(part_id < num_parts);
+        match BorshDeserialize::try_from_slice(data) {
+            Ok(trie_nodes) => match Trie::validate_trie_nodes_for_part(
+                state_root,
+                part_id,
+                num_parts,
+                &trie_nodes,
+            ) {
+                Ok(_) => true,
+                // Storage error should not happen
+                Err(_) => false,
+            },
+            // Deserialization error means we've got the data from malicious peer
+            Err(_) => false,
         }
-        // TODO(1052): make sure state_root is present in the trie.
-        // create snapshot.
-        let mut result = vec![];
-        let mut cursor = Cursor::new(&mut result);
-        for item in self.trie.iter(&state_root.hash)? {
-            let (key, value) = item?;
-            cursor.write_u32::<LittleEndian>(key.len() as u32)?;
-            cursor.write_all(&key)?;
-            cursor.write_u32::<LittleEndian>(value.len() as u32)?;
-            cursor.write_all(value.as_ref())?;
+    }
+
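+    /// Deserializes the previously validated state parts, combines them into `TrieChanges`,
+    /// and commits the resulting trie nodes to the store.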
+    fn confirm_state(&self, state_root: &StateRoot, data: &Vec<Vec<u8>>) -> Result<(), Error> {
+        let mut parts = vec![];
+        for part in data {
+            parts.push(
+                BorshDeserialize::try_from_slice(part)
+                    .expect("Part was already validated earlier, so could never fail here"),
+            );
         }
-        // TODO(1048): Save on disk an snapshot, split into chunks and compressed. Send chunks instead of single blob.
-        debug!(target: "runtime", "Read state part #{} for shard #{} @ {}, size = {}", part_id, shard_id, state_root.hash, result.len());
-        // TODO add proof in Nightshade Runtime
-        Ok((StatePart { shard_id, part_id, data: result }, vec![]))
+        let trie_changes = Trie::combine_state_parts(&state_root, &parts)
+            .expect("combine_state_parts is guaranteed to succeed when each part is valid");
+        // TODO clean old states
+        let trie = self.trie.clone();
+        let (store_update, _) = trie_changes.into(trie).expect("TrieChanges::into never fails");
+        Ok(store_update.commit()?)
+    }
+
+    fn get_state_root_node(&self, state_root: &StateRoot) -> StateRootNode {
+        self.trie.retrieve_root_node(state_root).expect("Failed to get root node")
     }
 
-    fn accept_state_part(
+    fn validate_state_root_node(
         &self,
+        state_root_node: &StateRootNode,
         state_root: &StateRoot,
-        part: &StatePart,
-        _proof: &Vec<u8>,
-    ) -> Result<(), Box<dyn std::error::Error>> {
-        if part.part_id > 0 {
-            /* TODO MOO */
-            return Ok(());
-        }
-        debug!(target: "runtime", "Writing state part #{} for shard #{} @ {}, size = {}", part.part_id, part.shard_id, state_root.hash, part.data.len());
-        // TODO prove that the part is valid
-        let mut state_update = TrieUpdate::new(self.trie.clone(), CryptoHash::default());
-        let state_part_len = part.data.len();
-        let mut cursor = Cursor::new(part.data.clone());
-        while cursor.position() < state_part_len as u64 {
-            let key_len = cursor.read_u32::<LittleEndian>()? as usize;
-            let mut key = vec![0; key_len];
-            cursor.read_exact(&mut key)?;
-            let value_len = cursor.read_u32::<LittleEndian>()? as usize;
-            let mut value = vec![0; value_len];
-            cursor.read_exact(&mut value)?;
-            state_update.set(key, DBValue::from_slice(&value));
-        }
-        let (store_update, root) = state_update.finalize()?.into(self.trie.clone())?;
-        if root != state_root.hash {
-            return Err("Invalid state root".into());
+    ) -> bool {
+        if hash(&state_root_node.data) != *state_root {
+            false
+        } else {
+            match Trie::get_memory_usage_from_serialized(&state_root_node.data) {
+                Ok(memory_usage) => {
+                    if memory_usage != state_root_node.memory_usage {
+                        // Invalid value of memory_usage
+                        false
+                    } else {
+                        true
+                    }
+                }
+                Err(_) => false, // Invalid state_root_node
+            }
         }
-        store_update.commit()?;
-        Ok(())
-    }
-
-    fn confirm_state(&self, _state_root: &StateRoot) -> Result<bool, Error> {
-        // TODO(1457): approve that all parts are here
-        Ok(true)
     }
 }
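
For orientation, the receive side of state sync now composes these trait methods in a fixed order: check the advertised root node, check every downloaded part, and only then rebuild and commit the trie. Below is a minimal sketch of that flow, assuming `runtime` implements the `RuntimeAdapter` changes above; the helper name and the bool-based error handling are illustrative, not part of this change.

    // Sketch only: driving the new state-sync methods once a root node and parts
    // have been downloaded from peers.
    fn try_apply_downloaded_state(
        runtime: &dyn RuntimeAdapter,
        state_root: &StateRoot,
        root_node: &StateRootNode,
        parts: &Vec<Vec<u8>>,
    ) -> bool {
        // The advertised root node must hash to `state_root` and report a consistent memory_usage.
        if !runtime.validate_state_root_node(root_node, state_root) {
            return false;
        }
        let num_parts = parts.len() as u64;
        // Each part is validated independently, so a single malicious peer cannot poison the sync.
        for (part_id, part) in parts.iter().enumerate() {
            if !runtime.validate_state_part(state_root, part_id as u64, num_parts, part) {
                return false;
            }
        }
        // Only after every part passes validation is the trie rebuilt and committed.
        runtime.confirm_state(state_root, parts).is_ok()
    }
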
 
@@ -1025,7 +1045,7 @@ mod test {
     use near_chain::{ReceiptResult, RuntimeAdapter, Tip};
     use near_client::BlockProducer;
     use near_crypto::{InMemorySigner, KeyType, Signer};
-    use near_primitives::block::Weight;
+    use near_primitives::block::WeightAndScore;
     use near_primitives::challenge::ChallengesResult;
     use near_primitives::hash::{hash, CryptoHash};
     use near_primitives::receipt::Receipt;
@@ -1034,9 +1054,9 @@ mod test {
         Action, CreateAccountAction, SignedTransaction, StakeAction,
     };
     use near_primitives::types::{
-        AccountId, Balance, BlockIndex, EpochId, Nonce, ShardId, StateRoot, ValidatorStake,
+        AccountId, Balance, BlockIndex, EpochId, Gas, Nonce, ShardId, StateRoot, ValidatorStake,
     };
-    use near_primitives::views::{AccountView, EpochValidatorInfo, QueryResponse};
+    use near_primitives::views::{AccountView, CurrentEpochValidatorInfo, EpochValidatorInfo};
     use near_store::create_store;
     use node_runtime::adapter::ViewRuntimeAdapter;
     use node_runtime::config::RuntimeConfig;
@@ -1080,6 +1100,7 @@ mod test {
             transactions: &[SignedTransaction],
             last_proposals: &[ValidatorStake],
             gas_price: Balance,
+            gas_limit: Gas,
             challenges: &ChallengesResult,
         ) -> (StateRoot, Vec<ValidatorStake>, ReceiptResult) {
             let result = self
@@ -1094,6 +1115,7 @@ mod test {
                     transactions,
                     last_proposals,
                     gas_price,
+                    gas_limit,
                     challenges,
                 )
                 .unwrap();
@@ -1152,6 +1174,7 @@ mod test {
                     CryptoHash::default(),
                     genesis_hash,
                     0,
+                    0,
                     vec![],
                     vec![],
                     vec![],
@@ -1167,7 +1190,7 @@ mod test {
                     prev_block_hash: CryptoHash::default(),
                     height: 0,
                     epoch_id: EpochId::default(),
-                    total_weight: Weight::default(),
+                    weight_and_score: WeightAndScore::from_ints(0, 0),
                 },
                 state_roots,
                 last_receipts: HashMap::default(),
@@ -1200,6 +1223,7 @@ mod test {
                     &transactions[i as usize],
                     self.last_shard_proposals.get(&i).unwrap_or(&vec![]),
                     self.runtime.genesis_config.gas_price,
+                    u64::max_value(),
                     &challenges_result,
                 );
                 self.state_roots[i as usize] = state_root;
@@ -1217,6 +1241,7 @@ mod test {
                     self.head.last_block_hash,
                     new_hash,
                     self.head.height + 1,
+                    self.head.height.saturating_sub(1),
                     self.last_proposals.clone(),
                     challenges_result,
                     chunk_mask,
@@ -1232,7 +1257,10 @@ mod test {
                 prev_block_hash: self.head.last_block_hash,
                 height: self.head.height + 1,
                 epoch_id: self.runtime.get_epoch_id_from_prev_block(&new_hash).unwrap(),
-                total_weight: Weight::from(self.head.total_weight.to_num() + 1),
+                weight_and_score: WeightAndScore::from_ints(
+                    self.head.weight_and_score.weight.to_num() + 1,
+                    self.head.weight_and_score.score.to_num(),
+                ),
             };
         }
 
@@ -1244,7 +1272,7 @@ mod test {
         pub fn view_account(&self, account_id: &str) -> AccountView {
             let shard_id = self.runtime.account_id_to_shard_id(&account_id.to_string());
             self.runtime
-                .view_account(self.state_roots[shard_id as usize].hash, &account_id.to_string())
+                .view_account(self.state_roots[shard_id as usize], &account_id.to_string())
                 .unwrap()
                 .into()
         }
@@ -1623,7 +1651,8 @@ mod test {
         let staking_transaction = stake(1, &signer, &block_producers[0], TESTING_INIT_STAKE + 1);
         env.step_default(vec![staking_transaction]);
         env.step_default(vec![]);
-        let (state_part, proof) = env.runtime.obtain_state_part(0, 0, &env.state_roots[0]).unwrap();
+        let state_part = env.runtime.obtain_state_part(&env.state_roots[0], 0, 1);
+        let root_node = env.runtime.get_state_root_node(&env.state_roots[0]);
         let mut new_env =
             TestEnv::new("test_state_sync", vec![validators.clone()], 2, vec![], vec![]);
         for i in 1..=2 {
@@ -1644,6 +1673,7 @@ mod test {
                     prev_hash,
                     cur_hash,
                     i,
+                    i.saturating_sub(2),
                     new_env.last_proposals.clone(),
                     vec![],
                     vec![true],
@@ -1657,7 +1687,15 @@ mod test {
             new_env.head.prev_block_hash = prev_hash;
             new_env.last_proposals = proposals;
         }
-        new_env.runtime.accept_state_part(&env.state_roots[0], &state_part, &proof).unwrap();
+        assert!(new_env.runtime.validate_state_root_node(&root_node, &env.state_roots[0]));
+        let mut root_node_wrong = root_node.clone();
+        root_node_wrong.memory_usage += 1;
+        assert!(!new_env.runtime.validate_state_root_node(&root_node_wrong, &env.state_roots[0]));
+        root_node_wrong.data = vec![123];
+        assert!(!new_env.runtime.validate_state_root_node(&root_node_wrong, &env.state_roots[0]));
+        assert!(!new_env.runtime.validate_state_part(&StateRoot::default(), 0, 1, &state_part));
+        assert!(new_env.runtime.validate_state_part(&env.state_roots[0], 0, 1, &state_part));
+        new_env.runtime.confirm_state(&env.state_roots[0], &vec![state_part]).unwrap();
         new_env.state_roots[0] = env.state_roots[0].clone();
         for _ in 3..=5 {
             new_env.step_default(vec![]);
@@ -1761,57 +1799,44 @@ mod test {
             .unwrap()
             .validators
             .clone();
-        let response = env
-            .runtime
-            .query(&env.state_roots[0], 2, 0, &env.head.last_block_hash, vec!["validators"], &[])
-            .unwrap();
-        match response {
-            QueryResponse::Validators(info) => assert_eq!(
-                info,
-                EpochValidatorInfo {
-                    current_validators: current_validators
-                        .clone()
-                        .into_iter()
-                        .map(Into::into)
-                        .collect(),
-                    next_validators: current_validators
-                        .clone()
-                        .into_iter()
-                        .map(Into::into)
-                        .collect(),
-                    current_proposals: vec![ValidatorStake {
-                        account_id: "test1".to_string(),
-                        public_key: block_producers[0].signer.public_key(),
-                        amount: 0
-                    }
-                    .into()]
+        let current_epoch_validator_info = current_validators
+            .clone()
+            .into_iter()
+            .map(|v| CurrentEpochValidatorInfo {
+                account_id: v.account_id,
+                is_slashed: false,
+                stake: v.amount,
+                num_missing_blocks: 0,
+            })
+            .collect::<Vec<_>>();
+        let response = env.runtime.get_validator_info(&env.head.last_block_hash).unwrap();
+        assert_eq!(
+            response,
+            EpochValidatorInfo {
+                current_validators: current_epoch_validator_info.clone(),
+                next_validators: current_validators.clone().into_iter().map(Into::into).collect(),
+                current_proposals: vec![ValidatorStake {
+                    account_id: "test1".to_string(),
+                    public_key: block_producers[0].signer.public_key(),
+                    amount: 0
                 }
-            ),
-            _ => panic!("wrong response"),
-        }
+                .into()]
+            }
+        );
         env.step_default(vec![]);
-        let response = env
-            .runtime
-            .query(&env.state_roots[0], 3, 0, &env.head.last_block_hash, vec!["validators"], &[])
-            .unwrap();
-        match response {
-            QueryResponse::Validators(info) => {
-                let v: Vec<ValidatorStake> =
-                    info.current_validators.clone().into_iter().map(Into::into).collect();
-                assert_eq!(v, current_validators);
-                assert_eq!(
-                    info.next_validators,
-                    vec![ValidatorStake {
-                        account_id: "test2".to_string(),
-                        public_key: block_producers[1].signer.public_key(),
-                        amount: TESTING_INIT_STAKE + per_epoch_per_validator_reward
-                    }
-                    .into()]
-                );
-                assert!(info.current_proposals.is_empty());
+        let response = env.runtime.get_validator_info(&env.head.last_block_hash).unwrap();
+
+        assert_eq!(response.current_validators, current_epoch_validator_info);
+        assert_eq!(
+            response.next_validators,
+            vec![ValidatorStake {
+                account_id: "test2".to_string(),
+                public_key: block_producers[1].signer.public_key(),
+                amount: TESTING_INIT_STAKE + per_epoch_per_validator_reward
             }
-            _ => panic!("wrong response"),
-        }
+            .into()]
+        );
+        assert!(response.current_proposals.is_empty());
     }
 
     #[test]
@@ -1939,8 +1964,13 @@ mod test {
             10,
             CryptoHash::default(),
         );
-        let apply_state =
-            ApplyState { block_index: 1, epoch_length: 2, gas_price: 10, block_timestamp: 100 };
+        let apply_state = ApplyState {
+            block_index: 1,
+            epoch_length: 2,
+            gas_price: 10,
+            block_timestamp: 100,
+            gas_limit: None,
+        };
         let mut prefixes = HashSet::new();
         prefixes.insert(prefix);
         let apply_result = env
@@ -1948,7 +1978,7 @@ mod test {
             .runtime
             .apply(
                 env.runtime.trie.clone(),
-                env.state_roots[0].hash,
+                env.state_roots[0],
                 &None,
                 &apply_state,
                 &[],
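
The test above passes `gas_limit: None`, which presumably disables the new per-chunk cap for this kind of direct application; a production caller would instead thread the chunk's gas limit through. A hedged sketch of the `Some` variant follows; the concrete limit value is made up.

    // Illustrative only: the same ApplyState as in the test, but with the optional
    // per-chunk gas limit set. Presumably the runtime stops applying further
    // transactions once this much gas has been burnt; `None` leaves it uncapped.
    fn capped_apply_state() -> ApplyState {
        ApplyState {
            block_index: 1,
            epoch_length: 2,
            gas_price: 10,
            block_timestamp: 100,
            gas_limit: Some(1_000_000_000_000),
        }
    }
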
diff --git a/near/src/shard_tracker.rs b/near/src/shard_tracker.rs
index 39fa48991c4..8b3d91aade1 100644
--- a/near/src/shard_tracker.rs
+++ b/near/src/shard_tracker.rs
@@ -285,6 +285,7 @@ mod tests {
                 &cur_h,
                 BlockInfo::new(
                     index,
+                    0,
                     prev_h,
                     proposals,
                     vec![],
diff --git a/near/tests/tx_propagation.rs b/near/tests/rpc_nodes.rs
similarity index 84%
rename from near/tests/tx_propagation.rs
rename to near/tests/rpc_nodes.rs
index dfd308581f3..86dce7569b9 100644
--- a/near/tests/tx_propagation.rs
+++ b/near/tests/rpc_nodes.rs
@@ -8,7 +8,7 @@ use near_client::{GetBlock, TxStatus};
 use near_crypto::{InMemorySigner, KeyType};
 use near_jsonrpc::client::new_client;
 use near_network::test_utils::WaitOrTimeout;
-use near_primitives::serialize::to_base64;
+use near_primitives::serialize::{to_base, to_base64};
 use near_primitives::test_utils::{heavy_test, init_integration_logger};
 use near_primitives::transaction::SignedTransaction;
 use near_primitives::views::{FinalExecutionStatus, QueryResponse};
@@ -257,3 +257,51 @@ fn test_rpc_routing() {
         system.run().unwrap();
     });
 }
+
+#[test]
+fn test_get_validator_info_rpc() {
+    init_integration_logger();
+    heavy_test(|| {
+        let system = System::new("NEAR");
+        let num_nodes = 1;
+        let dirs = (0..num_nodes)
+            .map(|i| TempDir::new(&format!("get_validator_info{}", i)).unwrap())
+            .collect::<Vec<_>>();
+        let (_, rpc_addrs, clients) = start_nodes(1, &dirs, 1, 0, 10);
+        let view_client = clients[0].1.clone();
+
+        WaitOrTimeout::new(
+            Box::new(move |_ctx| {
+                let rpc_addrs_copy = rpc_addrs.clone();
+                actix::spawn(view_client.send(GetBlock::Best).then(move |res| {
+                    let res = res.unwrap().unwrap();
+                    if res.header.height > 1 {
+                        let mut client = new_client(&format!("http://{}", rpc_addrs_copy[0]));
+                        let block_hash = res.header.hash;
+                        actix::spawn(
+                            client
+                                .validators(to_base(&block_hash))
+                                .map_err(|err| {
+                                    panic!(format!("error: {:?}", err));
+                                })
+                                .map(move |result| {
+                                    assert_eq!(result.current_validators.len(), 1);
+                                    assert!(result
+                                        .current_validators
+                                        .iter()
+                                        .any(|r| r.account_id == "near.0".to_string()));
+                                    System::current().stop();
+                                }),
+                        );
+                    }
+                    futures::future::ok(())
+                }));
+            }),
+            100,
+            20000,
+        )
+        .start();
+
+        system.run().unwrap();
+    });
+}
diff --git a/near/tests/stake_nodes.rs b/near/tests/stake_nodes.rs
index 9ce9d5edf0e..eed228e1ad2 100644
--- a/near/tests/stake_nodes.rs
+++ b/near/tests/stake_nodes.rs
@@ -115,7 +115,11 @@ fn test_stake_nodes() {
         WaitOrTimeout::new(
             Box::new(move |_ctx| {
                 actix::spawn(test_nodes[0].client.send(Status {}).then(|res| {
-                    if res.unwrap().unwrap().validators
+                    let res = res.unwrap();
+                    if res.is_err() {
+                        return futures::future::ok(());
+                    }
+                    if res.unwrap().validators
                         == vec![
                             ValidatorInfo { account_id: "near.1".to_string(), is_slashed: false },
                             ValidatorInfo { account_id: "near.0".to_string(), is_slashed: false },
@@ -197,7 +201,11 @@ fn test_validator_kickout() {
                             is_slashed: false,
                         })
                         .collect();
-                    if res.unwrap().unwrap().validators == expected {
+                    let res = res.unwrap();
+                    if res.is_err() {
+                        return futures::future::ok(());
+                    }
+                    if res.unwrap().validators == expected {
                         for i in 0..num_nodes / 2 {
                             let mark = finalized_mark1[i].clone();
                             actix::spawn(
@@ -337,7 +345,11 @@ fn test_validator_join() {
                         ValidatorInfo { account_id: "near.0".to_string(), is_slashed: false },
                         ValidatorInfo { account_id: "near.2".to_string(), is_slashed: false },
                     ];
-                    if res.unwrap().unwrap().validators == expected {
+                    let res = res.unwrap();
+                    if res.is_err() {
+                        return futures::future::ok(());
+                    }
+                    if res.unwrap().validators == expected {
                         actix::spawn(
                             test_node1
                                 .view_client
diff --git a/pytest/tests/sanity/block_production.py b/pytest/tests/sanity/block_production.py
index 26f9ac14a26..9de3b1d9fff 100644
--- a/pytest/tests/sanity/block_production.py
+++ b/pytest/tests/sanity/block_production.py
@@ -13,14 +13,14 @@
 BLOCKS = 50
 
 # Local:
-nodes = start_cluster(4, 0, 4, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["validator_kickout_threshold", 80]], {})
+nodes = start_cluster(4, 0, 4, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {})
 
 # Remote:
 # nodes = start_cluster(4, 0, 4, {'local': False, 'near_root': '../target/debug/',
 #     'remote': {
 #         'instance_name': 'near-pytest',
 #     }
-# }, [["epoch_length", 10], ["validator_kickout_threshold", 80]], {})
+# }, [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {})
 
 started = time.time()
 
diff --git a/pytest/tests/sanity/restart.py b/pytest/tests/sanity/restart.py
index e8e2a61e39f..1f15d6ec36a 100644
--- a/pytest/tests/sanity/restart.py
+++ b/pytest/tests/sanity/restart.py
@@ -13,7 +13,7 @@
 BLOCKS1 = 20
 BLOCKS2 = 40
 
-nodes = start_cluster(2, 0, 2, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["validator_kickout_threshold", 80]], {})
+nodes = start_cluster(2, 0, 2, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {})
 
 started = time.time()
 
diff --git a/pytest/tests/sanity/rpc_tx_forwarding.py b/pytest/tests/sanity/rpc_tx_forwarding.py
index 13580d97f0e..9f51c20dd99 100644
--- a/pytest/tests/sanity/rpc_tx_forwarding.py
+++ b/pytest/tests/sanity/rpc_tx_forwarding.py
@@ -12,7 +12,7 @@
 from utils import TxContext
 from transaction import sign_payment_tx
 
-nodes = start_cluster(2, 2, 4, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["validator_kickout_threshold", 70]], {3: {"tracked_shards": [0, 1, 2, 3]}})
+nodes = start_cluster(2, 2, 4, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {3: {"tracked_shards": [0, 1, 2, 3]}})
 
 started = time.time()
 
diff --git a/pytest/tests/sanity/skip_epoch.py b/pytest/tests/sanity/skip_epoch.py
index 3cb4ecbbd0a..14fd8dd7792 100644
--- a/pytest/tests/sanity/skip_epoch.py
+++ b/pytest/tests/sanity/skip_epoch.py
@@ -15,7 +15,7 @@
 TWENTY_FIVE = 25
 
 config = {'local': True, 'near_root': '../target/debug/'}
-near_root, node_dirs = init_cluster(2, 1, 2, config, [["max_inflation_rate", 0], ["epoch_length", 7], ["validator_kickout_threshold", 80]], {2: {"tracked_shards": [0, 1]}})
+near_root, node_dirs = init_cluster(2, 1, 2, config, [["max_inflation_rate", 0], ["epoch_length", 7], ["block_producer_kickout_threshold", 80]], {2: {"tracked_shards": [0, 1]}})
 
 started = time.time()
 
diff --git a/pytest/tests/sanity/staking1.py b/pytest/tests/sanity/staking1.py
index 6da0390b7e1..2244c11017a 100644
--- a/pytest/tests/sanity/staking1.py
+++ b/pytest/tests/sanity/staking1.py
@@ -12,7 +12,7 @@
 TIMEOUT = 150
 
 config = {'local': True, 'near_root': '../target/debug/'}
-nodes = start_cluster(2, 1, 1, config, [["epoch_length", 5], ["validator_kickout_threshold", 40]], {2: {"tracked_shards": [0]}})
+nodes = start_cluster(2, 1, 1, config, [["epoch_length", 5], ["block_producer_kickout_threshold", 40]], {2: {"tracked_shards": [0]}})
 
 started = time.time()
 
diff --git a/pytest/tests/sanity/staking2.py b/pytest/tests/sanity/staking2.py
index be4d6983436..7cc64a7a3b1 100644
--- a/pytest/tests/sanity/staking2.py
+++ b/pytest/tests/sanity/staking2.py
@@ -65,7 +65,7 @@ def doit(seq = []):
     sequence = seq
 
     config = {'local': True, 'near_root': '../target/debug/'}
-    nodes = start_cluster(2, 1, 1, config, [["epoch_length", EPOCH_LENGTH], ["validator_kickout_threshold", 40]], {2: {"tracked_shards": [0]}})
+    nodes = start_cluster(2, 1, 1, config, [["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 40]], {2: {"tracked_shards": [0]}})
 
     started = time.time()
     last_iter = started
diff --git a/pytest/tests/sanity/state_sync.py b/pytest/tests/sanity/state_sync.py
index 35b6c0f89a9..1aeeb0ad635 100644
--- a/pytest/tests/sanity/state_sync.py
+++ b/pytest/tests/sanity/state_sync.py
@@ -26,7 +26,7 @@
 TIMEOUT = 150 + START_AT_BLOCK * 10
 
 config = {'local': True, 'near_root': '../target/debug/'}
-near_root, node_dirs = init_cluster(2, 1, 1, config, [["epoch_length", 10], ["validator_kickout_threshold", 80]], {2: {"tracked_shards": [0]}})
+near_root, node_dirs = init_cluster(2, 1, 1, config, [["max_inflation_rate", 0], ["epoch_length", 10], ["block_producer_kickout_threshold", 80]], {2: {"tracked_shards": [0]}})
 
 started = time.time()
 
diff --git a/pytest/tests/sanity/transactions.py b/pytest/tests/sanity/transactions.py
index 0a3245593a8..727b110a085 100644
--- a/pytest/tests/sanity/transactions.py
+++ b/pytest/tests/sanity/transactions.py
@@ -15,7 +15,7 @@
 
 TIMEOUT = 240
 
-nodes = start_cluster(4, 0, 4, {'local': True, 'near_root': '../target/debug/'}, [["epoch_length", 10], ["validator_kickout_threshold", 70]], {})
+nodes = start_cluster(4, 0, 4, {'local': True, 'near_root': '../target/debug/'}, [["max_inflation_rate", 0], ["epoch_length", 10], ["block_producer_kickout_threshold", 70]], {})
 
 started = time.time()
 
diff --git a/pytest/tests/stress/stress.py b/pytest/tests/stress/stress.py
index cf6a4f890b5..2d77e3359d6 100644
--- a/pytest/tests/stress/stress.py
+++ b/pytest/tests/stress/stress.py
@@ -403,7 +403,7 @@ def doit(s, n, N, k, monkeys, timeout):
         # make all the observers track all the shards
         local_config_changes[i] = {"tracked_shards": list(range(s))}
 
-    near_root, node_dirs = init_cluster(N, s, k + 1, config, [["max_inflation_rate", 0], ["epoch_length", EPOCH_LENGTH], ["validator_kickout_threshold", 75]], local_config_changes)
+    near_root, node_dirs = init_cluster(N, s, k + 1, config, [["max_inflation_rate", 0], ["epoch_length", EPOCH_LENGTH], ["block_producer_kickout_threshold", 75]], local_config_changes)
 
     started = time.time()
 
diff --git a/runtime/near-vm-logic/Cargo.toml b/runtime/near-vm-logic/Cargo.toml
index 6affb8ffb9d..227dffe4fb6 100644
--- a/runtime/near-vm-logic/Cargo.toml
+++ b/runtime/near-vm-logic/Cargo.toml
@@ -13,6 +13,7 @@ This crate implements the specification of the interface that Near blockchain ex
 """
 
 [dependencies]
+byteorder = "1.2"
 bs58 = "0.3"
 serde = { version = "1.0", features = ["derive"] }
 near-runtime-fees = { path = "../near-runtime-fees", version = "0.4.0" }
@@ -21,6 +22,11 @@ near-vm-errors = { path = "../near-vm-errors" }
 [dev-dependencies]
 serde_json = {version= "1.0", features= ["preserve_order"]}
 
+[features]
+default = ["costs_counting"]
+
+# Use this feature to enable counting of fees and costs applied.
+costs_counting = []
 
 [[test]]
 name = "test_storage_read_write"
@@ -33,6 +39,7 @@ path = "tests/test_context.rs"
 [[test]]
 name = "test_miscs"
 path = "tests/test_miscs.rs"
+required-features = ["costs_counting"]
 
 [[test]]
 name = "test_registers"
diff --git a/runtime/near-vm-logic/src/config.rs b/runtime/near-vm-logic/src/config.rs
index 2ee28c7534e..6c7cd9b2d68 100644
--- a/runtime/near-vm-logic/src/config.rs
+++ b/runtime/near-vm-logic/src/config.rs
@@ -46,7 +46,7 @@ impl Default for VMConfig {
             grow_mem_cost: 1,
             regular_op_cost: 1,
             max_gas_burnt: 10u64.pow(9),
-            max_stack_height: 32 * 1024,        // 32Kib of stack.
+            max_stack_height: 16 * 1024,        // 16Kib of stack.
             initial_memory_pages: 2u32.pow(10), // 64Mib of memory.
             max_memory_pages: 2u32.pow(11),     // 128Mib of memory.
             // By default registers are limited by 1GiB of memory.
@@ -76,7 +76,7 @@ impl VMConfig {
             grow_mem_cost: 0,
             regular_op_cost: 0,
             max_gas_burnt: 10u64.pow(9),
-            max_stack_height: 64 * 1024,
+            max_stack_height: 16 * 1024,
             initial_memory_pages: 17,
             max_memory_pages: 32,
             registers_memory_limit: 2u64.pow(30),
@@ -90,173 +90,157 @@ impl VMConfig {
 
 #[derive(Debug, Serialize, Deserialize, Clone, Hash, PartialEq, Eq)]
 pub struct ExtCostsConfig {
-    /// Pay for reading contract input base
-    pub input_base: Gas,
-    /// Pay for reading contract input per byte
-    pub input_per_byte: Gas,
-    /// Storage trie read key base cost
-    pub storage_read_base: Gas,
-    /// Storage trie read key per byte cost
-    pub storage_read_key_byte: Gas,
-    /// Storage trie read value cost per byte cost
-    pub storage_read_value_byte: Gas,
+    /// Base cost for calling a host function.
+    pub base: Gas,
+
+    /// Base cost for guest memory read
+    pub read_memory_base: Gas,
+    /// Cost for guest memory read per byte
+    pub read_memory_byte: Gas,
+
+    /// Base cost for guest memory write
+    pub write_memory_base: Gas,
+    /// Cost for guest memory write per byte
+    pub write_memory_byte: Gas,
+
+    /// Base cost for reading from register
+    pub read_register_base: Gas,
+    /// Cost for reading byte from register
+    pub read_register_byte: Gas,
+
+    /// Base cost for writing into register
+    pub write_register_base: Gas,
+    /// Cost for writing byte into register
+    pub write_register_byte: Gas,
+
+    /// Base cost of decoding utf8.
+    pub utf8_decoding_base: Gas,
+    /// Cost per byte of decoding utf8.
+    pub utf8_decoding_byte: Gas,
+
+    /// Base cost of decoding utf16.
+    pub utf16_decoding_base: Gas,
+    /// Cost per bye of decoding utf16.
+    pub utf16_decoding_byte: Gas,
+
+    /// Base cost of computing sha256
+    pub sha256_base: Gas,
+    /// Cost of computing sha256 per byte
+    pub sha256_byte: Gas,
+
+    /// Base cost for logging.
+    pub log_base: Gas,
+    /// Cost for logging per byte
+    pub log_byte: Gas,
+
+    // ###############
+    // # Storage API #
+    // ###############
     /// Storage trie write key base cost
     pub storage_write_base: Gas,
     /// Storage trie write key per byte cost
     pub storage_write_key_byte: Gas,
     /// Storage trie write value per byte cost
     pub storage_write_value_byte: Gas,
-    /// Storage trie check for key existence cost base
-    pub storage_has_key_base: Gas,
-    /// Storage trie check for key existence per key byte
-    pub storage_has_key_byte: Gas,
+    /// Storage trie write cost per byte of evicted value.
+    pub storage_write_evicted_byte: Gas,
+
+    /// Storage trie read key base cost
+    pub storage_read_base: Gas,
+    /// Storage trie read key per byte cost
+    pub storage_read_key_byte: Gas,
+    /// Storage trie read value cost per byte cost
+    pub storage_read_value_byte: Gas,
+
     /// Remove key from trie base cost
     pub storage_remove_base: Gas,
     /// Remove key from trie per byte cost
     pub storage_remove_key_byte: Gas,
     /// Remove key from trie ret value byte cost
     pub storage_remove_ret_value_byte: Gas,
+
+    /// Storage trie check for key existence cost base
+    pub storage_has_key_base: Gas,
+    /// Storage trie check for key existence per key byte
+    pub storage_has_key_byte: Gas,
+
     /// Create trie prefix iterator cost base
     pub storage_iter_create_prefix_base: Gas,
+    /// Create trie prefix iterator cost per byte.
+    pub storage_iter_create_prefix_byte: Gas,
+
     /// Create trie range iterator cost base
     pub storage_iter_create_range_base: Gas,
-    /// Create trie iterator per key byte cost
-    pub storage_iter_create_key_byte: Gas,
+    /// Create trie range iterator cost per byte of the `from` key.
+    pub storage_iter_create_from_byte: Gas,
+    /// Create trie range iterator cost per byte of the `to` key.
+    pub storage_iter_create_to_byte: Gas,
+
     /// Trie iterator per key base cost
     pub storage_iter_next_base: Gas,
     /// Trie iterator next key byte cost
     pub storage_iter_next_key_byte: Gas,
     /// Trie iterator next value byte cost
     pub storage_iter_next_value_byte: Gas,
-    /// Base cost for reading from register
-    pub read_register_base: Gas,
-    /// Cost for reading byte from register
-    pub read_register_byte: Gas,
-    /// Base cost for writing into register
-    pub write_register_base: Gas,
-    /// Cost for writing byte into register
-    pub write_register_byte: Gas,
-    /// Base cost for guest memory read
-    pub read_memory_base: Gas,
-    /// Cost for guest memory read
-    pub read_memory_byte: Gas,
-    /// Base cost for guest memory write
-    pub write_memory_base: Gas,
-    /// Cost for guest memory write per byte
-    pub write_memory_byte: Gas,
-    /// Get account balance cost
-    pub account_balance: Gas,
-    /// Get prepaid gas cost
-    pub prepaid_gas: Gas,
-    /// Get used gas cost
-    pub used_gas: Gas,
-    /// Cost of getting random seed
-    pub random_seed_base: Gas,
-    /// Cost of getting random seed per byte
-    pub random_seed_per_byte: Gas,
-    /// Cost of getting sha256 base
-    pub sha256: Gas,
-    /// Cost of getting sha256 per byte
-    pub sha256_byte: Gas,
-    /// Get account attached_deposit base cost
-    pub attached_deposit: Gas,
-    /// Get storage usage cost
-    pub storage_usage: Gas,
-    /// Get a current block height base cost
-    pub block_index: Gas,
-    /// Get a current timestamp base cost
-    pub block_timestamp: Gas,
-    /// Cost for getting a current account base
-    pub current_account_id: Gas,
-    /// Cost for getting a current account per byte
-    pub current_account_id_byte: Gas,
-    /// Cost for getting a signer account id base
-    pub signer_account_id: Gas,
-    /// Cost for getting a signer account per byte
-    pub signer_account_id_byte: Gas,
-    /// Cost for getting a signer public key
-    pub signer_account_pk: Gas,
-    /// Cost for getting a signer public key per byte
-    pub signer_account_pk_byte: Gas,
-    /// Cost for getting a predecessor account
-    pub predecessor_account_id: Gas,
-    /// Cost for getting a predecessor account per byte
-    pub predecessor_account_id_byte: Gas,
+
+    /// Cost per touched trie node
+    pub touching_trie_node: Gas,
+
+    // ###############
+    // # Promise API #
+    // ###############
     /// Cost for calling promise_and
     pub promise_and_base: Gas,
     /// Cost for calling promise_and for each promise
     pub promise_and_per_promise: Gas,
-    /// Cost for calling promise_result
-    pub promise_result_base: Gas,
-    /// Cost for calling promise_result per result byte
-    pub promise_result_byte: Gas,
-    /// Cost for calling promise_results_count
-    pub promise_results_count: Gas,
     /// Cost for calling promise_return
     pub promise_return: Gas,
-    /// Cost for calling logging
-    pub log_base: Gas,
-    /// Cost for logging per byte
-    pub log_per_byte: Gas,
 }
 
 impl Default for ExtCostsConfig {
     fn default() -> ExtCostsConfig {
         ExtCostsConfig {
-            input_base: 1,
-            input_per_byte: 1,
-            storage_read_base: 1,
-            storage_read_key_byte: 1,
-            storage_read_value_byte: 1,
+            base: 1,
+            read_memory_base: 1,
+            read_memory_byte: 1,
+            write_memory_base: 1,
+            write_memory_byte: 1,
+            read_register_base: 1,
+            read_register_byte: 1,
+            write_register_base: 1,
+            write_register_byte: 1,
+            utf8_decoding_base: 1,
+            utf8_decoding_byte: 1,
+            utf16_decoding_base: 1,
+            utf16_decoding_byte: 1,
+            sha256_base: 1,
+            sha256_byte: 1,
+            log_base: 1,
+            log_byte: 1,
             storage_write_base: 1,
             storage_write_key_byte: 1,
             storage_write_value_byte: 1,
-            storage_has_key_base: 1,
-            storage_has_key_byte: 1,
+            storage_write_evicted_byte: 1,
+            storage_read_base: 1,
+            storage_read_key_byte: 1,
+            storage_read_value_byte: 1,
             storage_remove_base: 1,
             storage_remove_key_byte: 1,
             storage_remove_ret_value_byte: 1,
+            storage_has_key_base: 1,
+            storage_has_key_byte: 1,
             storage_iter_create_prefix_base: 1,
+            storage_iter_create_prefix_byte: 1,
             storage_iter_create_range_base: 1,
-            storage_iter_create_key_byte: 1,
+            storage_iter_create_from_byte: 1,
+            storage_iter_create_to_byte: 1,
             storage_iter_next_base: 1,
             storage_iter_next_key_byte: 1,
             storage_iter_next_value_byte: 1,
-            read_register_base: 1,
-            read_register_byte: 1,
-            write_register_base: 1,
-            write_register_byte: 1,
-            read_memory_base: 1,
-            read_memory_byte: 1,
-            write_memory_base: 1,
-            write_memory_byte: 1,
-            account_balance: 1,
-            prepaid_gas: 1,
-            used_gas: 1,
-            random_seed_base: 1,
-            random_seed_per_byte: 1,
-            sha256: 1,
-            sha256_byte: 1,
-            attached_deposit: 1,
-            storage_usage: 1,
-            block_index: 1,
-            block_timestamp: 1,
-            current_account_id: 1,
-            current_account_id_byte: 1,
-            signer_account_id: 1,
-            signer_account_id_byte: 1,
-            signer_account_pk: 1,
-            signer_account_pk_byte: 1,
-            predecessor_account_id: 1,
-            predecessor_account_id_byte: 1,
+            touching_trie_node: 1,
             promise_and_base: 1,
             promise_and_per_promise: 1,
-            promise_result_base: 1,
-            promise_result_byte: 1,
-            promise_results_count: 1,
             promise_return: 1,
-            log_base: 1,
-            log_per_byte: 1,
         }
     }
 }
@@ -264,60 +248,143 @@ impl Default for ExtCostsConfig {
 impl ExtCostsConfig {
     fn free() -> ExtCostsConfig {
         ExtCostsConfig {
-            input_base: 0,
-            input_per_byte: 0,
-            storage_read_base: 0,
-            storage_read_key_byte: 0,
-            storage_read_value_byte: 0,
+            base: 0,
+            read_memory_base: 0,
+            read_memory_byte: 0,
+            write_memory_base: 0,
+            write_memory_byte: 0,
+            read_register_base: 0,
+            read_register_byte: 0,
+            write_register_base: 0,
+            write_register_byte: 0,
+            utf8_decoding_base: 0,
+            utf8_decoding_byte: 0,
+            utf16_decoding_base: 0,
+            utf16_decoding_byte: 0,
+            sha256_base: 0,
+            sha256_byte: 0,
+            log_base: 0,
+            log_byte: 0,
             storage_write_base: 0,
             storage_write_key_byte: 0,
             storage_write_value_byte: 0,
-            storage_has_key_base: 0,
-            storage_has_key_byte: 0,
+            storage_write_evicted_byte: 0,
+            storage_read_base: 0,
+            storage_read_key_byte: 0,
+            storage_read_value_byte: 0,
             storage_remove_base: 0,
             storage_remove_key_byte: 0,
             storage_remove_ret_value_byte: 0,
+            storage_has_key_base: 0,
+            storage_has_key_byte: 0,
             storage_iter_create_prefix_base: 0,
+            storage_iter_create_prefix_byte: 0,
             storage_iter_create_range_base: 0,
-            storage_iter_create_key_byte: 0,
+            storage_iter_create_from_byte: 0,
+            storage_iter_create_to_byte: 0,
             storage_iter_next_base: 0,
             storage_iter_next_key_byte: 0,
             storage_iter_next_value_byte: 0,
-            read_register_base: 0,
-            read_register_byte: 0,
-            write_register_base: 0,
-            write_register_byte: 0,
-            read_memory_base: 0,
-            read_memory_byte: 0,
-            write_memory_base: 0,
-            write_memory_byte: 0,
-            account_balance: 0,
-            prepaid_gas: 0,
-            used_gas: 0,
-            random_seed_base: 0,
-            random_seed_per_byte: 0,
-            sha256: 0,
-            sha256_byte: 0,
-            attached_deposit: 0,
-            storage_usage: 0,
-            block_index: 0,
-            block_timestamp: 0,
-            current_account_id: 0,
-            current_account_id_byte: 0,
-            signer_account_id: 0,
-            signer_account_id_byte: 0,
-            signer_account_pk: 0,
-            signer_account_pk_byte: 0,
-            predecessor_account_id: 0,
-            predecessor_account_id_byte: 0,
+            touching_trie_node: 0,
             promise_and_base: 0,
             promise_and_per_promise: 0,
-            promise_result_base: 0,
-            promise_result_byte: 0,
-            promise_results_count: 0,
             promise_return: 0,
-            log_base: 0,
-            log_per_byte: 0,
+        }
+    }
+}
+
+/// Strongly-typed representation of the fees for counting.
+#[derive(Copy, Clone, Hash, PartialEq, Eq, Debug, PartialOrd, Ord)]
+#[allow(non_camel_case_types)]
+pub enum ExtCosts {
+    base,
+    read_memory_base,
+    read_memory_byte,
+    write_memory_base,
+    write_memory_byte,
+    read_register_base,
+    read_register_byte,
+    write_register_base,
+    write_register_byte,
+    utf8_decoding_base,
+    utf8_decoding_byte,
+    utf16_decoding_base,
+    utf16_decoding_byte,
+    sha256_base,
+    sha256_byte,
+    log_base,
+    log_byte,
+    storage_write_base,
+    storage_write_key_byte,
+    storage_write_value_byte,
+    storage_write_evicted_byte,
+    storage_read_base,
+    storage_read_key_byte,
+    storage_read_value_byte,
+    storage_remove_base,
+    storage_remove_key_byte,
+    storage_remove_ret_value_byte,
+    storage_has_key_base,
+    storage_has_key_byte,
+    storage_iter_create_prefix_base,
+    storage_iter_create_prefix_byte,
+    storage_iter_create_range_base,
+    storage_iter_create_from_byte,
+    storage_iter_create_to_byte,
+    storage_iter_next_base,
+    storage_iter_next_key_byte,
+    storage_iter_next_value_byte,
+    touching_trie_node,
+    promise_and_base,
+    promise_and_per_promise,
+    promise_return,
+}
+
+impl ExtCosts {
+    pub fn value(&self, config: &ExtCostsConfig) -> Gas {
+        use ExtCosts::*;
+        match self {
+            base => config.base,
+            read_memory_base => config.read_memory_base,
+            read_memory_byte => config.read_memory_byte,
+            write_memory_base => config.write_memory_base,
+            write_memory_byte => config.write_memory_byte,
+            read_register_base => config.read_register_base,
+            read_register_byte => config.read_register_byte,
+            write_register_base => config.write_register_base,
+            write_register_byte => config.write_register_byte,
+            utf8_decoding_base => config.utf8_decoding_base,
+            utf8_decoding_byte => config.utf8_decoding_byte,
+            utf16_decoding_base => config.utf16_decoding_base,
+            utf16_decoding_byte => config.utf16_decoding_byte,
+            sha256_base => config.sha256_base,
+            sha256_byte => config.sha256_byte,
+            log_base => config.log_base,
+            log_byte => config.log_byte,
+            storage_write_base => config.storage_write_base,
+            storage_write_key_byte => config.storage_write_key_byte,
+            storage_write_value_byte => config.storage_write_value_byte,
+            storage_write_evicted_byte => config.storage_write_evicted_byte,
+            storage_read_base => config.storage_read_base,
+            storage_read_key_byte => config.storage_read_key_byte,
+            storage_read_value_byte => config.storage_read_value_byte,
+            storage_remove_base => config.storage_remove_base,
+            storage_remove_key_byte => config.storage_remove_key_byte,
+            storage_remove_ret_value_byte => config.storage_remove_ret_value_byte,
+            storage_has_key_base => config.storage_has_key_base,
+            storage_has_key_byte => config.storage_has_key_byte,
+            storage_iter_create_prefix_base => config.storage_iter_create_prefix_base,
+            storage_iter_create_prefix_byte => config.storage_iter_create_prefix_byte,
+            storage_iter_create_range_base => config.storage_iter_create_range_base,
+            storage_iter_create_from_byte => config.storage_iter_create_from_byte,
+            storage_iter_create_to_byte => config.storage_iter_create_to_byte,
+            storage_iter_next_base => config.storage_iter_next_base,
+            storage_iter_next_key_byte => config.storage_iter_next_key_byte,
+            storage_iter_next_value_byte => config.storage_iter_next_value_byte,
+            touching_trie_node => config.touching_trie_node,
+            promise_and_base => config.promise_and_base,
+            promise_and_per_promise => config.promise_and_per_promise,
+            promise_return => config.promise_return,
         }
     }
 }
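
The point of the new `ExtCosts` enum is that call sites look costs up through one table instead of hard-coding struct fields. A minimal sketch of that lookup, using the re-exports added to lib.rs further down and the default values (all 1) shown above:

    // Sketch: computing the charge for hashing `num_bytes` bytes via the cost table.
    use near_vm_logic::{ExtCosts, ExtCostsConfig};

    fn sha256_fee(config: &ExtCostsConfig, num_bytes: u64) -> u64 {
        ExtCosts::sha256_base.value(config) + ExtCosts::sha256_byte.value(config) * num_bytes
    }

    fn main() {
        let config = ExtCostsConfig::default();
        // With every default cost set to 1, hashing 32 bytes costs 1 + 32.
        assert_eq!(sha256_fee(&config, 32), 33);
    }
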
diff --git a/runtime/near-vm-logic/src/dependencies.rs b/runtime/near-vm-logic/src/dependencies.rs
index 355d3270b7a..ee1b2d086a4 100644
--- a/runtime/near-vm-logic/src/dependencies.rs
+++ b/runtime/near-vm-logic/src/dependencies.rs
@@ -118,4 +118,8 @@ pub trait External {
     ) -> Result<()>;
 
     fn sha256(&self, data: &[u8]) -> Result<Vec<u8>>;
+
+    fn get_touched_nodes_count(&self) -> u64;
+
+    fn reset_touched_nodes_counter(&mut self);
 }
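
The two new `External` hooks expose how many trie nodes a storage operation touched. One plausible consumer (not part of this diff) charges the new `touching_trie_node` cost for that count and then resets the counter, roughly as sketched below, written as if inside near-vm-logic; the helper name is made up.

    // Sketch only: charge `touching_trie_node` for every node the External reports
    // as touched since the last reset; `Result` is the crate's host-error alias.
    fn charge_touched_trie_nodes(
        gas_counter: &mut GasCounter,
        ext: &mut dyn External,
    ) -> Result<()> {
        let touched = ext.get_touched_nodes_count();
        ext.reset_touched_nodes_counter();
        // `pay_per_byte` is really "pay per unit"; here the unit is one touched trie node.
        gas_counter.pay_per_byte(ExtCosts::touching_trie_node, touched)
    }
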
diff --git a/runtime/near-vm-logic/src/gas_counter.rs b/runtime/near-vm-logic/src/gas_counter.rs
index 7ad55d1a85e..6ec6f605f42 100644
--- a/runtime/near-vm-logic/src/gas_counter.rs
+++ b/runtime/near-vm-logic/src/gas_counter.rs
@@ -1,7 +1,14 @@
+use crate::config::{ExtCosts, ExtCostsConfig};
 use crate::types::Gas;
 use crate::{HostError, HostErrorOrStorageError};
 use near_runtime_fees::Fee;
 
+#[cfg(feature = "costs_counting")]
+thread_local! {
+    pub static EXT_COSTS_COUNTER: std::cell::RefCell<std::collections::HashMap<ExtCosts, u64>> =
+        Default::default();
+}
+
 type Result<T> = ::std::result::Result<T, HostErrorOrStorageError>;
 
 /// Gas counter (a part of VMlogic)
@@ -14,12 +21,19 @@ pub struct GasCounter {
     max_gas_burnt: Gas,
     prepaid_gas: Gas,
     is_view: bool,
+    ext_costs_config: ExtCostsConfig,
 }
 
 impl GasCounter {
-    pub fn new(max_gas_burnt: Gas, prepaid_gas: Gas, is_view: bool) -> Self {
-        Self { burnt_gas: 0, used_gas: 0, max_gas_burnt, prepaid_gas, is_view }
+    pub fn new(
+        ext_costs_config: ExtCostsConfig,
+        max_gas_burnt: Gas,
+        prepaid_gas: Gas,
+        is_view: bool,
+    ) -> Self {
+        Self { ext_costs_config, burnt_gas: 0, used_gas: 0, max_gas_burnt, prepaid_gas, is_view }
     }
+
     pub fn deduct_gas(&mut self, burn_gas: Gas, use_gas: Gas) -> Result<()> {
         assert!(burn_gas <= use_gas);
         let new_burnt_gas =
@@ -47,14 +61,32 @@ impl GasCounter {
             res
         }
     }
+
+    #[cfg(feature = "costs_counting")]
+    #[inline]
+    fn inc_ext_costs_counter(&self, cost: ExtCosts, value: u64) {
+        EXT_COSTS_COUNTER.with(|f| {
+            *f.borrow_mut().entry(cost).or_default() += value;
+        });
+    }
+
+    #[cfg(not(feature = "costs_counting"))]
+    #[inline]
+    fn inc_ext_costs_counter(&self, _cost: ExtCosts, _value: u64) {}
+
     /// A helper function to pay per byte gas
-    pub fn pay_per_byte(&mut self, per_byte: Gas, num_bytes: u64) -> Result<()> {
-        let use_gas = num_bytes.checked_mul(per_byte).ok_or(HostError::IntegerOverflow)?;
+    pub fn pay_per_byte(&mut self, cost: ExtCosts, num_bytes: u64) -> Result<()> {
+        self.inc_ext_costs_counter(cost, num_bytes);
+        let use_gas = num_bytes
+            .checked_mul(cost.value(&self.ext_costs_config))
+            .ok_or(HostError::IntegerOverflow)?;
         self.deduct_gas(use_gas, use_gas)
     }
 
     /// A helper function to pay base cost gas
-    pub fn pay_base(&mut self, base_fee: Gas) -> Result<()> {
+    pub fn pay_base(&mut self, cost: ExtCosts) -> Result<()> {
+        self.inc_ext_costs_counter(cost, 1);
+        let base_fee = cost.value(&self.ext_costs_config);
         self.deduct_gas(base_fee, base_fee)
     }
 
@@ -104,7 +136,7 @@ mod tests {
     use super::*;
     #[test]
     fn test_deduct_gas() {
-        let mut counter = GasCounter::new(10, 10, false);
+        let mut counter = GasCounter::new(ExtCostsConfig::default(), 10, 10, false);
         counter.deduct_gas(5, 10).expect("deduct_gas should work");
         assert_eq!(counter.burnt_gas(), 5);
         assert_eq!(counter.used_gas(), 10);
@@ -113,7 +145,7 @@ mod tests {
     #[test]
     #[should_panic]
     fn test_prepaid_gas_min() {
-        let mut counter = GasCounter::new(100, 10, false);
+        let mut counter = GasCounter::new(ExtCostsConfig::default(), 100, 10, false);
         counter.deduct_gas(10, 5).unwrap();
     }
 }
diff --git a/runtime/near-vm-logic/src/lib.rs b/runtime/near-vm-logic/src/lib.rs
index c99d00b7de3..6e3ee1766e8 100644
--- a/runtime/near-vm-logic/src/lib.rs
+++ b/runtime/near-vm-logic/src/lib.rs
@@ -8,9 +8,12 @@ pub mod mocks;
 pub mod serde_with;
 
 pub mod types;
-pub use config::VMConfig;
+pub use config::{ExtCosts, ExtCostsConfig, VMConfig};
 pub use context::VMContext;
 pub use dependencies::{External, MemoryLike};
 pub use logic::{VMLogic, VMOutcome};
 pub use near_vm_errors::{HostError, HostErrorOrStorageError};
 pub use types::ReturnData;
+
+#[cfg(feature = "costs_counting")]
+pub use gas_counter::EXT_COSTS_COUNTER;
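
With `EXT_COSTS_COUNTER` re-exported, a cost-accounting test (such as the feature-gated `test_miscs` above) can read back how many times each external cost was charged. A minimal sketch, assuming the default `costs_counting` feature is enabled:

    // Sketch: dump the per-cost counters accumulated on the current thread.
    #[cfg(feature = "costs_counting")]
    fn print_ext_costs_counter() {
        near_vm_logic::EXT_COSTS_COUNTER.with(|counter| {
            for (cost, count) in counter.borrow().iter() {
                println!("{:?}: {}", cost, count);
            }
        });
    }
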
diff --git a/runtime/near-vm-logic/src/logic.rs b/runtime/near-vm-logic/src/logic.rs
index 7f4f8ed6a89..25557615dab 100644
--- a/runtime/near-vm-logic/src/logic.rs
+++ b/runtime/near-vm-logic/src/logic.rs
@@ -1,3 +1,4 @@
+use crate::config::ExtCosts::*;
 use crate::config::VMConfig;
 use crate::context::VMContext;
 use crate::dependencies::{External, MemoryLike};
@@ -7,6 +8,7 @@ use crate::types::{
     StorageUsage,
 };
 use crate::{HostError, HostErrorOrStorageError};
+use byteorder::ByteOrder;
 use near_runtime_fees::RuntimeFeesConfig;
 use serde::{Deserialize, Serialize};
 use std::collections::{HashMap, HashSet};
@@ -72,6 +74,24 @@ enum PromiseToReceipts {
     NotReceipt(Vec<ReceiptIndex>),
 }
 
+macro_rules! memory_get {
+    ($_type:ty, $name:ident) => {
+        fn $name(&mut self, offset: u64) -> Result<$_type> {
+            let mut array = [0u8; size_of::<$_type>()];
+            self.memory_get_into(offset, &mut array)?;
+            Ok(<$_type>::from_le_bytes(array))
+        }
+    };
+}
+
+macro_rules! memory_set {
+    ($_type:ty, $name:ident) => {
+        fn $name(&mut self, offset: u64, value: $_type) -> Result<()> {
+            self.memory_set_slice(offset, &value.to_le_bytes())
+        }
+    };
+}
+
 impl<'a> VMLogic<'a> {
     pub fn new(
         ext: &'a mut dyn External,
@@ -81,10 +101,15 @@ impl<'a> VMLogic<'a> {
         promise_results: &'a [PromiseResult],
         memory: &'a mut dyn MemoryLike,
     ) -> Self {
+        ext.reset_touched_nodes_counter();
         let current_account_balance = context.account_balance + context.attached_deposit;
         let current_storage_usage = context.storage_usage;
-        let gas_counter =
-            GasCounter::new(config.max_gas_burnt, context.prepaid_gas, context.is_view);
+        let gas_counter = GasCounter::new(
+            config.ext_costs.clone(),
+            config.max_gas_burnt,
+            context.prepaid_gas,
+            context.is_view,
+        );
         Self {
             ext,
             context,
@@ -109,88 +134,165 @@ impl<'a> VMLogic<'a> {
     // # Memory helper functions #
     // ###########################
 
-    fn try_fit_mem(memory: &dyn MemoryLike, offset: u64, len: u64) -> Result<()> {
-        if memory.fits_memory(offset, len) {
+    fn try_fit_mem(&mut self, offset: u64, len: u64) -> Result<()> {
+        if self.memory.fits_memory(offset, len) {
             Ok(())
         } else {
             Err(HostError::MemoryAccessViolation.into())
         }
     }
 
-    fn memory_get_into(memory: &dyn MemoryLike, offset: u64, buf: &mut [u8]) -> Result<()> {
-        Self::try_fit_mem(memory, offset, buf.len() as u64)?;
-        memory.read_memory(offset, buf);
+    fn memory_get_into(&mut self, offset: u64, buf: &mut [u8]) -> Result<()> {
+        self.gas_counter.pay_base(read_memory_base)?;
+        self.gas_counter.pay_per_byte(read_memory_byte, buf.len() as _)?;
+        self.try_fit_mem(offset, buf.len() as _)?;
+        self.memory.read_memory(offset, buf);
         Ok(())
     }
 
-    fn memory_get(memory: &dyn MemoryLike, offset: u64, len: u64) -> Result<Vec<u8>> {
-        Self::try_fit_mem(memory, offset, len)?;
+    fn memory_get_vec(&mut self, offset: u64, len: u64) -> Result<Vec<u8>> {
+        self.gas_counter.pay_base(read_memory_base)?;
+        self.gas_counter.pay_per_byte(read_memory_byte, len)?;
+        self.try_fit_mem(offset, len)?;
         let mut buf = vec![0; len as usize];
-        memory.read_memory(offset, &mut buf);
+        self.memory.read_memory(offset, &mut buf);
         Ok(buf)
     }
 
-    fn memory_set(memory: &mut dyn MemoryLike, offset: u64, buf: &[u8]) -> Result<()> {
-        Self::try_fit_mem(memory, offset, buf.len() as _)?;
-        memory.write_memory(offset, buf);
-        Ok(())
-    }
-
-    /// Writes `u128` to Wasm memory.
-    #[allow(dead_code)]
-    fn memory_set_u128(memory: &mut dyn MemoryLike, offset: u64, value: u128) -> Result<()> {
-        let data: [u8; size_of::<u128>()] = value.to_le_bytes();
-        Self::memory_set(memory, offset, &data)
-    }
-
-    /// Get `u128` from Wasm memory.
-    fn memory_get_u128(memory: &dyn MemoryLike, offset: u64) -> Result<u128> {
-        let mut array = [0u8; size_of::<u128>()];
-        Self::memory_get_into(memory, offset, &mut array)?;
-        Ok(u128::from_le_bytes(array))
-    }
+    memory_get!(u128, memory_get_u128);
+    memory_get!(u32, memory_get_u32);
+    memory_get!(u16, memory_get_u16);
+    memory_get!(u8, memory_get_u8);
 
     /// Reads an array of `u64` elements.
-    fn memory_get_array_u64(
-        memory: &dyn MemoryLike,
-        offset: u64,
-        num_elements: u64,
-    ) -> Result<Vec<u64>> {
+    fn memory_get_vec_u64(&mut self, offset: u64, num_elements: u64) -> Result<Vec<u64>> {
         let memory_len = num_elements
             .checked_mul(size_of::<u64>() as u64)
             .ok_or(HostError::MemoryAccessViolation)?;
-        let data = Self::memory_get(memory, offset, memory_len)?;
-        Ok(data
-            .chunks(size_of::<u64>())
-            .map(|buf| {
-                assert_eq!(buf.len(), size_of::<u64>());
-                let mut array = [0u8; size_of::<u64>()];
-                array.copy_from_slice(buf);
-                u64::from_le_bytes(array)
-            })
-            .collect())
+        let data = self.memory_get_vec(offset, memory_len)?;
+        let mut res = vec![0u64; num_elements as usize];
+        byteorder::LittleEndian::read_u64_into(&data, &mut res);
+        Ok(res)
     }
 
-    fn read_memory_u32(memory: &dyn MemoryLike, ptr: u64) -> Result<u32> {
-        let mut slice = [0u8; size_of::<u32>()];
-        let buf = Self::memory_get(memory, ptr, size_of::<u32>() as u64)?;
-        slice.copy_from_slice(&buf);
-        Ok(u32::from_le_bytes(slice))
+    fn get_vec_from_memory_or_register(&mut self, offset: u64, len: u64) -> Result<Vec<u8>> {
+        if len != std::u64::MAX {
+            self.memory_get_vec(offset, len)
+        } else {
+            self.internal_read_register(offset)
+        }
     }
 
-    fn get_from_memory_or_register(
-        memory: &dyn MemoryLike,
-        registers: &HashMap<u64, Vec<u8>>,
-        offset: u64,
-        len: u64,
-    ) -> Result<Vec<u8>> {
-        if len != std::u64::MAX {
-            Self::memory_get(memory, offset, len)
+    fn memory_set_slice(&mut self, offset: u64, buf: &[u8]) -> Result<()> {
+        self.gas_counter.pay_base(write_memory_base)?;
+        self.gas_counter.pay_per_byte(write_memory_byte, buf.len() as _)?;
+        self.try_fit_mem(offset, buf.len() as _)?;
+        self.memory.write_memory(offset, buf);
+        Ok(())
+    }
+
+    memory_set!(u128, memory_set_u128);
+
+    // #################
+    // # Registers API #
+    // #################
+
+    fn internal_read_register(&mut self, register_id: u64) -> Result<Vec<u8>> {
+        if let Some(data) = self.registers.get(&register_id) {
+            self.gas_counter.pay_base(read_register_base)?;
+            self.gas_counter.pay_per_byte(read_register_byte, data.len() as _)?;
+            Ok(data.clone())
         } else {
-            registers.get(&offset).ok_or(HostError::InvalidRegisterId.into()).map(|v| v.clone())
+            Err(HostError::InvalidRegisterId.into())
         }
     }
 
+    fn internal_write_register(&mut self, register_id: u64, data: Vec<u8>) -> Result<()> {
+        self.gas_counter.pay_base(write_register_base)?;
+        self.gas_counter.pay_per_byte(write_register_byte, data.len() as u64)?;
+        if data.len() as u64 > self.config.max_register_size
+            || self.registers.len() as u64 >= self.config.max_number_registers
+        {
+            return Err(HostError::MemoryAccessViolation.into());
+        }
+        self.registers.insert(register_id, data);
+
+        // Calculate the new memory usage.
+        let usage: usize =
+            self.registers.values().map(|v| size_of::<u64>() + v.len() * size_of::<u8>()).sum();
+        if usage as u64 > self.config.registers_memory_limit {
+            Err(HostError::MemoryAccessViolation.into())
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Convenience function for testing.
+    pub fn wrapped_internal_write_register(&mut self, register_id: u64, data: &[u8]) -> Result<()> {
+        self.internal_write_register(register_id, data.to_vec())
+    }
+
+    /// Writes the entire content from the register `register_id` into the memory of the guest starting with `ptr`.
+    ///
+    /// # Arguments
+    ///
+    /// * `register_id` -- a register id from where to read the data;
+    /// * `ptr` -- location on guest memory where to copy the data.
+    ///
+    /// # Errors
+    ///
+    /// * If the content extends outside the memory allocated to the guest; in Wasmer this returns a `MemoryAccessViolation` error;
+    /// * If `register_id` is pointing to unused register returns `InvalidRegisterId` error message.
+    ///
+    /// # Undefined Behavior
+    ///
+    /// If the content of the register extends outside the preallocated memory on the host side, or the pointer points to a
+    /// wrong location, this function will overwrite memory that it is not supposed to, causing undefined behavior.
+    ///
+    /// # Cost
+    ///
+    /// `base + read_register_base + read_register_byte * num_bytes + write_memory_base + write_memory_byte * num_bytes`
+    pub fn read_register(&mut self, register_id: u64, ptr: u64) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
+        let data = self.internal_read_register(register_id)?;
+        self.memory_set_slice(ptr, &data)
+    }
+
+    /// Returns the size of the blob stored in the given register.
+    /// * If register is used, then returns the size, which can potentially be zero;
+    /// * If register is not used, returns `u64::MAX`
+    ///
+    /// # Arguments
+    ///
+    /// * `register_id` -- a register id from where to read the data;
+    ///
+    /// # Cost
+    ///
+    /// `base`
+    pub fn register_len(&mut self, register_id: u64) -> Result<u64> {
+        self.gas_counter.pay_base(base)?;
+        Ok(self.registers.get(&register_id).map(|r| r.len() as _).unwrap_or(std::u64::MAX))
+    }
+
+    /// Copies `data` from the guest memory into the register. If the register is unused, it will be
+    /// initialized; if it has a larger capacity than needed for `data`, it will not be re-allocated.
+    /// The register loses its pre-existing data, if any.
+    ///
+    /// # Arguments
+    ///
+    /// * `register_id` -- a register id where to write the data;
+    /// * `data_len` -- length of the data in bytes;
+    /// * `data_ptr` -- pointer in the guest memory where to read the data from.
+    ///
+    /// # Cost
+    ///
+    /// `base + read_memory_base + read_memory_bytes * num_bytes + write_register_base + write_register_bytes * num_bytes`
+    pub fn write_register(&mut self, register_id: u64, data_len: u64, data_ptr: u64) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
+        let data = self.memory_get_vec(data_ptr, data_len)?;
+        self.internal_write_register(register_id, data)
+    }
+
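
The registers block above is easiest to follow in isolation: a register is just a host-side `Vec<u8>` keyed by `u64`, capped in element size, register count, and total memory. A minimal stand-alone sketch of that bookkeeping (the `Registers` struct and error strings here are hypothetical stand-ins, not part of `VMLogic`):

    use std::collections::HashMap;
    use std::mem::size_of;

    /// Hypothetical stand-alone model of the register bookkeeping above.
    struct Registers {
        regs: HashMap<u64, Vec<u8>>,
        max_register_size: u64,
        max_number_registers: u64,
        registers_memory_limit: u64,
    }

    impl Registers {
        /// Mirrors `internal_write_register`: reject oversized values, too many
        /// registers, or exceeding the overall memory budget.
        fn write(&mut self, id: u64, data: Vec<u8>) -> Result<(), &'static str> {
            if data.len() as u64 > self.max_register_size
                || self.regs.len() as u64 >= self.max_number_registers
            {
                return Err("MemoryAccessViolation");
            }
            self.regs.insert(id, data);
            let usage: u64 =
                self.regs.values().map(|v| (size_of::<u64>() + v.len()) as u64).sum();
            if usage > self.registers_memory_limit {
                Err("MemoryAccessViolation")
            } else {
                Ok(())
            }
        }

        /// Mirrors `register_len`: `u64::MAX` signals an unused register.
        fn len(&self, id: u64) -> u64 {
            self.regs.get(&id).map(|r| r.len() as u64).unwrap_or(std::u64::MAX)
        }
    }
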
     // ###################################
     // # String reading helper functions #
     // ###################################
@@ -203,22 +305,27 @@ impl<'a> VMLogic<'a> {
     /// * If string extends outside the memory of the guest with `MemoryAccessViolation`;
     /// * If string is not UTF-8 returns `BadUtf8`.
     /// * If string is longer than `max_log_len` returns `BadUtf8`.
+    ///
+    /// # Cost
+    ///
+    /// For a non-nul-terminated string:
+    /// `read_memory_base + read_memory_byte * num_bytes + utf8_decoding_base + utf8_decoding_byte * num_bytes`
+    ///
+    /// For a nul-terminated string:
+    /// `(read_memory_base + read_memory_byte) * num_bytes + utf8_decoding_base + utf8_decoding_byte * num_bytes`
     fn get_utf8_string(&mut self, len: u64, ptr: u64) -> Result<String> {
-        self.gas_counter.pay_base(self.config.ext_costs.log_base)?;
+        self.gas_counter.pay_base(utf8_decoding_base)?;
         let mut buf;
         let max_len = self.config.max_log_len;
         if len != std::u64::MAX {
             if len > max_len {
                 return Err(HostError::BadUTF8.into());
             }
-            self.gas_counter.pay_per_byte(self.config.ext_costs.log_per_byte, len)?;
-            buf = Self::memory_get(self.memory, ptr, len)?;
+            buf = self.memory_get_vec(ptr, len)?;
         } else {
             buf = vec![];
             for i in 0..=max_len {
-                self.gas_counter.pay_per_byte(self.config.ext_costs.log_per_byte, 1)?;
-                Self::try_fit_mem(self.memory, ptr + i, 1)?;
-                let el = self.memory.read_memory_u8(ptr + i);
+                let el = self.memory_get_u8(ptr + i)?;
                 if el == 0 {
                     break;
                 }
@@ -228,6 +335,7 @@ impl<'a> VMLogic<'a> {
                 buf.push(el);
             }
         }
+        self.gas_counter.pay_per_byte(utf8_decoding_byte, buf.len() as _)?;
         String::from_utf8(buf).map_err(|_| HostError::BadUTF8.into())
     }
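
A stripped-down model of the two read modes above (explicit length versus `len == u64::MAX` meaning nul-terminated), with gas accounting and real guest memory omitted; the function below is purely illustrative:

    /// Simplified model of `get_utf8_string`: either the caller passes an explicit
    /// length, or `u64::MAX` asks for a NUL-terminated scan bounded by `max_len`.
    fn decode_utf8(memory: &[u8], ptr: usize, len: u64, max_len: usize) -> Option<String> {
        let buf: Vec<u8> = if len != std::u64::MAX {
            let len = len as usize;
            if len > max_len {
                return None; // corresponds to `BadUTF8`
            }
            memory.get(ptr..ptr.checked_add(len)?)?.to_vec()
        } else {
            // NUL-terminated: read byte by byte, never past `max_len`.
            let mut out = Vec::new();
            for i in 0..=max_len {
                let el = *memory.get(ptr.checked_add(i)?)?;
                if el == 0 {
                    break;
                }
                if i == max_len {
                    return None; // no terminator within the allowed length
                }
                out.push(el);
            }
            out
        };
        String::from_utf8(buf).ok()
    }
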
 
@@ -236,123 +344,45 @@ impl<'a> VMLogic<'a> {
     ///
     /// * If string extends outside the memory of the guest with `MemoryAccessViolation`;
     /// * If string is not UTF-16 returns `BadUtf16`.
+    ///
+    /// # Cost
+    ///
+    /// For a non-nul-terminated string:
+    /// `read_memory_base + read_memory_byte * num_bytes + utf16_decoding_base + utf16_decoding_byte * num_bytes`
+    ///
+    /// For a nul-terminated string:
+    /// `read_memory_base * num_bytes / 2 + read_memory_byte * num_bytes + utf16_decoding_base + utf16_decoding_byte * num_bytes`
     fn get_utf16_string(&mut self, len: u64, ptr: u64) -> Result<String> {
-        self.gas_counter.pay_base(self.config.ext_costs.log_base)?;
-        let mut u16_buffer = Vec::new();
+        self.gas_counter.pay_base(utf16_decoding_base)?;
+        let mut u16_buffer;
         let max_len = self.config.max_log_len;
         if len != std::u64::MAX {
-            let input = Self::memory_get(self.memory, ptr, len)?;
+            let input = self.memory_get_vec(ptr, len)?;
             if len % 2 != 0 || len > max_len {
                 return Err(HostError::BadUTF16.into());
             }
-            self.gas_counter.pay_per_byte(self.config.ext_costs.log_per_byte, len)?;
-            for i in 0..((len / 2) as usize) {
-                u16_buffer
-                    .push(u16::from_le_bytes([input[i as usize * 2], input[i as usize * 2 + 1]]));
-            }
+            u16_buffer = vec![0u16; len as usize / 2];
+            byteorder::LittleEndian::read_u16_into(&input, &mut u16_buffer);
         } else {
+            u16_buffer = vec![];
             let limit = max_len / size_of::<u16>() as u64;
             // Takes 2 bytes each iter
             for i in 0..=limit {
-                self.gas_counter
-                    .pay_per_byte(self.config.ext_costs.log_per_byte, size_of::<u16>() as u64)?;
-                // Self::try_fit_mem will check for u64 overflow on the first iteration (i == 0)
+                // self.try_fit_mem will check for u64 overflow on the first iteration (i == 0)
                 let start = ptr + i * size_of::<u16>() as u64;
-                Self::try_fit_mem(self.memory, start, size_of::<u16>() as u64)?;
-                let lo = self.memory.read_memory_u8(start);
-                let hi = self.memory.read_memory_u8(start + 1);
-                if (lo, hi) == (0, 0) {
+                let el = self.memory_get_u16(start)?;
+                if el == 0 {
                     break;
                 }
                 if i == limit {
                     return Err(HostError::BadUTF16.into());
                 }
-                u16_buffer.push(u16::from_le_bytes([lo, hi]));
+                u16_buffer.push(el);
             }
         }
-        String::from_utf16(&u16_buffer).map_err(|_| HostError::BadUTF16.into())
-    }
-
-    // #################
-    // # Registers API #
-    // #################
-
-    /// Writes the entire content from the register `register_id` into the memory of the guest starting with `ptr`.
-    ///
-    /// # Arguments
-    ///
-    /// * `register_id` -- a register id from where to read the data;
-    /// * `ptr` -- location on guest memory where to copy the data.
-    ///
-    /// # Errors
-    ///
-    /// * If the content extends outside the memory allocated to the guest. In Wasmer, it returns `MemoryAccessViolation` error message;
-    /// * If `register_id` is pointing to unused register returns `InvalidRegisterId` error message.
-    ///
-    /// # Undefined Behavior
-    ///
-    /// If the content of register extends outside the preallocated memory on the host side, or the pointer points to a
-    /// wrong location this function will overwrite memory that it is not supposed to overwrite causing an undefined behavior.
-    pub fn read_register(&mut self, register_id: u64, ptr: u64) -> Result<()> {
-        let Self { registers, memory, config, .. } = self;
-        self.gas_counter.pay_base(config.ext_costs.read_register_base)?;
-        let register = registers.get(&register_id).ok_or(HostError::InvalidRegisterId)?;
         self.gas_counter
-            .pay_per_byte(config.ext_costs.read_register_byte, register.len() as u64)?;
-        Self::memory_set(*memory, ptr, register)
-    }
-
-    /// Returns the size of the blob stored in the given register.
-    /// * If register is used, then returns the size, which can potentially be zero;
-    /// * If register is not used, returns `u64::MAX`
-    ///
-    /// # Arguments
-    ///
-    /// * `register_id` -- a register id from where to read the data;
-    pub fn register_len(&mut self, register_id: u64) -> Result<u64> {
-        Ok(self.registers.get(&register_id).map(|r| r.len() as _).unwrap_or(std::u64::MAX))
-    }
-
-    /// Copies `data` into register. If register is unused will initialize it. If register has
-    /// larger capacity than needed for `data` will not re-allocate it. The register will lose
-    /// the pre-existing data if any.
-    ///
-    /// # Arguments
-    ///
-    /// * `register_id` -- a register into which to write the data;
-    /// * `data` -- data to be copied into register.
-    pub fn write_register(&mut self, register_id: u64, data: &[u8]) -> Result<()> {
-        let Self { registers, config, gas_counter, .. } = self;
-        Self::internal_write_register(registers, gas_counter, config, register_id, data)
-    }
-
-    fn internal_write_register(
-        registers: &mut HashMap<u64, Vec<u8>>,
-        gas_counter: &mut GasCounter,
-        config: &VMConfig,
-        register_id: u64,
-        data: &[u8],
-    ) -> Result<()> {
-        gas_counter.pay_base(config.ext_costs.write_register_base)?;
-        gas_counter.pay_per_byte(config.ext_costs.write_register_byte, data.len() as u64)?;
-        if data.len() as u64 > config.max_register_size
-            || registers.len() as u64 == config.max_number_registers
-        {
-            return Err(HostError::MemoryAccessViolation.into());
-        }
-        let register = registers.entry(register_id).or_insert_with(Vec::new);
-        register.clear();
-        register.reserve(data.len());
-        register.extend_from_slice(data);
-
-        // Calculate the new memory usage.
-        let usage: usize =
-            registers.values().map(|v| size_of::<u64>() + v.len() * size_of::<u8>()).sum();
-        if usage as u64 > config.registers_memory_limit {
-            Err(HostError::MemoryAccessViolation.into())
-        } else {
-            Ok(())
-        }
+            .pay_per_byte(utf16_decoding_byte, u16_buffer.len() as u64 * size_of::<u16>() as u64)?;
+        String::from_utf16(&u16_buffer).map_err(|_| HostError::BadUTF16.into())
     }
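
The explicit-length branch above now decodes UTF-16LE in one shot via the `byteorder` crate; `LittleEndian::read_u16_into` expects the destination slice to be exactly half the byte length. That branch on its own, as a small sketch (assuming `byteorder` is available):

    use byteorder::{ByteOrder, LittleEndian};

    /// Decode a little-endian UTF-16 byte buffer, as in the explicit-length
    /// branch above. The byte length must be even.
    fn decode_utf16_le(bytes: &[u8]) -> Option<String> {
        if bytes.len() % 2 != 0 {
            return None; // corresponds to `BadUTF16`
        }
        let mut u16_buffer = vec![0u16; bytes.len() / 2];
        LittleEndian::read_u16_into(bytes, &mut u16_buffer);
        String::from_utf16(&u16_buffer).ok()
    }
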
 
     // ###############
@@ -364,12 +394,17 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If the registers exceed the memory limit returns `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes`
     pub fn current_account_id(&mut self, register_id: u64) -> Result<()> {
-        let Self { context, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.current_account_id)?;
-        let data = context.current_account_id.as_bytes();
-        gas_counter.pay_per_byte(config.ext_costs.signer_account_id_byte, data.len() as u64)?;
-        Self::internal_write_register(registers, gas_counter, config, register_id, data)
+        self.gas_counter.pay_base(base)?;
+
+        self.internal_write_register(
+            register_id,
+            self.context.current_account_id.as_bytes().to_vec(),
+        )
     }
 
     /// All contract calls are a result of some transaction that was signed by some account using
@@ -381,15 +416,20 @@ impl<'a> VMLogic<'a> {
     ///
     /// * If the registers exceed the memory limit returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes`
     pub fn signer_account_id(&mut self, register_id: u64) -> Result<()> {
-        let Self { context, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.signer_account_id)?;
-        if context.is_view {
+        self.gas_counter.pay_base(base)?;
+
+        if self.context.is_view {
             return Err(HostError::ProhibitedInView("signer_account_id".to_string()).into());
         }
-        let data = context.signer_account_id.as_bytes();
-        gas_counter.pay_per_byte(config.ext_costs.signer_account_id_byte, data.len() as u64)?;
-        Self::internal_write_register(registers, gas_counter, config, register_id, data)
+        self.internal_write_register(
+            register_id,
+            self.context.signer_account_id.as_bytes().to_vec(),
+        )
     }
 
     /// Saves the public key fo the access key that was used by the signer into the register. In
@@ -400,15 +440,17 @@ impl<'a> VMLogic<'a> {
     ///
     /// * If the registers exceed the memory limit returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes`
     pub fn signer_account_pk(&mut self, register_id: u64) -> Result<()> {
-        let Self { context, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.signer_account_id)?;
-        if context.is_view {
+        self.gas_counter.pay_base(base)?;
+
+        if self.context.is_view {
             return Err(HostError::ProhibitedInView("signer_account_pk".to_string()).into());
         }
-        let data = context.signer_account_pk.as_slice();
-        gas_counter.pay_per_byte(config.ext_costs.signer_account_pk_byte, data.len() as u64)?;
-        Self::internal_write_register(registers, gas_counter, config, register_id, data)
+        self.internal_write_register(register_id, self.context.signer_account_pk.clone())
     }
 
     /// All contract calls are a result of a receipt, this receipt might be created by a transaction
@@ -419,37 +461,52 @@ impl<'a> VMLogic<'a> {
     ///
     /// * If the registers exceed the memory limit returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes`
     pub fn predecessor_account_id(&mut self, register_id: u64) -> Result<()> {
-        let Self { context, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.predecessor_account_id)?;
-        if context.is_view {
+        self.gas_counter.pay_base(base)?;
+
+        if self.context.is_view {
             return Err(HostError::ProhibitedInView("predecessor_account_id".to_string()).into());
         }
-        let data = context.predecessor_account_id.as_bytes();
-        gas_counter
-            .pay_per_byte(config.ext_costs.predecessor_account_id_byte, data.len() as u64)?;
-        Self::internal_write_register(registers, gas_counter, config, register_id, data)
+        self.internal_write_register(
+            register_id,
+            self.context.predecessor_account_id.as_bytes().to_vec(),
+        )
     }
 
     /// Reads input to the contract call into the register. Input is expected to be in JSON-format.
     /// If input is provided saves the bytes (potentially zero) of input into register. If input is
-    /// not provided makes the register "not used", i.e. `register_len` now returns `u64::MAX`.
+    /// not provided writes 0 bytes into the register.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes`
     pub fn input(&mut self, register_id: u64) -> Result<()> {
-        let Self { context, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.input_base)?;
-        gas_counter.pay_per_byte(config.ext_costs.input_per_byte, context.input.len() as u64)?;
-        Self::internal_write_register(registers, gas_counter, config, register_id, &context.input)
+        self.gas_counter.pay_base(base)?;
+
+        self.internal_write_register(register_id, self.context.input.clone())
     }
 
     /// Returns the current block index.
+    ///
+    /// # Cost
+    ///
+    /// `base`
     pub fn block_index(&mut self) -> Result<u64> {
-        self.gas_counter.pay_base(self.config.ext_costs.block_index)?;
+        self.gas_counter.pay_base(base)?;
         Ok(self.context.block_index)
     }
 
     /// Returns the current block timestamp.
+    ///
+    /// # Cost
+    ///
+    /// `base`
     pub fn block_timestamp(&mut self) -> Result<u64> {
-        self.gas_counter.pay_base(self.config.ext_costs.block_timestamp)?;
+        self.gas_counter.pay_base(base)?;
         Ok(self.context.block_timestamp)
     }
 
@@ -460,8 +517,11 @@ impl<'a> VMLogic<'a> {
     /// * The contract code size
     /// * A small fixed overhead for account metadata.
     ///
+    /// # Cost
+    ///
+    /// `base`
     pub fn storage_usage(&mut self) -> Result<StorageUsage> {
-        self.gas_counter.pay_base(self.config.ext_costs.storage_usage)?;
+        self.gas_counter.pay_base(base)?;
         Ok(self.current_storage_usage)
     }
 
@@ -471,9 +531,14 @@ impl<'a> VMLogic<'a> {
 
     /// The current balance of the given account. This includes the attached_deposit that was
     /// attached to the transaction.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_memory_base + write_memory_byte * 16`
     pub fn account_balance(&mut self, balance_ptr: u64) -> Result<()> {
-        self.gas_counter.pay_base(self.config.ext_costs.account_balance)?;
-        Self::memory_set(self.memory, balance_ptr, &self.current_account_balance.to_le_bytes())
+        self.gas_counter.pay_base(base)?;
+
+        self.memory_set_u128(balance_ptr, self.current_account_balance)
     }
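
`memory_set_u128` (generated by the `memory_set!` macro earlier in this file) writes the balance as 16 bytes at `balance_ptr`, presumably in the same little-endian layout as the `to_le_bytes()` call it replaces. A round-trip sketch of that encoding, independent of the VM:

    /// Round-trip sketch for the 16-byte little-endian encoding used by
    /// `memory_set_u128` / `account_balance`.
    fn encode_u128_le(value: u128) -> [u8; 16] {
        value.to_le_bytes()
    }

    fn decode_u128_le(bytes: [u8; 16]) -> u128 {
        u128::from_le_bytes(bytes)
    }

    fn main() {
        let balance: u128 = 10u128.pow(24); // illustrative value only
        assert_eq!(decode_u128_le(encode_u128_le(balance)), balance);
    }
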
 
     /// The balance that was attached to the call that will be immediately deposited before the
@@ -482,12 +547,17 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If called as view function returns `ProhibitedInView``.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_memory_base + write_memory_byte * 16`
     pub fn attached_deposit(&mut self, balance_ptr: u64) -> Result<()> {
-        self.gas_counter.pay_base(self.config.ext_costs.attached_deposit)?;
+        self.gas_counter.pay_base(base)?;
+
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("attached_deposit".to_string()).into());
         }
-        Self::memory_set(self.memory, balance_ptr, &self.context.attached_deposit.to_le_bytes())
+        self.memory_set_u128(balance_ptr, self.context.attached_deposit)
     }
 
     /// The amount of gas attached to the call that can be used to pay for the gas fees.
@@ -495,8 +565,12 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base`
     pub fn prepaid_gas(&mut self) -> Result<Gas> {
-        self.gas_counter.pay_base(self.config.ext_costs.prepaid_gas)?;
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("prepaid_gas".to_string()).into());
         }
@@ -508,8 +582,12 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base`
     pub fn used_gas(&mut self) -> Result<Gas> {
-        self.gas_counter.pay_base(self.config.ext_costs.used_gas)?;
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("used_gas".to_string()).into());
         }
@@ -525,20 +603,13 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If the size of the registers exceed the set limit `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes`.
     pub fn random_seed(&mut self, register_id: u64) -> Result<()> {
-        let Self { context, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.random_seed_base)?;
-        gas_counter.pay_per_byte(
-            config.ext_costs.random_seed_per_byte,
-            context.random_seed.len() as u64,
-        )?;
-        Self::internal_write_register(
-            registers,
-            gas_counter,
-            config,
-            register_id,
-            &context.random_seed,
-        )
+        self.gas_counter.pay_base(base)?;
+        self.internal_write_register(register_id, self.context.random_seed.clone())
     }
 
     /// Hashes the random sequence of bytes using sha256 and returns it into `register_id`.
@@ -547,13 +618,16 @@ impl<'a> VMLogic<'a> {
     ///
     /// If `value_len + value_ptr` points outside the memory or the registers use more memory than
     /// the limit with `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + write_register_base + write_register_byte * num_bytes + sha256_base + sha256_byte * num_bytes`
     pub fn sha256(&mut self, value_len: u64, value_ptr: u64, register_id: u64) -> Result<()> {
-        let Self { memory, registers, gas_counter, config, ext, .. } = self;
-        gas_counter.pay_base(config.ext_costs.sha256)?;
-        let value = Self::get_from_memory_or_register(*memory, registers, value_ptr, value_len)?;
-        gas_counter.pay_per_byte(config.ext_costs.sha256_byte, value.len() as u64)?;
-        let value_hash = ext.sha256(&value)?;
-        Self::internal_write_register(registers, gas_counter, config, register_id, &value_hash)
+        self.gas_counter.pay_base(sha256_base)?;
+        let value = self.get_vec_from_memory_or_register(value_ptr, value_len)?;
+        self.gas_counter.pay_per_byte(sha256_byte, value.len() as u64)?;
+        let value_hash = self.ext.sha256(&value)?;
+        self.internal_write_register(register_id, value_hash)
     }
 
     /// Called by gas metering injected into Wasm. Counts both towards `burnt_gas` and `used_gas`.
@@ -576,6 +650,15 @@ impl<'a> VMLogic<'a> {
     /// * `sir`: whether contract call is addressed to itself;
     /// * `data_dependencies`: other contracts that this execution will be waiting on (or rather
     ///   their data receipts), where bool indicates whether this is sender=receiver communication.
+    ///
+    /// # Cost
+    ///
+    /// This is a convenience function that encapsulates several costs:
+    /// `burnt_gas := dispatch cost of the receipt + base dispatch cost of the data receipt`
+    /// `used_gas := burnt_gas + exec cost of the receipt + base exec cost of the data receipt`
+    /// Notice that we prepay all base costs upon the creation of the data dependency; we are going to
+    /// pay for the content transmitted through the dependency upon the actual creation of the
+    /// DataReceipt.
     fn pay_gas_for_new_receipt(&mut self, sir: bool, data_dependencies: &[bool]) -> Result<()> {
         let fees_config_cfg = &self.fees_config;
         let mut burn_gas = fees_config_cfg.action_receipt_creation_config.send_fee(sir);
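
A tiny numeric sketch of the burnt/used relationship described in the cost note above: send (dispatch) fees burn right away, while exec fees sit on top as used-but-not-yet-burnt gas. Names and numbers below are illustrative only, not the real fee table:

    /// burnt = all send fees; used = burnt + all exec fees (illustrative model).
    fn receipt_gas(
        send_fee: u64,
        exec_fee: u64,
        dep_send_fees: &[u64],
        dep_exec_fees: &[u64],
    ) -> (u64, u64) {
        let burnt = send_fee + dep_send_fees.iter().sum::<u64>();
        let used = burnt + exec_fee + dep_exec_fees.iter().sum::<u64>();
        (burnt, used)
    }

    fn main() {
        let (burnt, used) = receipt_gas(100, 300, &[10], &[30]);
        assert_eq!((burnt, used), (110, 440));
    }
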
@@ -615,6 +698,11 @@ impl<'a> VMLogic<'a> {
     ///
     /// Index of the new promise that uniquely identifies it within the current execution of the
     /// method.
+    ///
+    /// # Cost
+    ///
+    /// `promise_create` is a convenience wrapper around `promise_batch_create` and
+    /// `promise_batch_action_function_call`, so it charges the `base` cost twice.
     pub fn promise_create(
         &mut self,
         account_id_len: u64,
@@ -653,6 +741,11 @@ impl<'a> VMLogic<'a> {
     ///
     /// Index of the new promise that uniquely identifies it within the current execution of the
     /// method.
+    ///
+    /// # Cost
+    ///
+    /// `promise_then` is a convenience wrapper around `promise_batch_then` and
+    /// `promise_batch_action_function_call`, so it charges the `base` cost twice.
     pub fn promise_then(
         &mut self,
         promise_idx: u64,
@@ -696,21 +789,28 @@ impl<'a> VMLogic<'a> {
     ///
     /// Index of the new promise that uniquely identifies it within the current execution of the
     /// method.
+    ///
+    /// # Cost
+    ///
+    /// `base + promise_and_base + promise_and_per_promise * num_promises + cost of reading promise ids from memory`.
     pub fn promise_and(
         &mut self,
         promise_idx_ptr: u64,
         promise_idx_count: u64,
     ) -> Result<PromiseIndex> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("promise_and".to_string()).into());
         }
-        self.gas_counter.pay_base(self.config.ext_costs.promise_and_base)?;
+        self.gas_counter.pay_base(promise_and_base)?;
         self.gas_counter.pay_per_byte(
-            self.config.ext_costs.promise_and_per_promise,
-            promise_idx_count * size_of::<u64>() as u64,
+            promise_and_per_promise,
+            promise_idx_count
+                .checked_mul(size_of::<u64>() as u64)
+                .ok_or(HostError::IntegerOverflow)?,
         )?;
-        let promise_indices =
-            Self::memory_get_array_u64(self.memory, promise_idx_ptr, promise_idx_count)?;
+
+        let promise_indices = self.memory_get_vec_u64(promise_idx_ptr, promise_idx_count)?;
 
         let mut receipt_dependencies = vec![];
         for promise_idx in &promise_indices {
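
One detail worth noting in `promise_and` above: the per-promise charge multiplies `promise_idx_count` by `size_of::<u64>()` with `checked_mul`, so a hostile count cannot silently wrap before the gas is charged. The guard in isolation:

    use std::mem::size_of;

    /// Compute the byte length of a `u64` array without silently wrapping.
    fn promise_ids_byte_len(count: u64) -> Option<u64> {
        count.checked_mul(size_of::<u64>() as u64)
    }

    fn main() {
        assert_eq!(promise_ids_byte_len(4), Some(32));
        assert_eq!(promise_ids_byte_len(u64::max_value()), None); // would overflow
    }
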
@@ -744,15 +844,20 @@ impl<'a> VMLogic<'a> {
     ///
     /// Index of the new promise that uniquely identifies it within the current execution of the
     /// method.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + cost of reading and decoding the account id + dispatch cost of the receipt`.
+    /// `used_gas := burnt_gas + exec cost of the receipt`.
     pub fn promise_batch_create(
         &mut self,
         account_id_len: u64,
         account_id_ptr: u64,
     ) -> Result<u64> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("promise_batch_create".to_string()).into());
         }
-
         let account_id = self.read_and_parse_account_id(account_id_ptr, account_id_len)?;
         let sir = account_id == self.context.current_account_id;
         self.pay_gas_for_new_receipt(sir, &[])?;
@@ -779,12 +884,18 @@ impl<'a> VMLogic<'a> {
     ///
     /// Index of the new promise that uniquely identifies it within the current execution of the
     /// method.
+    ///
+    /// # Cost
+    ///
+    /// `base + cost of reading and decoding the account id + dispatch&execution cost of the receipt
+    ///  + dispatch&execution base cost for each data dependency`
     pub fn promise_batch_then(
         &mut self,
         promise_idx: u64,
         account_id_len: u64,
         account_id_ptr: u64,
     ) -> Result<u64> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("promise_batch_then".to_string()).into());
         }
@@ -848,7 +959,13 @@ impl<'a> VMLogic<'a> {
     /// * If the promise pointed by the `promise_idx` is an ephemeral promise created by
     /// `promise_and` returns `CannotAppendActionToJointPromise`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action fee`
+    /// `used_gas := burnt_gas + exec action fee`
     pub fn promise_batch_action_create_account(&mut self, promise_idx: u64) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView(
                 "promise_batch_action_create_account".to_string(),
@@ -875,20 +992,25 @@ impl<'a> VMLogic<'a> {
     /// * If `code_len + code_ptr` points outside the memory of the guest or host returns
     /// `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading vector from memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_deploy_contract(
         &mut self,
         promise_idx: u64,
         code_len: u64,
         code_ptr: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView(
                 "promise_batch_action_deploy_contract".to_string(),
             )
             .into());
         }
-        let code =
-            Self::get_from_memory_or_register(self.memory, &self.registers, code_ptr, code_len)?;
+        let code = self.get_vec_from_memory_or_register(code_ptr, code_len)?;
 
         let (receipt_idx, sir) = self.promise_idx_to_receipt_idx_with_sir(promise_idx)?;
 
@@ -917,6 +1039,12 @@ impl<'a> VMLogic<'a> {
     /// `amount_ptr + 16` points outside the memory of the guest or host returns
     /// `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading vector from memory
+    ///  + cost of reading u128, method_name and arguments from the memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_function_call(
         &mut self,
         promise_idx: u64,
@@ -927,28 +1055,19 @@ impl<'a> VMLogic<'a> {
         amount_ptr: u64,
         gas: Gas,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView(
                 "promise_batch_action_function_call".to_string(),
             )
             .into());
         }
-        let amount = Self::memory_get_u128(self.memory, amount_ptr)?;
-        let method_name = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            method_name_ptr,
-            method_name_len,
-        )?;
+        let amount = self.memory_get_u128(amount_ptr)?;
+        let method_name = self.get_vec_from_memory_or_register(method_name_ptr, method_name_len)?;
         if method_name.is_empty() {
             return Err(HostError::EmptyMethodName.into());
         }
-        let arguments = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            arguments_ptr,
-            arguments_len,
-        )?;
+        let arguments = self.get_vec_from_memory_or_register(arguments_ptr, arguments_len)?;
 
         let (receipt_idx, sir) = self.promise_idx_to_receipt_idx_with_sir(promise_idx)?;
 
@@ -980,17 +1099,23 @@ impl<'a> VMLogic<'a> {
     /// * If `amount_ptr + 16` points outside the memory of the guest or host returns
     /// `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading u128 from memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_transfer(
         &mut self,
         promise_idx: u64,
         amount_ptr: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(
                 HostError::ProhibitedInView("promise_batch_action_transfer".to_string()).into()
             );
         }
-        let amount = Self::memory_get_u128(self.memory, amount_ptr)?;
+        let amount = self.memory_get_u128(amount_ptr)?;
 
         let (receipt_idx, sir) = self.promise_idx_to_receipt_idx_with_sir(promise_idx)?;
 
@@ -1015,6 +1140,11 @@ impl<'a> VMLogic<'a> {
     /// * If `amount_ptr + 16` or `public_key_len + public_key_ptr` points outside the memory of the
     /// guest or host returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading public key from memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_stake(
         &mut self,
         promise_idx: u64,
@@ -1022,18 +1152,14 @@ impl<'a> VMLogic<'a> {
         public_key_len: u64,
         public_key_ptr: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(
                 HostError::ProhibitedInView("promise_batch_action_stake".to_string()).into()
             );
         }
-        let amount = Self::memory_get_u128(self.memory, amount_ptr)?;
-        let public_key = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            public_key_ptr,
-            public_key_len,
-        )?;
+        let amount = self.memory_get_u128(amount_ptr)?;
+        let public_key = self.get_vec_from_memory_or_register(public_key_ptr, public_key_len)?;
 
         let (receipt_idx, sir) = self.promise_idx_to_receipt_idx_with_sir(promise_idx)?;
 
@@ -1058,6 +1184,11 @@ impl<'a> VMLogic<'a> {
     /// * If `public_key_len + public_key_ptr` points outside the memory of the guest or host
     /// returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading public key from memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_add_key_with_full_access(
         &mut self,
         promise_idx: u64,
@@ -1065,18 +1196,14 @@ impl<'a> VMLogic<'a> {
         public_key_ptr: u64,
         nonce: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView(
                 "promise_batch_action_add_key_with_full_access".to_string(),
             )
             .into());
         }
-        let public_key = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            public_key_ptr,
-            public_key_len,
-        )?;
+        let public_key = self.get_vec_from_memory_or_register(public_key_ptr, public_key_len)?;
 
         let (receipt_idx, sir) = self.promise_idx_to_receipt_idx_with_sir(promise_idx)?;
 
@@ -1102,6 +1229,12 @@ impl<'a> VMLogic<'a> {
     /// `receiver_id_len + receiver_id_ptr` or `method_names_len + method_names_ptr` points outside
     /// the memory of the guest or host returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading vector from memory
+    ///  + cost of reading u128, method_names and public key from the memory + cost of reading and parsing account name`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_add_key_with_function_call(
         &mut self,
         promise_idx: u64,
@@ -1114,27 +1247,19 @@ impl<'a> VMLogic<'a> {
         method_names_len: u64,
         method_names_ptr: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView(
                 "promise_batch_action_add_key_with_function_call".to_string(),
             )
             .into());
         }
-        let public_key = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            public_key_ptr,
-            public_key_len,
-        )?;
-        let allowance = Self::memory_get_u128(self.memory, allowance_ptr)?;
+        let public_key = self.get_vec_from_memory_or_register(public_key_ptr, public_key_len)?;
+        let allowance = self.memory_get_u128(allowance_ptr)?;
         let allowance = if allowance > 0 { Some(allowance) } else { None };
         let receiver_id = self.read_and_parse_account_id(receiver_id_ptr, receiver_id_len)?;
-        let method_names = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            method_names_ptr,
-            method_names_len,
-        )?;
+        let method_names =
+            self.get_vec_from_memory_or_register(method_names_ptr, method_names_len)?;
         // Use `,` separator to split `method_names` into a vector of method names.
         let method_names =
             method_names
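
The continuation of this hunk splits the `method_names` buffer on `,` into individual names, per the comment above. A stand-alone sketch of that splitting (whether empty entries are rejected is not visible in this hunk, so this version simply keeps whatever the split produces):

    /// Split a comma-separated method-name buffer into individual names.
    fn split_method_names(method_names: &[u8]) -> Vec<Vec<u8>> {
        method_names.split(|c| *c == b',').map(|name| name.to_vec()).collect()
    }

    fn main() {
        let names = split_method_names(b"get_balance,transfer");
        assert_eq!(names, vec![b"get_balance".to_vec(), b"transfer".to_vec()]);
    }
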
@@ -1185,23 +1310,24 @@ impl<'a> VMLogic<'a> {
     /// * If `public_key_len + public_key_ptr` points outside the memory of the guest or host
     /// returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading public key from memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_delete_key(
         &mut self,
         promise_idx: u64,
         public_key_len: u64,
         public_key_ptr: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(
                 HostError::ProhibitedInView("promise_batch_action_delete_key".to_string()).into()
             );
         }
-        let public_key = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            public_key_ptr,
-            public_key_len,
-        )?;
+        let public_key = self.get_vec_from_memory_or_register(public_key_ptr, public_key_len)?;
 
         let (receipt_idx, sir) = self.promise_idx_to_receipt_idx_with_sir(promise_idx)?;
 
@@ -1223,12 +1349,18 @@ impl<'a> VMLogic<'a> {
     /// * If `beneficiary_id_len + beneficiary_id_ptr` points outside the memory of the guest or
     /// host returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `burnt_gas := base + dispatch action base fee + dispatch action per byte fee * num bytes + cost of reading and parsing account id from memory`
+    /// `used_gas := burnt_gas + exec action base fee + exec action per byte fee * num bytes`
     pub fn promise_batch_action_delete_account(
         &mut self,
         promise_idx: u64,
         beneficiary_id_len: u64,
         beneficiary_id_ptr: u64,
     ) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView(
                 "promise_batch_action_delete_account".to_string(),
@@ -1257,7 +1389,12 @@ impl<'a> VMLogic<'a> {
     /// * If there is only one callback returns `1`;
     /// * If there are multiple callbacks (e.g. created through `promise_and`) returns their number;
     /// * If the function was called not through the callback returns `0`.
-    pub fn promise_results_count(&self) -> Result<u64> {
+    ///
+    /// # Cost
+    ///
+    /// `base`
+    pub fn promise_results_count(&mut self) -> Result<u64> {
+        self.gas_counter.pay_base(base)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("promise_results_count".to_string()).into());
         }
@@ -1282,21 +1419,23 @@ impl<'a> VMLogic<'a> {
     /// * If `result_id` does not correspond to an existing result returns `InvalidResultIndex`;
     /// * If copying the blob exhausts the memory limit it returns `MemoryAccessViolation`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base + cost of writing data into a register`
     pub fn promise_result(&mut self, result_idx: u64, register_id: u64) -> Result<u64> {
-        let Self { promise_results, registers, gas_counter, config, context, .. } = self;
-        if context.is_view {
+        self.gas_counter.pay_base(base)?;
+        if self.context.is_view {
             return Err(HostError::ProhibitedInView("promise_result".to_string()).into());
         }
-        gas_counter.pay_base(config.ext_costs.promise_result_base)?;
-        match promise_results
+        match self
+            .promise_results
             .get(result_idx as usize)
             .ok_or(HostError::InvalidPromiseResultIndex)?
         {
             PromiseResult::NotReady => Ok(0),
             PromiseResult::Successful(data) => {
-                gas_counter
-                    .pay_per_byte(config.ext_costs.promise_result_byte, data.len() as u64)?;
-                Self::internal_write_register(registers, gas_counter, config, register_id, data)?;
+                self.internal_write_register(register_id, data.clone())?;
                 Ok(1)
             }
             PromiseResult::Failed => Ok(2),
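
The match above fixes the return-code convention for `promise_result`: 0 for a result that is not ready, 1 for success (with the data also copied into the register), 2 for failure. Modeled on its own:

    /// Stand-alone model of the return codes used by `promise_result` above.
    enum PromiseResult {
        NotReady,
        Successful(Vec<u8>),
        Failed,
    }

    fn promise_result_code(result: &PromiseResult) -> u64 {
        match result {
            PromiseResult::NotReady => 0,
            PromiseResult::Successful(_) => 1, // data also goes into the register
            PromiseResult::Failed => 2,
        }
    }

    fn main() {
        assert_eq!(promise_result_code(&PromiseResult::Successful(vec![1, 2, 3])), 1);
    }
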
@@ -1310,11 +1449,16 @@ impl<'a> VMLogic<'a> {
     ///
     /// * If `promise_idx` does not correspond to an existing promise returns `InvalidPromiseIndex`.
     /// * If called as view function returns `ProhibitedInView`.
+    ///
+    /// # Cost
+    ///
+    /// `base + promise_return`
     pub fn promise_return(&mut self, promise_idx: u64) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(promise_return)?;
         if self.context.is_view {
             return Err(HostError::ProhibitedInView("promise_return".to_string()).into());
         }
-        self.gas_counter.pay_base(self.config.ext_costs.promise_return)?;
         match self
             .promises
             .get(promise_idx as usize)
@@ -1339,9 +1483,12 @@ impl<'a> VMLogic<'a> {
     ///
     /// If `value_len + value_ptr` exceeds the memory container or points to an unused register it
     /// returns `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    /// `base + cost of reading return value from memory or register + dispatch&exec cost per byte of the data sent * num data receivers`
     pub fn value_return(&mut self, value_len: u64, value_ptr: u64) -> Result<()> {
-        let return_val =
-            Self::get_from_memory_or_register(self.memory, &self.registers, value_ptr, value_len)?;
+        self.gas_counter.pay_base(base)?;
+        let return_val = self.get_vec_from_memory_or_register(value_ptr, value_len)?;
         let mut burn_gas: Gas = 0;
         let num_bytes = return_val.len() as u64;
         let data_cfg = &self.fees_config.data_receipt_creation_config;
@@ -1372,7 +1519,12 @@ impl<'a> VMLogic<'a> {
     }
 
     /// Terminates the execution of the program with panic `GuestPanic`.
-    pub fn panic(&self) -> Result<()> {
+    ///
+    /// # Cost
+    ///
+    /// `base`
+    pub fn panic(&mut self) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         Err(HostError::GuestPanic("explicit guest panic".to_string()).into())
     }
 
@@ -1384,7 +1536,11 @@ impl<'a> VMLogic<'a> {
     /// * If string extends outside the memory of the guest with `MemoryAccessViolation`;
     /// * If string is not UTF-8 returns `BadUtf8`.
     /// * If string is longer than `max_log_len` returns `BadUtf8`.
+    ///
+    /// # Cost
+    /// `base + cost of reading and decoding a utf8 string`
     pub fn panic_utf8(&mut self, len: u64, ptr: u64) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         Err(HostError::GuestPanic(self.get_utf8_string(len, ptr)?).into())
     }
 
@@ -1396,9 +1552,16 @@ impl<'a> VMLogic<'a> {
     /// * If string extends outside the memory of the guest with `MemoryAccessViolation`;
     /// * If string is not UTF-8 returns `BadUtf8`.
     /// * If string is longer than `max_log_len` returns `BadUtf8`.
+    ///
+    /// # Cost
+    ///
+    /// `base + log_base + log_byte * num_bytes + utf8 decoding cost`
     pub fn log_utf8(&mut self, len: u64, ptr: u64) -> Result<()> {
-        let message = format!("LOG: {}", self.get_utf8_string(len, ptr)?);
-        self.logs.push(message);
+        self.gas_counter.pay_base(base)?;
+        let message = self.get_utf8_string(len, ptr)?;
+        self.gas_counter.pay_base(log_base)?;
+        self.gas_counter.pay_per_byte(log_byte, message.as_bytes().len() as u64)?;
+        self.logs.push(format!("LOG: {}", message));
         Ok(())
     }
 
@@ -1409,37 +1572,67 @@ impl<'a> VMLogic<'a> {
     ///
     /// * If string extends outside the memory of the guest with `MemoryAccessViolation`;
     /// * If string is not UTF-16 returns `BadUtf16`.
+    ///
+    /// # Cost
+    ///
+    /// `base + log_base + log_byte * num_bytes + utf16 decoding cost`
     pub fn log_utf16(&mut self, len: u64, ptr: u64) -> Result<()> {
-        let message = format!("LOG: {}", self.get_utf16_string(len, ptr)?);
-        self.logs.push(message);
+        self.gas_counter.pay_base(base)?;
+        let message = self.get_utf16_string(len, ptr)?;
+        self.gas_counter.pay_base(log_base)?;
+        self.gas_counter.pay_per_byte(
+            log_byte,
+            message.encode_utf16().count() as u64 * size_of::<u16>() as u64,
+        )?;
+        self.logs.push(format!("LOG: {}", message));
         Ok(())
     }
 
     /// Special import kept for compatibility with AssemblyScript contracts. Not called by smart
     /// contracts directly, but instead called by the code generated by AssemblyScript.
+    ///
+    /// # Cost
+    ///
+    /// `base + log_base + log_byte * num_bytes + utf16 decoding cost`
     pub fn abort(&mut self, msg_ptr: u32, filename_ptr: u32, line: u32, col: u32) -> Result<()> {
+        self.gas_counter.pay_base(base)?;
         if msg_ptr < 4 || filename_ptr < 4 {
             return Err(HostError::BadUTF16.into());
         }
-        let msg_len = Self::read_memory_u32(self.memory, (msg_ptr - 4) as u64)?;
-        let filename_len = Self::read_memory_u32(self.memory, (filename_ptr - 4) as u64)?;
+
+        let msg_len = self.memory_get_u32((msg_ptr - 4) as u64)?;
+        let filename_len = self.memory_get_u32((filename_ptr - 4) as u64)?;
 
         let msg = self.get_utf16_string(msg_len as u64, msg_ptr as u64)?;
         let filename = self.get_utf16_string(filename_len as u64, filename_ptr as u64)?;
 
         let message = format!("{}, filename: \"{}\" line: {} col: {}", msg, filename, line, col);
+        self.gas_counter.pay_base(log_base)?;
+        self.gas_counter.pay_per_byte(log_byte, message.as_bytes().len() as u64)?;
         self.logs.push(format!("ABORT: {}", message));
 
         Err(HostError::GuestPanic(message).into())
     }
 
+    // ###############
+    // # Storage API #
+    // ###############
+
     /// Reads account id from the given location in memory.
     ///
     /// # Errors
     ///
     /// * If account is not UTF-8 encoded then returns `BadUtf8`;
-    pub fn read_and_parse_account_id(&self, ptr: u64, len: u64) -> Result<AccountId> {
-        let buf = Self::get_from_memory_or_register(self.memory, &self.registers, ptr, len)?;
+    ///
+    /// # Cost
+    ///
+    /// This is a helper function that encapsulates the following costs:
+    /// cost of reading buffer from register or memory,
+    /// `utf8_decoding_base + utf8_decoding_byte * num_bytes`.
+    fn read_and_parse_account_id(&mut self, ptr: u64, len: u64) -> Result<AccountId> {
+        let buf = self.get_vec_from_memory_or_register(ptr, len)?;
+        self.gas_counter.pay_base(utf8_decoding_base)?;
+        self.gas_counter.pay_per_byte(utf8_decoding_byte, buf.len() as u64)?;
         let account_id = AccountId::from_utf8(buf).map_err(|_| HostError::BadUTF8)?;
         Ok(account_id)
     }
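
`read_and_parse_account_id` above fetches raw bytes (from guest memory or a register) and then requires them to be valid UTF-8. A simplified model, with `String` standing in for the real `AccountId` type:

    /// Simplified model of `read_and_parse_account_id`: raw bytes in, UTF-8
    /// validation, account id out.
    fn parse_account_id(buf: Vec<u8>) -> Result<String, &'static str> {
        String::from_utf8(buf).map_err(|_| "BadUTF8")
    }

    fn main() {
        assert!(parse_account_id(b"alice.near".to_vec()).is_ok());
        assert!(parse_account_id(vec![0xff, 0xfe]).is_err()); // not valid UTF-8
    }
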
@@ -1454,6 +1647,13 @@ impl<'a> VMLogic<'a> {
     ///   to an unused register it returns `MemoryAccessViolation`;
     /// * If returning the preempted value into the registers exceed the memory container it returns
     ///   `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_write_base + storage_write_key_byte * num_key_bytes + storage_write_value_byte * num_value_bytes
+    /// + get_vec_from_memory_or_register_cost x 2`.
+    ///
+    /// If a value was evicted it costs additional `storage_write_evicted_byte * num_evicted_bytes + internal_write_register_cost`.
     pub fn storage_write(
         &mut self,
         key_len: u64,
@@ -1462,42 +1662,28 @@ impl<'a> VMLogic<'a> {
         value_ptr: u64,
         register_id: u64,
     ) -> Result<u64> {
-        let Self {
-            memory,
-            registers,
-            gas_counter,
-            config,
-            fees_config,
-            valid_iterators,
-            invalid_iterators,
-            ext,
-            ..
-        } = self;
-        gas_counter.pay_base(config.ext_costs.storage_write_base)?;
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(storage_write_base)?;
         // All iterators that were valid now become invalid
-        for invalidated_iter_idx in valid_iterators.drain() {
-            ext.storage_iter_drop(invalidated_iter_idx)?;
-            invalid_iterators.insert(invalidated_iter_idx);
+        for invalidated_iter_idx in self.valid_iterators.drain() {
+            self.ext.storage_iter_drop(invalidated_iter_idx)?;
+            self.invalid_iterators.insert(invalidated_iter_idx);
         }
-        let key = Self::get_from_memory_or_register(*memory, registers, key_ptr, key_len)?;
-        let value = Self::get_from_memory_or_register(*memory, registers, value_ptr, value_len)?;
-        gas_counter.pay_per_byte(config.ext_costs.storage_write_value_byte, key.len() as u64)?;
-        gas_counter.pay_per_byte(config.ext_costs.storage_write_value_byte, value.len() as u64)?;
+        let key = self.get_vec_from_memory_or_register(key_ptr, key_len)?;
+        let value = self.get_vec_from_memory_or_register(value_ptr, value_len)?;
+        self.gas_counter.pay_per_byte(storage_write_key_byte, key.len() as u64)?;
+        self.gas_counter.pay_per_byte(storage_write_value_byte, value.len() as u64)?;
         let evicted = self.ext.storage_set(&key, &value)?;
-        let storage_config = &fees_config.storage_usage_config;
+        let storage_config = &self.fees_config.storage_usage_config;
         match evicted {
             Some(old_value) => {
                 self.current_storage_usage -=
                     (old_value.len() as u64) * storage_config.value_cost_per_byte;
                 self.current_storage_usage +=
                     value.len() as u64 * storage_config.value_cost_per_byte;
-                Self::internal_write_register(
-                    registers,
-                    gas_counter,
-                    config,
-                    register_id,
-                    &old_value,
-                )?;
+                self.gas_counter
+                    .pay_per_byte(storage_write_evicted_byte, old_value.len() as u64)?;
+                self.internal_write_register(register_id, old_value)?;
                 Ok(1)
             }
             None => {
@@ -1521,17 +1707,25 @@ impl<'a> VMLogic<'a> {
     ///   returns `MemoryAccessViolation`;
     /// * If returning the preempted value into the registers exceed the memory container it returns
     ///   `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_read_base + storage_read_key_byte * num_key_bytes + storage_read_value_byte * num_value_bytes
+    ///  + cost to read key from register + cost to write value into register`.
     pub fn storage_read(&mut self, key_len: u64, key_ptr: u64, register_id: u64) -> Result<u64> {
-        let Self { ext, memory, registers, gas_counter, config, .. } = self;
-        gas_counter.pay_base(config.ext_costs.storage_read_base)?;
-        let key = Self::get_from_memory_or_register(*memory, registers, key_ptr, key_len)?;
-        gas_counter.pay_per_byte(config.ext_costs.storage_read_key_byte, key.len() as u64)?;
-        let read = ext.storage_get(&key)?;
-        match read {
+        self.gas_counter.pay_base(base)?;
+
+        self.gas_counter.pay_base(storage_read_base)?;
+        let key = self.get_vec_from_memory_or_register(key_ptr, key_len)?;
+        self.gas_counter.pay_per_byte(storage_read_key_byte, key.len() as u64)?;
+        let nodes_before = self.ext.get_touched_nodes_count();
+        let read = self.ext.storage_get(&key);
+        self.gas_counter
+            .pay_per_byte(touching_trie_node, self.ext.get_touched_nodes_count() - nodes_before)?;
+        match read? {
             Some(value) => {
-                gas_counter
-                    .pay_per_byte(config.ext_costs.storage_read_key_byte, value.len() as u64)?;
-                Self::internal_write_register(registers, gas_counter, config, register_id, &value)?;
+                self.gas_counter.pay_per_byte(storage_read_value_byte, value.len() as u64)?;
+                self.internal_write_register(register_id, value)?;
                 Ok(1)
             }
             None => Ok(0),
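
The storage functions above all follow the same metering pattern for trie access: snapshot `get_touched_nodes_count`, run the storage operation, pay `touching_trie_node` for the delta, and only then propagate the operation's own result with `?`. The pattern in the abstract (the closures stand in for the gas counter and external storage and are assumptions, not the real types):

    /// Snapshot a counter, run the operation, pay for the delta, then surface
    /// the operation's result.
    fn metered<T, E>(
        touched_nodes: impl Fn() -> u64,
        mut pay_per_node: impl FnMut(u64) -> Result<(), E>,
        op: impl FnOnce() -> Result<T, E>,
    ) -> Result<T, E> {
        let before = touched_nodes();
        let result = op(); // run the storage operation first
        pay_per_node(touched_nodes() - before)?; // charge for touched trie nodes
        result
    }

    fn main() {
        let mut paid = 0u64;
        let counter = std::cell::Cell::new(0u64);
        let res: Result<u32, ()> = metered(
            || counter.get(),
            |n| {
                paid += n;
                Ok(())
            },
            || {
                counter.set(3); // the "storage op" touches 3 nodes
                Ok(7)
            },
        );
        assert_eq!(res, Ok(7));
        assert_eq!(paid, 3);
    }
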
@@ -1550,40 +1744,35 @@ impl<'a> VMLogic<'a> {
     /// * If the registers exceed the memory limit returns `MemoryAccessViolation`;
     /// * If returning the preempted value into the registers exceed the memory container it returns
     ///   `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_remove_base + storage_remove_key_byte * num_key_bytes + storage_remove_ret_value_byte * num_value_bytes
+    /// + cost to read the key + cost to write the value`.
     pub fn storage_remove(&mut self, key_len: u64, key_ptr: u64, register_id: u64) -> Result<u64> {
-        let Self {
-            ext,
-            memory,
-            registers,
-            gas_counter,
-            config,
-            fees_config,
-            valid_iterators,
-            invalid_iterators,
-            ..
-        } = self;
-        gas_counter.pay_base(config.ext_costs.storage_remove_base)?;
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(storage_remove_base)?;
         // All iterators that were valid now become invalid
-        for invalidated_iter_idx in valid_iterators.drain() {
-            ext.storage_iter_drop(invalidated_iter_idx)?;
-            invalid_iterators.insert(invalidated_iter_idx);
+        for invalidated_iter_idx in self.valid_iterators.drain() {
+            self.ext.storage_iter_drop(invalidated_iter_idx)?;
+            self.invalid_iterators.insert(invalidated_iter_idx);
         }
-        let key = Self::get_from_memory_or_register(*memory, registers, key_ptr, key_len)?;
+        let key = self.get_vec_from_memory_or_register(key_ptr, key_len)?;
 
-        gas_counter.pay_per_byte(config.ext_costs.storage_remove_key_byte, key.len() as u64)?;
-        let removed = ext.storage_remove(&key)?;
-        let storage_config = &fees_config.storage_usage_config;
-        match removed {
+        self.gas_counter.pay_per_byte(storage_remove_key_byte, key.len() as u64)?;
+        let nodes_before = self.ext.get_touched_nodes_count();
+        let removed = self.ext.storage_remove(&key);
+        self.gas_counter
+            .pay_per_byte(touching_trie_node, self.ext.get_touched_nodes_count() - nodes_before)?;
+        let storage_config = &self.fees_config.storage_usage_config;
+        match removed? {
             Some(value) => {
-                gas_counter.pay_per_byte(
-                    config.ext_costs.storage_remove_ret_value_byte,
-                    value.len() as u64,
-                )?;
+                self.gas_counter.pay_per_byte(storage_remove_ret_value_byte, value.len() as u64)?;
                 self.current_storage_usage -=
                     (value.len() as u64) * storage_config.value_cost_per_byte;
                 self.current_storage_usage -= key.len() as u64 * storage_config.key_cost_per_byte;
                 self.current_storage_usage -= storage_config.data_record_cost;
-                Self::internal_write_register(registers, gas_counter, config, register_id, &value)?;
+                self.internal_write_register(register_id, value)?;
                 Ok(1)
             }
             None => Ok(0),
@@ -1597,14 +1786,20 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If `key_len + key_ptr` exceeds the memory container it returns `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_has_key_base + storage_has_key_byte * num_bytes + cost of reading key`
     pub fn storage_has_key(&mut self, key_len: u64, key_ptr: u64) -> Result<u64> {
-        self.gas_counter.pay_base(self.config.ext_costs.storage_has_key_base)?;
-        let key =
-            Self::get_from_memory_or_register(self.memory, &self.registers, key_ptr, key_len)?;
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(storage_has_key_base)?;
+        let key = self.get_vec_from_memory_or_register(key_ptr, key_len)?;
+        self.gas_counter.pay_per_byte(storage_has_key_byte, key.len() as u64)?;
+        let nodes_before = self.ext.get_touched_nodes_count();
+        let res = self.ext.storage_has_key(&key);
         self.gas_counter
-            .pay_per_byte(self.config.ext_costs.storage_has_key_byte, key.len() as u64)?;
-        let res = self.ext.storage_has_key(&key)?;
-        Ok(res as u64)
+            .pay_per_byte(touching_trie_node, self.ext.get_touched_nodes_count() - nodes_before)?;
+        Ok(res? as u64)
     }
 
     /// Creates an iterator object inside the host. Returns the identifier that uniquely
@@ -1616,19 +1811,22 @@ impl<'a> VMLogic<'a> {
     /// # Errors
     ///
     /// If `prefix_len + prefix_ptr` exceeds the memory container it returns `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_iter_create_prefix_base + storage_iter_create_prefix_byte * num_prefix_bytes
+    ///  + cost of reading the prefix`.
     pub fn storage_iter_prefix(&mut self, prefix_len: u64, prefix_ptr: u64) -> Result<u64> {
-        self.gas_counter.pay_base(self.config.ext_costs.storage_iter_create_prefix_base)?;
-        let prefix = Self::get_from_memory_or_register(
-            self.memory,
-            &self.registers,
-            prefix_ptr,
-            prefix_len,
-        )?;
-        self.gas_counter.pay_per_byte(
-            self.config.ext_costs.storage_iter_create_key_byte,
-            prefix.len() as u64,
-        )?;
-        let iterator_index = self.ext.storage_iter(&prefix)?;
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(storage_iter_create_prefix_base)?;
+
+        let prefix = self.get_vec_from_memory_or_register(prefix_ptr, prefix_len)?;
+        self.gas_counter.pay_per_byte(storage_iter_create_prefix_byte, prefix.len() as u64)?;
+        let nodes_before = self.ext.get_touched_nodes_count();
+        let iterator_index = self.ext.storage_iter(&prefix);
+        self.gas_counter
+            .pay_per_byte(touching_trie_node, self.ext.get_touched_nodes_count() - nodes_before)?;
+        let iterator_index = iterator_index?;
         self.valid_iterators.insert(iterator_index);
         Ok(iterator_index)
     }
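
Each storage call above snapshots `ext.get_touched_nodes_count()` before the operation and charges `touching_trie_node` per node touched afterwards, deferring the `?` on the result until the gas has been paid. A sketch of that pattern, with an illustrative trait and mock rather than the real `External` implementation:

```rust
/// Illustrative trait: exposes a monotonically increasing count of trie nodes
/// touched by storage operations, as the host logic assumes.
trait StorageExt {
    fn get_touched_nodes_count(&self) -> u64;
    fn storage_op(&mut self, key: &[u8]) -> Result<Option<Vec<u8>>, &'static str>;
}

/// Charge `gas_per_node` for every node touched by one storage operation,
/// even if the operation itself failed, then propagate the result.
fn charged_storage_op<E: StorageExt>(
    ext: &mut E,
    key: &[u8],
    gas_per_node: u64,
    burnt_gas: &mut u64,
) -> Result<Option<Vec<u8>>, &'static str> {
    let nodes_before = ext.get_touched_nodes_count();
    let res = ext.storage_op(key);
    let touched = ext.get_touched_nodes_count() - nodes_before;
    *burnt_gas += gas_per_node.saturating_mul(touched);
    res
}

struct MockExt {
    touched: u64,
}

impl StorageExt for MockExt {
    fn get_touched_nodes_count(&self) -> u64 {
        self.touched
    }
    fn storage_op(&mut self, _key: &[u8]) -> Result<Option<Vec<u8>>, &'static str> {
        self.touched += 2; // pretend the lookup walked two trie nodes
        Ok(None)
    }
}

fn main() {
    let mut ext = MockExt { touched: 0 };
    let mut burnt = 0;
    charged_storage_op(&mut ext, b"key", 5, &mut burnt).unwrap();
    assert_eq!(burnt, 10); // 2 nodes * 5 gas per node
}
```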
@@ -1642,6 +1840,11 @@ impl<'a> VMLogic<'a> {
     ///
     /// If `start_len + start_ptr` or `end_len + end_ptr` exceeds the memory container or points to
     /// an unused register it returns `MemoryAccessViolation`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_iter_create_range_base + storage_iter_create_from_byte * num_from_bytes
+    ///  + storage_iter_create_to_byte * num_to_bytes + cost of reading the from key + cost of reading the to key`.
     pub fn storage_iter_range(
         &mut self,
         start_len: u64,
@@ -1649,20 +1852,18 @@ impl<'a> VMLogic<'a> {
         end_len: u64,
         end_ptr: u64,
     ) -> Result<u64> {
-        self.gas_counter.pay_base(self.config.ext_costs.storage_iter_create_range_base)?;
-        let start_key =
-            Self::get_from_memory_or_register(self.memory, &self.registers, start_ptr, start_len)?;
-        let end_key =
-            Self::get_from_memory_or_register(self.memory, &self.registers, end_ptr, end_len)?;
-        self.gas_counter.pay_per_byte(
-            self.config.ext_costs.storage_iter_create_key_byte,
-            start_key.len() as u64,
-        )?;
-        self.gas_counter.pay_per_byte(
-            self.config.ext_costs.storage_iter_create_key_byte,
-            end_key.len() as u64,
-        )?;
-        let iterator_index = self.ext.storage_iter_range(&start_key, &end_key)?;
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(storage_iter_create_range_base)?;
+        let start_key = self.get_vec_from_memory_or_register(start_ptr, start_len)?;
+        let end_key = self.get_vec_from_memory_or_register(end_ptr, end_len)?;
+        self.gas_counter.pay_per_byte(storage_iter_create_from_byte, start_key.len() as u64)?;
+        self.gas_counter.pay_per_byte(storage_iter_create_to_byte, end_key.len() as u64)?;
+
+        let nodes_before = self.ext.get_touched_nodes_count();
+        let iterator_index = self.ext.storage_iter_range(&start_key, &end_key);
+        self.gas_counter
+            .pay_per_byte(touching_trie_node, self.ext.get_touched_nodes_count() - nodes_before)?;
+        let iterator_index = iterator_index?;
         self.valid_iterators.insert(iterator_index);
         Ok(iterator_index)
     }
@@ -1689,45 +1890,35 @@ impl<'a> VMLogic<'a> {
     ///     * Iterator was not called `next` yet.
     ///     * `next` was already called on the iterator and it is currently pointing at the key
     ///       `curr` such that `curr<=key<end`.
+    ///
+    /// # Cost
+    ///
+    /// `base + storage_iter_next_base + storage_iter_next_key_byte * num_key_bytes + storage_iter_next_value_byte * num_value_bytes
+    ///  + writing key to register + writing value to register`.
     pub fn storage_iter_next(
         &mut self,
         iterator_id: u64,
         key_register_id: u64,
         value_register_id: u64,
     ) -> Result<u64> {
-        let Self {
-            ext, registers, gas_counter, config, valid_iterators, invalid_iterators, ..
-        } = self;
-        gas_counter.pay_base(config.ext_costs.storage_iter_next_base)?;
-        if invalid_iterators.contains(&iterator_id) {
+        self.gas_counter.pay_base(base)?;
+        self.gas_counter.pay_base(storage_iter_next_base)?;
+        if self.invalid_iterators.contains(&iterator_id) {
             return Err(HostError::IteratorWasInvalidated.into());
-        } else if !valid_iterators.contains(&iterator_id) {
+        } else if !self.valid_iterators.contains(&iterator_id) {
             return Err(HostError::InvalidIteratorIndex.into());
         }
 
-        let value = ext.storage_iter_next(iterator_id)?;
-        match value {
+        let nodes_before = self.ext.get_touched_nodes_count();
+        let value = self.ext.storage_iter_next(iterator_id);
+        self.gas_counter
+            .pay_per_byte(touching_trie_node, self.ext.get_touched_nodes_count() - nodes_before)?;
+        match value? {
             Some((key, value)) => {
-                gas_counter
-                    .pay_per_byte(config.ext_costs.storage_iter_next_key_byte, key.len() as u64)?;
-                gas_counter.pay_per_byte(
-                    config.ext_costs.storage_iter_next_value_byte,
-                    value.len() as u64,
-                )?;
-                Self::internal_write_register(
-                    registers,
-                    gas_counter,
-                    config,
-                    key_register_id,
-                    &key,
-                )?;
-                Self::internal_write_register(
-                    registers,
-                    gas_counter,
-                    config,
-                    value_register_id,
-                    &value,
-                )?;
+                self.gas_counter.pay_per_byte(storage_iter_next_key_byte, key.len() as u64)?;
+                self.gas_counter.pay_per_byte(storage_iter_next_value_byte, value.len() as u64)?;
+                self.internal_write_register(key_register_id, key)?;
+                self.internal_write_register(value_register_id, value)?;
                 Ok(1)
             }
             None => Ok(0),
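
The mutating calls (`storage_write`, `storage_remove`) drain `valid_iterators` into `invalid_iterators`, and `storage_iter_next` distinguishes an iterator that was invalidated from one that never existed. A toy model of that bookkeeping, with string errors standing in for the real `HostError` variants:

```rust
use std::collections::HashSet;

struct IteratorBook {
    valid: HashSet<u64>,
    invalid: HashSet<u64>,
}

impl IteratorBook {
    /// Any write or remove invalidates every live iterator.
    fn invalidate_all(&mut self) {
        for id in self.valid.drain() {
            self.invalid.insert(id);
        }
    }

    /// Mirrors the checks at the top of `storage_iter_next`.
    fn check(&self, id: u64) -> Result<(), &'static str> {
        if self.invalid.contains(&id) {
            Err("IteratorWasInvalidated")
        } else if !self.valid.contains(&id) {
            Err("InvalidIteratorIndex")
        } else {
            Ok(())
        }
    }
}

fn main() {
    let mut valid = HashSet::new();
    valid.insert(7u64);
    let mut book = IteratorBook { valid, invalid: HashSet::new() };
    assert!(book.check(7).is_ok());
    book.invalidate_all(); // e.g. after a storage_write/storage_remove
    assert_eq!(book.check(7), Err("IteratorWasInvalidated"));
    assert_eq!(book.check(8), Err("InvalidIteratorIndex"));
}
```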
diff --git a/runtime/near-vm-logic/src/mocks/mock_external.rs b/runtime/near-vm-logic/src/mocks/mock_external.rs
index 344620307d1..a2188d6a012 100644
--- a/runtime/near-vm-logic/src/mocks/mock_external.rs
+++ b/runtime/near-vm-logic/src/mocks/mock_external.rs
@@ -230,6 +230,12 @@ impl External for MockedExternal {
         let value_hash = sodiumoxide::crypto::hash::sha256::hash(data);
         Ok(value_hash.as_ref().to_vec())
     }
+
+    fn get_touched_nodes_count(&self) -> u64 {
+        0
+    }
+
+    fn reset_touched_nodes_counter(&mut self) {}
 }
 
 #[derive(Serialize, Deserialize, Clone, Debug)]
diff --git a/runtime/near-vm-logic/tests/helpers.rs b/runtime/near-vm-logic/tests/helpers.rs
index ff990992117..fd0d1cc6c09 100644
--- a/runtime/near-vm-logic/tests/helpers.rs
+++ b/runtime/near-vm-logic/tests/helpers.rs
@@ -1,6 +1,7 @@
 use near_vm_errors::HostErrorOrStorageError;
 use near_vm_logic::types::Gas;
-use near_vm_logic::VMLogic;
+use near_vm_logic::{ExtCosts, VMLogic, EXT_COSTS_COUNTER};
+use std::collections::HashMap;
 
 type Result<T> = ::std::result::Result<T, HostErrorOrStorageError>;
 
@@ -65,3 +66,35 @@ pub fn promise_batch_action_add_key_with_function_call(
         method_names.as_ptr() as _,
     )
 }
+
+#[macro_export]
+macro_rules! map(
+    { $($key:path: $value:expr,)+ } => {
+        {
+            let mut m = ::std::collections::HashMap::new();
+            $(
+                m.insert($key, $value);
+            )+
+            m
+        }
+     };
+);
+
+#[allow(dead_code)]
+pub fn print_costs() {
+    EXT_COSTS_COUNTER.with(|f| {
+        println!("{:#?}", f.borrow().iter().collect::<std::collections::BTreeMap<_, _>>())
+    });
+    reset_costs_counter();
+}
+
+pub fn reset_costs_counter() {
+    EXT_COSTS_COUNTER.with(|f| f.borrow_mut().clear());
+}
+
+pub fn assert_costs(expected: HashMap<ExtCosts, u64>) {
+    EXT_COSTS_COUNTER.with(|f| {
+        assert_eq!(f.borrow().clone(), expected);
+    });
+    reset_costs_counter();
+}
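
These helpers read a thread-local counter exported by `near-vm-logic` and compare it against an expected `HashMap` built with `map!`. A self-contained sketch of the same idea, assuming nothing about the real `EXT_COSTS_COUNTER` beyond it being a thread-local map from cost kind to count:

```rust
use std::cell::RefCell;
use std::collections::HashMap;

// Illustrative cost kinds; the real enum is `near_vm_logic::ExtCosts`.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
enum Cost {
    Base,
    LogByte,
}

thread_local! {
    // Stand-in for EXT_COSTS_COUNTER: one tally per thread, cleared between checks.
    static COUNTER: RefCell<HashMap<Cost, u64>> = RefCell::new(HashMap::new());
}

fn bump(cost: Cost, n: u64) {
    COUNTER.with(|c| *c.borrow_mut().entry(cost).or_insert(0) += n);
}

fn assert_costs(expected: HashMap<Cost, u64>) {
    COUNTER.with(|c| assert_eq!(c.borrow().clone(), expected));
    COUNTER.with(|c| c.borrow_mut().clear());
}

fn main() {
    bump(Cost::Base, 1);
    bump(Cost::LogByte, 5);
    let mut expected = HashMap::new();
    expected.insert(Cost::Base, 1);
    expected.insert(Cost::LogByte, 5);
    assert_costs(expected);
}
```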
diff --git a/runtime/near-vm-logic/tests/test_iterators.rs b/runtime/near-vm-logic/tests/test_iterators.rs
index 6b410f6fa97..5c881603d87 100644
--- a/runtime/near-vm-logic/tests/test_iterators.rs
+++ b/runtime/near-vm-logic/tests/test_iterators.rs
@@ -23,7 +23,7 @@ fn iter_prefix_check(
     use_register: bool,
 ) -> u64 {
     let iter_id = if use_register {
-        logic.write_register(3, prefix).unwrap();
+        logic.wrapped_internal_write_register(3, prefix).unwrap();
         logic.storage_iter_prefix(std::u64::MAX, 3)
     } else {
         logic.storage_iter_prefix(prefix.len() as _, prefix.as_ptr() as _)
@@ -56,8 +56,8 @@ fn iter_range_check(
     use_register: bool,
 ) -> u64 {
     let iter_id = if use_register {
-        logic.write_register(3, start).unwrap();
-        logic.write_register(4, end).unwrap();
+        logic.wrapped_internal_write_register(3, start).unwrap();
+        logic.wrapped_internal_write_register(4, end).unwrap();
         logic.storage_iter_range(std::u64::MAX, 0, std::u64::MAX, 1)
     } else {
         logic.storage_iter_range(
diff --git a/runtime/near-vm-logic/tests/test_miscs.rs b/runtime/near-vm-logic/tests/test_miscs.rs
index 917d8e7fa09..7d198f0b80c 100644
--- a/runtime/near-vm-logic/tests/test_miscs.rs
+++ b/runtime/near-vm-logic/tests/test_miscs.rs
@@ -3,29 +3,33 @@ mod vm_logic_builder;
 
 use fixtures::get_context;
 use near_vm_errors::HostError;
-use near_vm_logic::VMConfig;
+use near_vm_logic::ExtCosts;
+use std::collections::HashMap;
 use vm_logic_builder::VMLogicBuilder;
-
-fn check_gas_for_data_len(len: u64, used_gas: u64, config: &VMConfig) {
-    let base = config.ext_costs.log_base;
-    let per_byte = config.ext_costs.log_per_byte;
-    assert_eq!(base + per_byte * len, used_gas, "Wrong amount of gas spent");
-}
+mod helpers;
+use helpers::*;
 
 #[test]
 fn test_valid_utf8() {
     let mut logic_builder = VMLogicBuilder::default();
     let mut logic = logic_builder.build(get_context(vec![], false));
     let string_bytes = "j ñ r'ø qò$`5 y'5 øò{%÷ `Võ%".as_bytes().to_vec();
-    logic
-        .log_utf8(string_bytes.len() as _, string_bytes.as_ptr() as _)
-        .expect("Valid utf-8 string_bytes");
+    let len = string_bytes.len() as u64;
+    logic.log_utf8(len, string_bytes.as_ptr() as _).expect("Valid utf-8 string_bytes");
     let outcome = logic.outcome();
     assert_eq!(
         outcome.logs[0],
         format!("LOG: {}", String::from_utf8(string_bytes.clone()).unwrap())
     );
-    check_gas_for_data_len(string_bytes.len() as _, outcome.used_gas, &logic_builder.config);
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::log_base: 1,
+        ExtCosts::log_byte: len,
+        ExtCosts::read_memory_base: 1,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf8_decoding_base: 1,
+        ExtCosts::utf8_decoding_byte: len,
+    });
 }
 
 #[test]
@@ -33,13 +37,17 @@ fn test_invalid_utf8() {
     let mut logic_builder = VMLogicBuilder::default();
     let mut logic = logic_builder.build(get_context(vec![], false));
     let string_bytes = [128].to_vec();
-    assert_eq!(
-        logic.log_utf8(string_bytes.len() as _, string_bytes.as_ptr() as _),
-        Err(HostError::BadUTF8.into())
-    );
+    let len = string_bytes.len() as u64;
+    assert_eq!(logic.log_utf8(len, string_bytes.as_ptr() as _), Err(HostError::BadUTF8.into()));
     let outcome = logic.outcome();
     assert_eq!(outcome.logs.len(), 0);
-    check_gas_for_data_len(string_bytes.len() as _, outcome.used_gas, &logic_builder.config);
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: 1,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf8_decoding_base: 1,
+        ExtCosts::utf8_decoding_byte: len,
+    });
 }
 
 #[test]
@@ -56,11 +64,20 @@ fn test_valid_null_terminated_utf8() {
         .expect("Valid null-terminated utf-8 string_bytes");
     string_bytes.pop();
     let outcome = logic.outcome();
+    let len = bytes_len as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::log_base: 1,
+        ExtCosts::log_byte: len - 1,
+        ExtCosts::read_memory_base: len,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf8_decoding_base: 1,
+        ExtCosts::utf8_decoding_byte: len - 1,
+    });
     assert_eq!(
         outcome.logs[0],
         format!("LOG: {}", String::from_utf8(string_bytes.clone()).unwrap())
     );
-    check_gas_for_data_len(bytes_len as _, outcome.used_gas, &logic_builder.config);
 }
 
 #[test]
@@ -74,9 +91,14 @@ fn test_log_max_limit() {
         logic.log_utf8(string_bytes.len() as _, string_bytes.as_ptr() as _),
         Err(HostError::BadUTF8.into())
     );
+
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::utf8_decoding_base: 1,
+    });
+
     let outcome = logic.outcome();
     assert_eq!(outcome.logs.len(), 0);
-    assert_eq!(outcome.used_gas, logic_builder.config.ext_costs.log_base);
 }
 
 #[test]
@@ -91,10 +113,17 @@ fn test_log_utf8_max_limit_null_terminated() {
         logic.log_utf8(std::u64::MAX, string_bytes.as_ptr() as _),
         Err(HostError::BadUTF8.into())
     );
+
+    let len = string_bytes.len() as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: len - 1,
+        ExtCosts::read_memory_byte: len - 1,
+        ExtCosts::utf8_decoding_base: 1,
+    });
+
     let outcome = logic.outcome();
     assert_eq!(outcome.logs.len(), 0);
-
-    check_gas_for_data_len(35, outcome.used_gas, &logic_builder.config);
 }
 
 #[test]
@@ -110,9 +139,19 @@ fn test_valid_log_utf16() {
     logic
         .log_utf16(utf16_bytes.len() as _, utf16_bytes.as_ptr() as _)
         .expect("Valid utf-16 string_bytes");
+
+    let len = utf16_bytes.len() as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: 1,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf16_decoding_base: 1,
+        ExtCosts::utf16_decoding_byte: len,
+        ExtCosts::log_base: 1,
+        ExtCosts::log_byte: len,
+    });
     let outcome = logic.outcome();
     assert_eq!(outcome.logs[0], format!("LOG: {}", string));
-    assert_eq!(outcome.used_gas, 13);
 }
 
 #[test]
@@ -129,6 +168,17 @@ fn test_valid_log_utf16_max_log_len_not_even() {
     utf16_bytes.extend_from_slice(&[0, 0]);
     logic.log_utf16(std::u64::MAX, utf16_bytes.as_ptr() as _).expect("Valid utf-16 string_bytes");
 
+    let len = utf16_bytes.len() as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: len / 2,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf16_decoding_base: 1,
+        ExtCosts::utf16_decoding_byte: len - 2,
+        ExtCosts::log_base: 1,
+        ExtCosts::log_byte: len - 2,
+    });
+
     let string = "abc";
     let mut utf16_bytes: Vec<u8> = Vec::new();
     for u16_ in string.encode_utf16() {
@@ -140,6 +190,13 @@ fn test_valid_log_utf16_max_log_len_not_even() {
         logic.log_utf16(std::u64::MAX, utf16_bytes.as_ptr() as _),
         Err(HostError::BadUTF16.into())
     );
+
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: logic_builder.config.max_log_len/2 + 1,
+        ExtCosts::read_memory_byte: 2*(logic_builder.config.max_log_len/2 + 1),
+        ExtCosts::utf16_decoding_base: 1,
+    });
 }
 
 #[test]
@@ -151,7 +208,12 @@ fn test_log_utf8_max_limit_null_terminated_fail() {
     let mut logic = logic_builder.build(get_context(vec![], false));
     let res = logic.log_utf8(std::u64::MAX, string_bytes.as_ptr() as _);
     assert_eq!(res, Err(HostError::BadUTF8.into()));
-    check_gas_for_data_len(4, logic.outcome().used_gas, &logic_builder.config);
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: logic_builder.config.max_log_len + 1,
+        ExtCosts::read_memory_byte: logic_builder.config.max_log_len + 1,
+        ExtCosts::utf8_decoding_base: 1,
+    });
 }
 
 #[test]
@@ -167,9 +229,19 @@ fn test_valid_log_utf16_null_terminated() {
     utf16_bytes.push(0);
     utf16_bytes.push(0);
     logic.log_utf16(std::u64::MAX, utf16_bytes.as_ptr() as _).expect("Valid utf-16 string_bytes");
+
+    let len = utf16_bytes.len() as u64;
     let outcome = logic.outcome();
     assert_eq!(outcome.logs[0], format!("LOG: {}", string));
-    assert_eq!(outcome.used_gas, 15);
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: len / 2,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf16_decoding_base: 1,
+        ExtCosts::utf16_decoding_byte: len - 2,
+        ExtCosts::log_base: 1,
+        ExtCosts::log_byte: len - 2,
+    });
 }
 
 #[test]
@@ -183,8 +255,15 @@ fn test_invalid_log_utf16() {
         utf16_bytes.push(u16_ as u8);
     }
     let res = logic.log_utf8(utf16_bytes.len() as _, utf16_bytes.as_ptr() as _);
+    let len = utf16_bytes.len() as u64;
     assert_eq!(res, Err(HostError::BadUTF8.into()));
-    assert_eq!(logic.outcome().used_gas, 13);
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: 1,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf8_decoding_base: 1,
+        ExtCosts::utf8_decoding_byte: len,
+    });
 }
 
 #[test]
@@ -202,6 +281,16 @@ fn test_valid_log_utf16_null_terminated_fail() {
     utf16_bytes.push(0);
     utf16_bytes.push(0);
     logic.log_utf16(std::u64::MAX, utf16_bytes.as_ptr() as _).expect("Valid utf-16 string_bytes");
+    let len = utf16_bytes.len() as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: len / 2,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::utf16_decoding_base: 1,
+        ExtCosts::utf16_decoding_byte: len - 2,
+        ExtCosts::log_base: 1,
+        ExtCosts::log_byte: len - 2,
+    });
     assert_ne!(logic.outcome().logs[0], format!("LOG: {}", string));
 }
 
@@ -221,6 +310,20 @@ fn test_hash256() {
             92, 255, 88, 43, 83, 147, 122, 55, 26, 36, 42, 156, 160, 158,
         ]
     );
+    let len = data.len() as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::read_memory_base: 1,
+        ExtCosts::read_memory_byte: len,
+        ExtCosts::write_memory_base: 1,
+        ExtCosts::write_memory_byte: 32,
+        ExtCosts::read_register_base: 1,
+        ExtCosts::read_register_byte: 32,
+        ExtCosts::write_register_base: 1,
+        ExtCosts::write_register_byte: 32,
+        ExtCosts::sha256_base: 1,
+        ExtCosts::sha256_byte: len,
+    });
 }
 
 #[test]
@@ -228,7 +331,7 @@ fn test_hash256_register() {
     let mut logic_builder = VMLogicBuilder::default();
     let mut logic = logic_builder.build(get_context(vec![], false));
     let data = b"tesdsst";
-    logic.write_register(1, data).unwrap();
+    logic.wrapped_internal_write_register(1, data).unwrap();
 
     logic.sha256(std::u64::MAX, 1, 0).unwrap();
     let res = &vec![0u8; 32];
@@ -240,4 +343,17 @@ fn test_hash256_register() {
             92, 255, 88, 43, 83, 147, 122, 55, 26, 36, 42, 156, 160, 158,
         ]
     );
+
+    let len = data.len() as u64;
+    assert_costs(map! {
+        ExtCosts::base: 1,
+        ExtCosts::write_memory_base: 1,
+        ExtCosts::write_memory_byte: 32,
+        ExtCosts::read_register_base: 2,
+        ExtCosts::read_register_byte: 32 + len,
+        ExtCosts::write_register_base: 2,
+        ExtCosts::write_register_byte: 32 + len,
+        ExtCosts::sha256_base: 1,
+        ExtCosts::sha256_byte: len,
+    });
 }
diff --git a/runtime/near-vm-logic/tests/test_promises.rs b/runtime/near-vm-logic/tests/test_promises.rs
index 002f9bdc8ec..9d6935ef5b8 100644
--- a/runtime/near-vm-logic/tests/test_promises.rs
+++ b/runtime/near-vm-logic/tests/test_promises.rs
@@ -83,7 +83,7 @@ fn test_promise_batch_action_create_account() {
     logic
         .promise_batch_action_create_account(index)
         .expect("should add an action to create account");
-    assert_eq!(logic.used_gas().unwrap(), 430);
+    assert_eq!(logic.used_gas().unwrap(), 502);
     let expected = serde_json::json!([
         {
             "receipt_indices": [],
@@ -127,7 +127,7 @@ fn test_promise_batch_action_deploy_contract() {
     logic
         .promise_batch_action_deploy_contract(index, code.len() as u64, code.as_ptr() as _)
         .expect("should add an action to deploy contract");
-    assert_eq!(logic.used_gas().unwrap(), 550);
+    assert_eq!(logic.used_gas().unwrap(), 643);
     let expected = serde_json::json!(
       [
         {
@@ -183,7 +183,7 @@ fn test_promise_batch_action_transfer() {
     logic
         .promise_batch_action_transfer(index, 1u128.to_le_bytes().as_ptr() as _)
         .expect_err("not enough money");
-    assert_eq!(logic.used_gas().unwrap(), 450);
+    assert_eq!(logic.used_gas().unwrap(), 591);
     let expected = serde_json::json!(
     [
         {
@@ -257,7 +257,7 @@ fn test_promise_batch_action_stake() {
             key.as_ptr() as _,
         )
         .expect_err("not enough money to stake");
-    assert_eq!(logic.used_gas().unwrap(), 450);
+    assert_eq!(logic.used_gas().unwrap(), 803);
     let expected = serde_json::json!([
         {
             "receipt_indices": [],
@@ -334,7 +334,7 @@ fn test_promise_batch_action_add_key_with_function_call() {
         method_names,
     )
     .expect("should add allowance");
-    assert_eq!(logic.used_gas().unwrap(), 590);
+    assert_eq!(logic.used_gas().unwrap(), 920);
     let expected = serde_json::json!(
     [
         {
@@ -393,7 +393,7 @@ fn test_promise_batch_then() {
     logic
         .promise_batch_then(index, account_id.len() as u64, account_id.as_ptr() as _)
         .expect("promise batch should run ok");
-    assert_eq!(logic.used_gas().unwrap(), 490);
+    assert_eq!(logic.used_gas().unwrap(), 622);
     let expected = serde_json::json!([
         {
             "receipt_indices": [],
diff --git a/runtime/near-vm-logic/tests/test_registers.rs b/runtime/near-vm-logic/tests/test_registers.rs
index 0df2e126cd0..7d13058104b 100644
--- a/runtime/near-vm-logic/tests/test_registers.rs
+++ b/runtime/near-vm-logic/tests/test_registers.rs
@@ -10,7 +10,7 @@ fn test_one_register() {
     let mut logic_builder = VMLogicBuilder::default();
     let mut logic = logic_builder.build(get_context(vec![], false));
 
-    logic.write_register(0, &vec![0, 1, 2]).unwrap();
+    logic.wrapped_internal_write_register(0, &vec![0, 1, 2]).unwrap();
     assert_eq!(logic.register_len(0).unwrap(), 3u64);
     let buffer = [0u8; 3];
     logic.read_register(0, buffer.as_ptr() as u64).unwrap();
@@ -38,7 +38,7 @@ fn test_many_registers() {
 
     for i in 0..max_registers {
         let value = (i * 10).to_le_bytes();
-        logic.write_register(i, &value).unwrap();
+        logic.wrapped_internal_write_register(i, &value).unwrap();
 
         let buffer = [0u8; std::mem::size_of::<u64>()];
         logic.read_register(i, buffer.as_ptr() as u64).unwrap();
@@ -47,7 +47,7 @@ fn test_many_registers() {
 
     // One more register hits the boundary check.
     assert_eq!(
-        logic.write_register(max_registers, &[]),
+        logic.wrapped_internal_write_register(max_registers, &[]),
         Err(HostError::MemoryAccessViolation.into())
     )
 }
@@ -60,7 +60,10 @@ fn test_max_register_size() {
 
     let value = vec![0u8; (max_register_size + 1) as usize];
 
-    assert_eq!(logic.write_register(0, &value), Err(HostError::MemoryAccessViolation.into()));
+    assert_eq!(
+        logic.wrapped_internal_write_register(0, &value),
+        Err(HostError::MemoryAccessViolation.into())
+    );
 }
 
 #[test]
@@ -74,11 +77,11 @@ fn test_max_register_memory_limit() {
 
     for i in 0..max_registers {
         let value = vec![1u8; config.max_register_size as usize];
-        logic.write_register(i, &value).expect("should be written successfully");
+        logic.wrapped_internal_write_register(i, &value).expect("should be written successfully");
     }
     let last = vec![1u8; config.max_register_size as usize];
     assert_eq!(
-        logic.write_register(max_registers, &last),
+        logic.wrapped_internal_write_register(max_registers, &last),
         Err(HostError::MemoryAccessViolation.into())
     );
 }
diff --git a/runtime/near-vm-logic/tests/test_storage_read_write.rs b/runtime/near-vm-logic/tests/test_storage_read_write.rs
index 0ab0a887975..29ae77c38ef 100644
--- a/runtime/near-vm-logic/tests/test_storage_read_write.rs
+++ b/runtime/near-vm-logic/tests/test_storage_read_write.rs
@@ -13,8 +13,8 @@ fn test_storage_write_with_register() {
     let key: &[u8] = b"foo";
     let val: &[u8] = b"bar";
 
-    logic.write_register(1, key).unwrap();
-    logic.write_register(2, val).unwrap();
+    logic.wrapped_internal_write_register(1, key).unwrap();
+    logic.wrapped_internal_write_register(2, val).unwrap();
 
     logic.storage_write(std::u64::MAX, 1 as _, std::u64::MAX, 2 as _, 0).expect("storage write ok");
 
@@ -31,7 +31,7 @@ fn test_storage_read_with_register() {
     logic_builder.ext.storage_set(key, val).unwrap();
     let mut logic = logic_builder.build(get_context(vec![], false));
 
-    logic.write_register(1, key).unwrap();
+    logic.wrapped_internal_write_register(1, key).unwrap();
 
     logic.storage_read(std::u64::MAX, 1 as _, 0).expect("storage read ok");
     let res = [0u8; 3];
@@ -51,7 +51,7 @@ fn test_storage_remove_with_register() {
         .storage_write(key.len() as _, key.as_ptr() as _, val.len() as _, val.as_ptr() as _, 0)
         .expect("storage write ok");
 
-    logic.write_register(1, key).unwrap();
+    logic.wrapped_internal_write_register(1, key).unwrap();
 
     logic.storage_remove(std::u64::MAX, 1 as _, 0).expect("storage remove ok");
     let res = [0u8; 3];
@@ -69,7 +69,7 @@ fn test_storage_has_key_with_register() {
 
     let mut logic = logic_builder.build(get_context(vec![], false));
 
-    logic.write_register(1, key).unwrap();
+    logic.wrapped_internal_write_register(1, key).unwrap();
 
     assert_eq!(logic.storage_has_key(std::u64::MAX, 1 as _), Ok(1));
 }
diff --git a/runtime/near-vm-runner/Cargo.toml b/runtime/near-vm-runner/Cargo.toml
index a4dac9bbe8f..58e301c0458 100644
--- a/runtime/near-vm-runner/Cargo.toml
+++ b/runtime/near-vm-runner/Cargo.toml
@@ -14,10 +14,10 @@ This crate implements the specification of the interface that Near blockchain ex
 
 [dependencies]
 cached = "0.9.0"
-wasmer-runtime = { version = "0.9.0", features = ["singlepass"] }
-wasmer-runtime-core = { version = "0.9.0"}
+wasmer-runtime = { version = "0.9.0", features = ["default-backend-singlepass"], default-features = false }
+wasmer-runtime-core = { version = "0.9.0", features = ["backend-singlepass"]}
 near-runtime-fees = { path="../near-runtime-fees", version = "0.4.0" }
-near-vm-logic = { path="../near-vm-logic", version = "0.4.0"}
+near-vm-logic = { path="../near-vm-logic", version = "0.4.0", default-features = false, features = []}
 near-vm-errors = { path = "../near-vm-errors", version = "0.4.0" }
 pwasm-utils = "0.12.0"
 parity-wasm = "0.41.0"
@@ -28,6 +28,12 @@ assert_matches = "1.3.0"
 wabt = "0.9"
 bencher = "0.1.5"
 
+[features]
+default = []
+
+# Use this feature to enable counting of fees and costs applied.
+costs_counting = ["near-vm-logic/costs_counting"]
+
 [[bench]]
 name = "bench"
 harness = false
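
The `costs_counting` feature only toggles whether this bookkeeping happens; with the feature off, the recording call compiles away while gas accounting is unchanged. A sketch of that kind of gating with a hypothetical `record_cost` helper (not the crate's actual API):

```rust
// With `--features costs_counting` the tally is recorded; without it, the
// function body is empty and the call is optimized out.
#[cfg(feature = "costs_counting")]
fn record_cost(name: &str, amount: u64) {
    eprintln!("cost {} += {}", name, amount);
}

#[cfg(not(feature = "costs_counting"))]
fn record_cost(_name: &str, _amount: u64) {}

fn pay_gas(name: &str, amount: u64, burnt: &mut u64) {
    *burnt += amount;
    record_cost(name, amount); // gas accounting itself is unaffected by the feature
}

fn main() {
    let mut burnt = 0;
    pay_gas("base", 10, &mut burnt);
    assert_eq!(burnt, 10);
}
```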
diff --git a/runtime/near-vm-runner/src/imports.rs b/runtime/near-vm-runner/src/imports.rs
index f0c8128d214..4856123a61c 100644
--- a/runtime/near-vm-runner/src/imports.rs
+++ b/runtime/near-vm-runner/src/imports.rs
@@ -43,6 +43,7 @@ wrapped_imports! {
     // #############
     read_register<[register_id: u64, ptr: u64] -> []>,
     register_len<[register_id: u64] -> [u64]>,
+    write_register<[register_id: u64, data_len: u64, data_ptr: u64] -> []>,
     // ###############
     // # Context API #
     // ###############
diff --git a/runtime/near-vm-runner/src/lib.rs b/runtime/near-vm-runner/src/lib.rs
index 1c5d4641a11..24518861c9d 100644
--- a/runtime/near-vm-runner/src/lib.rs
+++ b/runtime/near-vm-runner/src/lib.rs
@@ -6,3 +6,6 @@ mod prepare;
 mod runner;
 pub use near_vm_errors::VMError;
 pub use runner::run;
+
+#[cfg(feature = "costs_counting")]
+pub use near_vm_logic::EXT_COSTS_COUNTER;
diff --git a/runtime/near-vm-runner/tests/test_error_cases.rs b/runtime/near-vm-runner/tests/test_error_cases.rs
index 62ff00b7c2e..d92b93ca26f 100644
--- a/runtime/near-vm-runner/tests/test_error_cases.rs
+++ b/runtime/near-vm-runner/tests/test_error_cases.rs
@@ -227,7 +227,7 @@ fn test_guest_panic() {
     assert_eq!(
         make_simple_contract_call(&guest_panic(), b"hello"),
         (
-            Some(vm_outcome_with_gas(1)),
+            Some(vm_outcome_with_gas(2)),
             Some(VMError::FunctionCallError(FunctionCallError::HostError(HostError::GuestPanic(
                 "explicit guest panic".to_string()
             ))))
@@ -252,7 +252,7 @@ fn test_stack_overflow() {
     assert_eq!(
         make_simple_contract_call(&stack_overflow(), b"hello"),
         (
-            Some(vm_outcome_with_gas(32768)),
+            Some(vm_outcome_with_gas(16384)),
             Some(VMError::FunctionCallError(FunctionCallError::WasmTrap("unknown".to_string())))
         )
     );
diff --git a/runtime/runtime-params-estimator/Cargo.toml b/runtime/runtime-params-estimator/Cargo.toml
index d4d3b814eac..a381ccf8567 100644
--- a/runtime/runtime-params-estimator/Cargo.toml
+++ b/runtime/runtime-params-estimator/Cargo.toml
@@ -10,13 +10,17 @@ indicatif = {version = "0.12.0", features = ["with_rayon"]}
 rayon = "1.1"
 tempdir = "0.3.7"
 rand = "0.7.2"
+rand_xorshift = "0.2.0"
 gnuplot = "0.0.32"
 serde_json = "1.0.40"
 csv = "1.1.1"
 clap = "2.33.0"
 
+near-runtime-fees = { path = "../../runtime/near-runtime-fees" }
 near-crypto = { path = "../../core/crypto" }
-node-runtime = { path = "../../runtime/runtime" }
+near-vm-logic = {path = "../../runtime/near-vm-logic" , features = ["costs_counting"]}
+near-vm-runner = {path = "../../runtime/near-vm-runner" , features = ["costs_counting"]}
+node-runtime = { path = "../../runtime/runtime" , features = ["costs_counting"]}
 near-store = { path = "../../core/store" }
 near-primitives = { path = "../../core/primitives" }
 near = { path = "../../near" }
diff --git a/runtime/runtime-params-estimator/README.md b/runtime/runtime-params-estimator/README.md
index f27dbd9104f..bd660f3529c 100644
--- a/runtime/runtime-params-estimator/README.md
+++ b/runtime/runtime-params-estimator/README.md
@@ -10,9 +10,9 @@ Use this tool to measure the running time of elementary runtime operations that
 
 2. Run the estimator
     ```bash
-    cargo run --package runtime-params-estimator --bin runtime-params-estimator -- --home /tmp/data --accounts-num 20000 --smallest-block-size-pow2 3 --largest-block-size-pow2 4 --iters 1 --warmup-iters 1
+    cargo run --package runtime-params-estimator --bin runtime-params-estimator -- --home /tmp/data --accounts-num 20000 --iters 1 --warmup-iters 1
     ```
     
     With the given parameters above, the estimator will run relatively fast. We will be using different parameters to do the actual parameter estimation.
 
-Note, you would need to install [gnuplot](http://gnuplot.info/) to see the graphs.
+Note: if you use the plotting functionality, you need to install [gnuplot](http://gnuplot.info/) to see the graphs.
diff --git a/runtime/runtime-params-estimator/src/cases.rs b/runtime/runtime-params-estimator/src/cases.rs
index 26d9b843d76..73cb0156d84 100644
--- a/runtime/runtime-params-estimator/src/cases.rs
+++ b/runtime/runtime-params-estimator/src/cases.rs
@@ -1,10 +1,8 @@
 use std::cell::RefCell;
 use std::collections::{HashMap, HashSet};
-use std::path::PathBuf;
 
-use rand::distributions::Standard;
 use rand::seq::SliceRandom;
-use rand::Rng;
+use rand::{Rng, SeedableRng};
 
 use near_crypto::{InMemorySigner, KeyType, PublicKey};
 use near_primitives::account::{AccessKey, AccessKeyPermission, FunctionCallPermission};
@@ -14,20 +12,160 @@ use near_primitives::transaction::{
     DeployContractAction, FunctionCallAction, SignedTransaction, StakeAction, TransferAction,
 };
 
+use crate::ext_costs_generator::ExtCostsGenerator;
+use crate::runtime_fees_generator::RuntimeFeesGenerator;
 use crate::stats::Measurements;
 use crate::testbed::RuntimeTestbed;
 use crate::testbed_runners::{get_account_id, measure_actions, measure_transactions, Config};
+use crate::wasmer_estimator::nanosec_per_op;
+use near_runtime_fees::{
+    AccessKeyCreationConfig, ActionCreationConfig, DataReceiptCreationConfig, Fee, Fraction,
+    RuntimeFeesConfig, StorageUsageConfig,
+};
+use near_vm_logic::{ExtCosts, ExtCostsConfig, VMConfig};
+use node_runtime::config::RuntimeConfig;
+
+/// How much gas there is in a nanosecond worth of computation.
+const GAS_IN_NANOS: f64 = 1_000_000f64;
+/// The amount of gas in a single block.
+const GAS_IN_BLOCK: u64 = 1_000_000_000_000_000u64;
+
+fn measure_function(
+    metric: Metric,
+    method_name: &'static str,
+    measurements: &mut Measurements,
+    testbed: RuntimeTestbed,
+    accounts_deployed: &[usize],
+    nonces: &mut HashMap<usize, u64>,
+    config: &Config,
+    allow_failures: bool,
+    args: Vec<u8>,
+) -> RuntimeTestbed {
+    // Measure the speed of processing function calls to `method_name` with the given args.
+    let mut rng = rand_xorshift::XorShiftRng::from_seed([0u8; 16]);
+    let mut f = || {
+        let account_idx = *accounts_deployed.choose(&mut rng).unwrap();
+        let account_id = get_account_id(account_idx);
+        let signer = InMemorySigner::from_seed(&account_id, KeyType::ED25519, &account_id);
+        let nonce = *nonces.entry(account_idx).and_modify(|x| *x += 1).or_insert(1);
+        let function_call = Action::FunctionCall(FunctionCallAction {
+            method_name: method_name.to_string(),
+            args: args.clone(),
+            gas: 10u64.pow(18),
+            deposit: 0,
+        });
+        SignedTransaction::from_actions(
+            nonce as u64,
+            account_id.clone(),
+            account_id.clone(),
+            &signer,
+            vec![function_call],
+            CryptoHash::default(),
+        )
+    };
+    measure_transactions(metric, measurements, config, Some(testbed), &mut f, allow_failures)
+}
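
`measure_function` now draws accounts with a `XorShiftRng` seeded from a constant, so repeated estimator runs replay the same sequence of transactions. A small check of that property, assuming the `rand` 0.7 and `rand_xorshift` 0.2 crates already listed in the estimator's Cargo.toml:

```rust
use rand::{Rng, SeedableRng};
use rand_xorshift::XorShiftRng;

fn main() {
    // Two generators with the same fixed seed produce identical draws,
    // which keeps account selection stable across estimator runs.
    let mut a = XorShiftRng::from_seed([0u8; 16]);
    let mut b = XorShiftRng::from_seed([0u8; 16]);
    for _ in 0..5 {
        assert_eq!(a.gen::<u64>(), b.gen::<u64>());
    }
}
```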
+
+macro_rules! calls_helper(
+    { $($el:ident => $method_name:ident),* } => {
+    {
+        let mut v: Vec<(Metric, &str)> = vec![];
+        $(
+            v.push((Metric::$el, stringify!($method_name)));
+        )*
+        v
+    }
+    };
+);
+
+#[derive(Debug, PartialEq, Eq, Ord, PartialOrd, Clone, Copy)]
+#[allow(non_camel_case_types)]
+pub enum Metric {
+    Receipt,
+    ActionTransfer,
+    ActionCreateAccount,
+    ActionDeleteAccount,
+    ActionAddFullAccessKey,
+    ActionAddFunctionAccessKey1Method,
+    ActionAddFunctionAccessKey1000Methods,
+    ActionDeleteAccessKey,
+    ActionStake,
+    ActionDeploy10K,
+    ActionDeploy100K,
+    ActionDeploy1M,
+
+    warmup,
+    noop_1MiB,
+    noop,
+    base_1M,
+    read_memory_10b_10k,
+    read_memory_1Mib_10k,
+    write_memory_10b_10k,
+    write_memory_1Mib_10k,
+    read_register_10b_10k,
+    read_register_1Mib_10k,
+    write_register_10b_10k,
+    write_register_1Mib_10k,
+    utf8_log_10b_10k,
+    utf8_log_10kib_10k,
+    nul_utf8_log_10b_10k,
+    nul_utf8_log_10kib_10k,
+    utf16_log_10b_10k,
+    utf16_log_10kib_10k,
+    nul_utf16_log_10b_10k,
+    nul_utf16_log_10kib_10k,
+    sha256_10b_10k,
+    sha256_10kib_10k,
+    storage_write_10b_key_10b_value_1k,
+    storage_write_10kib_key_10b_value_1k,
+    storage_write_10b_key_10kib_value_1k,
+    storage_write_10b_key_10kib_value_1k_evict,
+    storage_read_10b_key_10b_value_1k,
+    storage_read_10kib_key_10b_value_1k,
+    storage_read_10b_key_10kib_value_1k,
+    storage_remove_10b_key_10b_value_1k,
+    storage_remove_10kib_key_10b_value_1k,
+    storage_remove_10b_key_10kib_value_1k,
+    storage_has_key_10b_key_10b_value_1k,
+    storage_has_key_10kib_key_10b_value_1k,
+    storage_has_key_10b_key_10kib_value_1k,
+    storage_iter_prefix_10b_1k,
+    storage_iter_prefix_10kib_1k,
+    storage_iter_range_10b_from_10b_to_1k,
+    storage_iter_range_10kib_from_10b_to_1k,
+    storage_iter_range_10b_from_10kib_to_1k,
+
+    storage_next_10b_from_10b_to_1k_10b_key_10b_value,
+    storage_next_10kib_from_10b_to_1k_10b_key_10b_value,
+    storage_next_10b_from_10kib_to_1k_10b_key_10b_value,
+
+    storage_next_10b_from_10b_to_1k_10kib_key_10b_value,
+    storage_next_10kib_from_10b_to_1k_10kib_key_10b_value,
+    storage_next_10b_from_10kib_to_1k_10kib_key_10b_value,
 
-pub fn run(config: Config) {
+    storage_next_10b_from_10b_to_1k_10b_key_10kib_value,
+    storage_next_10kib_from_10b_to_1k_10b_key_10kib_value,
+    storage_next_10b_from_10kib_to_1k_10b_key_10kib_value,
+
+    promise_and_100k,
+    promise_and_100k_on_1k_and,
+    promise_return_100k,
+    data_producer_10b,
+    data_producer_100kib,
+    data_receipt_10b_1000,
+    data_receipt_100kib_1000,
+    cpu_ram_soak_test,
+}
+
+pub fn run(mut config: Config) -> RuntimeConfig {
     let mut m = Measurements::new();
+    config.block_sizes = vec![100];
     // Measure the speed of processing empty receipts.
-    measure_actions("receipt only", 1, None, &mut m, &config, None, vec![], false, false);
+    measure_actions(Metric::Receipt, &mut m, &config, None, vec![], false, false);
 
     // Measure the speed of processing simple transfers.
     measure_actions(
-        "transfer",
-        1,
-        None,
+        Metric::ActionTransfer,
         &mut m,
         &config,
         None,
@@ -53,7 +191,7 @@ pub fn run(config: Config) {
             CryptoHash::default(),
         )
     };
-    measure_transactions("create_account", 1, None, &mut m, &config, None, &mut f);
+    measure_transactions(Metric::ActionCreateAccount, &mut m, &config, None, &mut f, false);
 
     // Measure the speed of deleting an account.
     let mut nonces: HashMap<usize, u64> = HashMap::new();
@@ -87,13 +225,11 @@ pub fn run(config: Config) {
             CryptoHash::default(),
         )
     };
-    measure_transactions("delete_account", 1, None, &mut m, &config, None, &mut f);
+    measure_transactions(Metric::ActionDeleteAccount, &mut m, &config, None, &mut f, false);
 
     // Measure the speed of adding a full access key.
     measure_actions(
-        "add access key full",
-        1,
-        None,
+        Metric::ActionAddFullAccessKey,
         &mut m,
         &config,
         None,
@@ -110,9 +246,32 @@ pub fn run(config: Config) {
 
     // Measure the speed of adding a function call access key.
     measure_actions(
-        "add access key full",
-        1,
+        Metric::ActionAddFunctionAccessKey1Method,
+        &mut m,
+        &config,
         None,
+        vec![Action::AddKey(AddKeyAction {
+            public_key: serde_json::from_str(
+                "\"ed25519:DcA2MzgpJbrUATQLLceocVckhhAqrkingax4oJ9kZ847\"",
+            )
+            .unwrap(),
+            access_key: AccessKey {
+                nonce: 0,
+                permission: AccessKeyPermission::FunctionCall(FunctionCallPermission {
+                    allowance: Some(100),
+                    receiver_id: get_account_id(0),
+                    method_names: vec!["method1".to_string()],
+                }),
+            },
+        })],
+        true,
+        true,
+    );
+
+    // Measure the speed of adding an access key with 1k methods each 10bytes long.
+    let many_methods: Vec<_> = (0..1000).map(|i| format!("a123456{:03}", i)).collect();
+    measure_actions(
+        Metric::ActionAddFunctionAccessKey1000Methods,
         &mut m,
         &config,
         None,
@@ -126,11 +285,7 @@ pub fn run(config: Config) {
                 permission: AccessKeyPermission::FunctionCall(FunctionCallPermission {
                     allowance: Some(100),
                     receiver_id: get_account_id(0),
-                    method_names: vec![
-                        "method1".to_string(),
-                        "method2".to_string(),
-                        "method3".to_string(),
-                    ],
+                    method_names: many_methods,
                 }),
             },
         })],
@@ -162,13 +317,11 @@ pub fn run(config: Config) {
             CryptoHash::default(),
         )
     };
-    measure_transactions("delete_access_key", 1, None, &mut m, &config, None, &mut f);
+    measure_transactions(Metric::ActionDeleteAccessKey, &mut m, &config, None, &mut f, false);
 
     // Measure the speed of staking.
     measure_actions(
-        "stake",
-        1,
-        None,
+        Metric::ActionStake,
         &mut m,
         &config,
         None,
@@ -181,10 +334,10 @@ pub fn run(config: Config) {
     );
 
     // Measure the speed of deploying some code.
-    let small_code = include_bytes!("../test-contract/res/small_contract.wasm");
-    let medium_code = include_bytes!("../test-contract/res/medium_contract.wasm");
-    let large_code = include_bytes!("../test-contract/res/large_contract.wasm");
-    let curr_code = RefCell::new(small_code.to_vec());
+    let code_10k = include_bytes!("../test-contract/res/small_contract.wasm");
+    let code_100k = include_bytes!("../test-contract/res/medium_contract.wasm");
+    let code_1m = include_bytes!("../test-contract/res/large_contract.wasm");
+    let curr_code = RefCell::new(code_10k.to_vec());
     let mut nonces: HashMap<usize, u64> = HashMap::new();
     let mut accounts_deployed = HashSet::new();
     let mut f = || {
@@ -209,170 +362,269 @@ pub fn run(config: Config) {
         )
     };
     let mut testbed =
-        measure_transactions("deploy", 1, Some(small_code.len()), &mut m, &config, None, &mut f);
-    *curr_code.borrow_mut() = medium_code.to_vec();
+        measure_transactions(Metric::ActionDeploy10K, &mut m, &config, None, &mut f, false);
+    *curr_code.borrow_mut() = code_100k.to_vec();
     testbed = measure_transactions(
-        "deploy",
-        1,
-        Some(medium_code.len()),
+        Metric::ActionDeploy100K,
         &mut m,
         &config,
         Some(testbed),
         &mut f,
+        false,
     );
-    *curr_code.borrow_mut() = large_code.to_vec();
-    testbed = measure_transactions(
-        "deploy",
-        1,
-        Some(large_code.len()),
+    *curr_code.borrow_mut() = code_1m.to_vec();
+    testbed =
+        measure_transactions(Metric::ActionDeploy1M, &mut m, &config, Some(testbed), &mut f, false);
+
+    let ad: Vec<_> = accounts_deployed.into_iter().collect();
+
+    testbed = measure_function(
+        Metric::warmup,
+        "noop",
         &mut m,
+        testbed,
+        &ad,
+        &mut nonces,
         &config,
-        Some(testbed),
-        &mut f,
+        false,
+        vec![],
     );
 
-    let ad: Vec<_> = accounts_deployed.into_iter().collect();
+    testbed = measure_function(
+        Metric::noop_1MiB,
+        "noop",
+        &mut m,
+        testbed,
+        &ad,
+        &mut nonces,
+        &config,
+        false,
+        (&[0u8; 1024 * 1024]).to_vec(),
+    );
 
-    // Measure the speed of processing function calls that do nothing.
-    let mut f = || {
-        let account_idx = *ad.as_slice().choose(&mut rand::thread_rng()).unwrap();
-        let account_id = get_account_id(account_idx);
-        let signer = InMemorySigner::from_seed(&account_id, KeyType::ED25519, &account_id);
-        let nonce = *nonces.entry(account_idx).and_modify(|x| *x += 1).or_insert(1);
-        let function_call = Action::FunctionCall(FunctionCallAction {
-            method_name: "noop".to_string(),
-            args: vec![],
-            gas: 10_000_000,
-            deposit: 0,
-        });
-        SignedTransaction::from_actions(
-            nonce as u64,
-            account_id.clone(),
-            account_id.clone(),
-            &signer,
-            vec![function_call],
-            CryptoHash::default(),
-        )
-    };
-    testbed = measure_transactions("call noop", 1, None, &mut m, &config, Some(testbed), &mut f);
+    testbed = measure_function(
+        Metric::noop,
+        "noop",
+        &mut m,
+        testbed,
+        &ad,
+        &mut nonces,
+        &config,
+        false,
+        vec![],
+    );
+
+    config.block_sizes = vec![2];
+
+    let v = calls_helper! {
+    cpu_ram_soak_test => cpu_ram_soak_test,
+    base_1M => base_1M,
+    read_memory_10b_10k => read_memory_10b_10k,
+    read_memory_1Mib_10k => read_memory_1Mib_10k,
+    write_memory_10b_10k => write_memory_10b_10k,
+    write_memory_1Mib_10k => write_memory_1Mib_10k,
+    read_register_10b_10k => read_register_10b_10k,
+    read_register_1Mib_10k => read_register_1Mib_10k,
+    write_register_10b_10k => write_register_10b_10k,
+    write_register_1Mib_10k => write_register_1Mib_10k,
+    utf8_log_10b_10k => utf8_log_10b_10k,
+    utf8_log_10kib_10k => utf8_log_10kib_10k,
+    nul_utf8_log_10b_10k => nul_utf8_log_10b_10k,
+    nul_utf8_log_10kib_10k => nul_utf8_log_10kib_10k,
+    utf16_log_10b_10k => utf16_log_10b_10k,
+    utf16_log_10kib_10k => utf16_log_10kib_10k,
+    nul_utf16_log_10b_10k => nul_utf16_log_10b_10k,
+    nul_utf16_log_10kib_10k => nul_utf16_log_10kib_10k,
+    sha256_10b_10k => sha256_10b_10k,
+    sha256_10kib_10k => sha256_10kib_10k,
+    storage_write_10b_key_10b_value_1k => storage_write_10b_key_10b_value_1k,
+    storage_read_10b_key_10b_value_1k => storage_read_10b_key_10b_value_1k,
+    storage_has_key_10b_key_10b_value_1k => storage_has_key_10b_key_10b_value_1k,
+    storage_iter_prefix_10b_1k => storage_iter_prefix_10b_1k,
+    storage_iter_range_10b_from_10b_to_1k => storage_iter_range_10b_from_10b_to_1k,
+    storage_next_10b_from_10b_to_1k_10b_key_10b_value => storage_next_10b_from_10b_to_1k,
+    storage_next_10kib_from_10b_to_1k_10b_key_10b_value => storage_next_10kib_from_10b_to_1k,
+    storage_next_10b_from_10kib_to_1k_10b_key_10b_value => storage_next_10b_from_10kib_to_1k,
+    storage_remove_10b_key_10b_value_1k => storage_remove_10b_key_10b_value_1k,
+    storage_write_10kib_key_10b_value_1k => storage_write_10kib_key_10b_value_1k,
+    storage_read_10kib_key_10b_value_1k => storage_read_10kib_key_10b_value_1k,
+    storage_has_key_10kib_key_10b_value_1k => storage_has_key_10kib_key_10b_value_1k,
+    storage_iter_prefix_10kib_1k => storage_iter_prefix_10kib_1k,
+    storage_iter_range_10kib_from_10b_to_1k => storage_iter_range_10kib_from_10b_to_1k,
+    storage_iter_range_10b_from_10kib_to_1k => storage_iter_range_10b_from_10kib_to_1k,
+    storage_next_10b_from_10b_to_1k_10kib_key_10b_value => storage_next_10b_from_10b_to_1k,
+    storage_next_10kib_from_10b_to_1k_10kib_key_10b_value => storage_next_10kib_from_10b_to_1k,
+    storage_next_10b_from_10kib_to_1k_10kib_key_10b_value => storage_next_10b_from_10kib_to_1k,
+    storage_remove_10kib_key_10b_value_1k => storage_remove_10kib_key_10b_value_1k,
+    storage_write_10b_key_10kib_value_1k => storage_write_10b_key_10kib_value_1k,
+    storage_write_10b_key_10kib_value_1k_evict => storage_write_10b_key_10kib_value_1k,
+    storage_read_10b_key_10kib_value_1k => storage_read_10b_key_10kib_value_1k,
+    storage_has_key_10b_key_10kib_value_1k => storage_has_key_10b_key_10kib_value_1k,
+    storage_next_10b_from_10b_to_1k_10b_key_10kib_value => storage_next_10b_from_10b_to_1k,
+    storage_next_10kib_from_10b_to_1k_10b_key_10kib_value => storage_next_10kib_from_10b_to_1k,
+    storage_next_10b_from_10kib_to_1k_10b_key_10kib_value => storage_next_10b_from_10kib_to_1k,
+    storage_remove_10b_key_10kib_value_1k => storage_remove_10b_key_10kib_value_1k,
+    promise_and_100k => promise_and_100k,
+    promise_and_100k_on_1k_and => promise_and_100k_on_1k_and,
+    promise_return_100k => promise_return_100k,
+    data_producer_10b => data_producer_10b,
+    data_producer_100kib => data_producer_100kib,
+    data_receipt_10b_1000 => data_receipt_10b_1000,
+    data_receipt_100kib_1000 => data_receipt_100kib_1000
+        };
 
     // Measure the speed of all extern function calls.
-    testbed = measure_function("call_fixture10", 10, &mut m, testbed, &ad, &mut nonces, &config);
-    for (method_name, n) in &[
-        ("factorization", 10000),
-        ("call_input", 20),
-        ("call_input_register_len", 20),
-        ("call_input_read_register", 20),
-        ("call_current_account_id", 100),
-        ("call_signer_account_id", 100),
-        ("call_signer_account_pk", 100),
-        ("call_predecessor_account_id", 100),
-        ("call_block_index", 100),
-        ("call_storage_usage", 100),
-        ("call_account_balance", 100),
-        ("call_attached_deposit", 100),
-        ("call_prepaid_gas", 100),
-        ("call_used_gas", 100),
-        ("call_random_seed", 100),
-        ("call_sha256", 20),
-        ("call_value_return", 20),
-        ("call_log_utf8", 20),
-        ("call_log_utf16", 20),
-        ("call_promise_batch_create", 100),
-        ("call_promise_batch_create_promise_batch_then", 100),
-        ("call_promise_batch_create_promise_batch_action_create_account", 20),
-        ("call_promise_batch_create_promise_batch_action_create_account_batch_action_deploy_contract", 20),
-        ("call_promise_results_count", 100),
-        ("call_promise_batch_create_promise_return", 20),
-        ("call_storage_write", 20),
-        ("call_storage_read", 20),
-        ("call_storage_remove", 20),
-        ("call_storage_has_key", 20),
-        ("call_storage_iter_prefix", 20),
-        ("call_storage_iter_range", 20),
-        ("call_storage_iter_next", 20),
-    ] {
-        testbed = measure_function(method_name, *n, &mut m, testbed, &ad, &mut nonces, &config);
+    for (metric, method_name) in v {
+        testbed = measure_function(
+            metric,
+            method_name,
+            &mut m,
+            testbed,
+            &ad,
+            &mut nonces,
+            &config,
+            false,
+            vec![],
+        );
     }
 
-    let mut csv_path = PathBuf::from(&config.state_dump_path);
-    csv_path.push("./metrics.csv");
-    m.save_to_csv(csv_path.as_path());
+    get_runtime_config(&m)
 
-    m.plot(PathBuf::from(&config.state_dump_path).as_path());
+    //    let mut csv_path = PathBuf::from(&config.state_dump_path);
+    //    csv_path.push("./metrics.csv");
+    //    m.save_to_csv(csv_path.as_path());
+    //
+    //    m.plot(PathBuf::from(&config.state_dump_path).as_path());
 }
 
-fn create_args(n: usize, blob_size: usize) -> Vec<u8> {
-    let mut res = vec![];
-    res.extend_from_slice(&(n as u64).to_le_bytes());
-    let blob: Vec<u8> = rand::thread_rng().sample_iter(Standard).take(blob_size).collect();
-    let blob: Vec<u8> = blob.into_iter().map(|x| x % (b'z' - b'a' + 1) + b'a').collect();
-    res.extend(blob);
-    res
+/// Converts the measured time of an action to a fee, splitting it evenly between the send and execution fees.
+fn f64_to_fee(value: f64) -> Fee {
+    let value = if value >= 0f64 { value } else { 0f64 };
+    let value: u64 = (value * GAS_IN_NANOS) as u64;
+    Fee { send_sir: value / 2, send_not_sir: value / 2, execution: value / 2 }
 }
 
-fn measure_function(
-    method_name: &'static str,
-    n: usize,
-    measurements: &mut Measurements,
-    mut testbed: RuntimeTestbed,
-    accounts_deployed: &[usize],
-    nonces: &mut HashMap<usize, u64>,
-    config: &Config,
-) -> RuntimeTestbed {
-    for blob_size in &[10, 10000] {
-        testbed = measure_function_with_blob_size(
-            method_name,
-            n,
-            *blob_size as _,
-            measurements,
-            testbed,
-            accounts_deployed,
-            nonces,
-            config,
-        );
+fn f64_to_gas(value: f64) -> u64 {
+    let value = if value >= 0f64 { value } else { 0f64 };
+    (value * GAS_IN_NANOS) as u64
+}
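
With `GAS_IN_NANOS = 1_000_000`, a nanosecond of measured wall-clock time maps to one million gas, and `f64_to_fee` charges half of that total on send and half on execution. A worked example with local stand-ins for the `near-runtime-fees` types:

```rust
// Local stand-ins; the real `Fee` comes from `near-runtime-fees`.
const GAS_IN_NANOS: f64 = 1_000_000f64;

#[derive(Debug, PartialEq)]
struct Fee {
    send_sir: u64,
    send_not_sir: u64,
    execution: u64,
}

fn f64_to_fee(value: f64) -> Fee {
    let value = if value >= 0f64 { value } else { 0f64 };
    let value = (value * GAS_IN_NANOS) as u64;
    Fee { send_sir: value / 2, send_not_sir: value / 2, execution: value / 2 }
}

fn main() {
    // An action measured at 2000 ns becomes 2_000_000_000 gas in total:
    // 1_000_000_000 charged when the receipt is sent, 1_000_000_000 when executed.
    let fee = f64_to_fee(2000.0);
    assert_eq!(fee.send_sir + fee.execution, 2_000_000_000);
    assert_eq!(fee.send_not_sir, 1_000_000_000);
}
```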
+
+fn get_runtime_fees_config(measurement: &Measurements) -> RuntimeFeesConfig {
+    use crate::runtime_fees_generator::ReceiptFeesFloat::*;
+    let generator = RuntimeFeesGenerator::new(measurement);
+    let pure = generator.compute();
+    RuntimeFeesConfig {
+        action_receipt_creation_config: f64_to_fee(pure[&ActionReceiptCreation]),
+        data_receipt_creation_config: DataReceiptCreationConfig {
+            base_cost: f64_to_fee(pure[&DataReceiptCreationBase]),
+            cost_per_byte: f64_to_fee(pure[&DataReceiptCreationPerByte]),
+        },
+        action_creation_config: ActionCreationConfig {
+            create_account_cost: f64_to_fee(pure[&ActionCreateAccount]),
+            deploy_contract_cost: f64_to_fee(pure[&ActionDeployContractBase]),
+            deploy_contract_cost_per_byte: f64_to_fee(pure[&ActionDeployContractPerByte]),
+            function_call_cost: f64_to_fee(pure[&ActionFunctionCallBase]),
+            function_call_cost_per_byte: f64_to_fee(pure[&ActionFunctionCallPerByte]),
+            transfer_cost: f64_to_fee(pure[&ActionTransfer]),
+            stake_cost: f64_to_fee(pure[&ActionStake]),
+            add_key_cost: AccessKeyCreationConfig {
+                full_access_cost: f64_to_fee(pure[&ActionAddFullAccessKey]),
+                function_call_cost: f64_to_fee(pure[&ActionAddFunctionAccessKeyBase]),
+                function_call_cost_per_byte: f64_to_fee(pure[&ActionAddFunctionAccessKeyPerByte]),
+            },
+            delete_key_cost: f64_to_fee(pure[&ActionDeleteKey]),
+            delete_account_cost: f64_to_fee(pure[&ActionDeleteAccount]),
+        },
+        storage_usage_config: StorageUsageConfig {
+            account_cost: 0,
+            data_record_cost: 0,
+            key_cost_per_byte: 0,
+            value_cost_per_byte: 0,
+            code_cost_per_byte: 0,
+        },
+        burnt_gas_reward: Fraction { numerator: 1, denominator: 3 },
     }
-    testbed
 }
 
-fn measure_function_with_blob_size(
-    method_name: &'static str,
-    n: usize,
-    blob_size: usize,
-    measurements: &mut Measurements,
-    testbed: RuntimeTestbed,
-    accounts_deployed: &[usize],
-    nonces: &mut HashMap<usize, u64>,
-    config: &Config,
-) -> RuntimeTestbed {
-    // Measure the speed of creating a function fixture with 1MiB input.
-    let mut f = || {
-        let account_idx = *accounts_deployed.choose(&mut rand::thread_rng()).unwrap();
-        let account_id = get_account_id(account_idx);
-        let signer = InMemorySigner::from_seed(&account_id, KeyType::ED25519, &account_id);
-        let nonce = *nonces.entry(account_idx).and_modify(|x| *x += 1).or_insert(1);
-        let function_call = Action::FunctionCall(FunctionCallAction {
-            method_name: method_name.to_string(),
-            args: create_args(n, blob_size),
-            gas: 10u64.pow(18),
-            deposit: 0,
-        });
-        SignedTransaction::from_actions(
-            nonce as u64,
-            account_id.clone(),
-            account_id.clone(),
-            &signer,
-            vec![function_call],
-            CryptoHash::default(),
-        )
-    };
-    measure_transactions(
-        method_name,
-        n,
-        Some(blob_size),
-        measurements,
-        config,
-        Some(testbed),
-        &mut f,
-    )
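+/// Derives the host-function (external) cost table from the measured `ExtCosts` estimates.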
+fn get_ext_costs_config(measurement: &Measurements) -> ExtCostsConfig {
+    let mut generator = ExtCostsGenerator::new(measurement);
+    let pure = generator.compute();
+    use ExtCosts::*;
+    ExtCostsConfig {
+        base: f64_to_gas(pure[&base]),
+        read_memory_base: f64_to_gas(pure[&read_memory_base]),
+        read_memory_byte: f64_to_gas(pure[&read_memory_byte]),
+        write_memory_base: f64_to_gas(pure[&write_memory_base]),
+        write_memory_byte: f64_to_gas(pure[&write_memory_byte]),
+        read_register_base: f64_to_gas(pure[&read_register_base]),
+        read_register_byte: f64_to_gas(pure[&read_register_byte]),
+        write_register_base: f64_to_gas(pure[&write_register_base]),
+        write_register_byte: f64_to_gas(pure[&write_register_byte]),
+        utf8_decoding_base: f64_to_gas(pure[&utf8_decoding_base]),
+        utf8_decoding_byte: f64_to_gas(pure[&utf8_decoding_byte]),
+        utf16_decoding_base: f64_to_gas(pure[&utf16_decoding_base]),
+        utf16_decoding_byte: f64_to_gas(pure[&utf16_decoding_byte]),
+        sha256_base: f64_to_gas(pure[&sha256_base]),
+        sha256_byte: f64_to_gas(pure[&sha256_byte]),
+        log_base: f64_to_gas(pure[&log_base]),
+        log_byte: f64_to_gas(pure[&log_byte]),
+        storage_write_base: f64_to_gas(pure[&storage_write_base]),
+        storage_write_key_byte: f64_to_gas(pure[&storage_write_key_byte]),
+        storage_write_value_byte: f64_to_gas(pure[&storage_write_value_byte]),
+        storage_write_evicted_byte: f64_to_gas(pure[&storage_write_evicted_byte]),
+        storage_read_base: f64_to_gas(pure[&storage_read_base]),
+        storage_read_key_byte: f64_to_gas(pure[&storage_read_key_byte]),
+        storage_read_value_byte: f64_to_gas(pure[&storage_read_value_byte]),
+        storage_remove_base: f64_to_gas(pure[&storage_remove_base]),
+        storage_remove_key_byte: f64_to_gas(pure[&storage_remove_key_byte]),
+        storage_remove_ret_value_byte: f64_to_gas(pure[&storage_remove_ret_value_byte]),
+        storage_has_key_base: f64_to_gas(pure[&storage_has_key_base]),
+        storage_has_key_byte: f64_to_gas(pure[&storage_has_key_byte]),
+        storage_iter_create_prefix_base: f64_to_gas(pure[&storage_iter_create_prefix_base]),
+        storage_iter_create_prefix_byte: f64_to_gas(pure[&storage_iter_create_prefix_byte]),
+        storage_iter_create_range_base: f64_to_gas(pure[&storage_iter_create_range_base]),
+        storage_iter_create_from_byte: f64_to_gas(pure[&storage_iter_create_from_byte]),
+        storage_iter_create_to_byte: f64_to_gas(pure[&storage_iter_create_to_byte]),
+        storage_iter_next_base: f64_to_gas(pure[&storage_iter_next_base]),
+        storage_iter_next_key_byte: f64_to_gas(pure[&storage_iter_next_key_byte]),
+        storage_iter_next_value_byte: f64_to_gas(pure[&storage_iter_next_value_byte]),
+        // TODO: Actually compute it once our storage is complete.
+        touching_trie_node: 1,
+        promise_and_base: f64_to_gas(pure[&promise_and_base]),
+        promise_and_per_promise: f64_to_gas(pure[&promise_and_per_promise]),
+        promise_return: f64_to_gas(pure[&promise_return]),
+    }
+}
+
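+/// Builds the Wasm VM configuration: the external costs and the per-operation cost come from
+/// measurements, while the remaining limits are fixed defaults.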
+fn get_vm_config(measurement: &Measurements) -> VMConfig {
+    VMConfig {
+        ext_costs: get_ext_costs_config(measurement),
+        // TODO: Figure out whether we need this fee at all, and if we do, what the memory
+        // growth cost should be.
+        grow_mem_cost: 1,
+        regular_op_cost: f64_to_gas(nanosec_per_op()) as u32,
+        max_gas_burnt: 10u64.pow(9),
+        max_stack_height: 32 * 1024,        // 32 KiB of stack.
+        initial_memory_pages: 2u32.pow(10), // 64 MiB of memory.
+        max_memory_pages: 2u32.pow(11),     // 128 MiB of memory.
+        // By default, registers are limited to 1 GiB of memory in total.
+        registers_memory_limit: 2u64.pow(30),
+        // By default, each register is limited to 100 MiB of memory.
+        max_register_size: 2u64.pow(20) * 100,
+        // By default, there are at most 100 registers.
+        max_number_registers: 100,
+        max_number_logs: 100,
+        max_log_len: 500,
+    }
+}
+
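+/// Assembles the final `RuntimeConfig` from the measured transaction fees and VM costs.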
+fn get_runtime_config(measurement: &Measurements) -> RuntimeConfig {
+    RuntimeConfig {
+        transaction_costs: get_runtime_fees_config(measurement),
+        wasm_config: get_vm_config(measurement),
+        // TODO: Figure out the following values.
+        storage_cost_byte_per_block: 1,
+        poke_threshold: 60,
+        account_length_baseline_cost_per_block: 6561,
+    }
 }
diff --git a/runtime/runtime-params-estimator/src/ext_costs_generator.rs b/runtime/runtime-params-estimator/src/ext_costs_generator.rs
new file mode 100644
index 00000000000..0ba98888c8b
--- /dev/null
+++ b/runtime/runtime-params-estimator/src/ext_costs_generator.rs
@@ -0,0 +1,181 @@
+use crate::cases::Metric;
+use crate::stats::{DataStats, Measurements};
+use near_vm_logic::ExtCosts;
+use std::collections::BTreeMap;
+
+pub struct ExtCostsGenerator {
+    agg: BTreeMap<Metric, DataStats>,
+    result: BTreeMap<ExtCosts, f64>,
+}
+
+impl ExtCostsGenerator {
+    pub fn new(measurement: &Measurements) -> Self {
+        let aggregated = measurement.aggregate();
+        Self { agg: aggregated, result: Default::default() }
+    }
+
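+    /// Extracts the per-call cost of `ext_cost` from `metric`: starts from the metric's upper
+    /// estimate, subtracts the contributions of all other already-computed costs (except those
+    /// listed in `ignore_costs`), and divides the remainder by the number of `ext_cost` calls.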
+    fn extract_value(
+        &mut self,
+        metric: Metric,
+        ext_cost: ExtCosts,
+        ignore_costs: &[ExtCosts],
+    ) -> f64 {
+        let Self { agg, result } = self;
+        let agg = &agg[&metric];
+        let mut res = agg.upper() as f64;
+        let mut multiplier = None;
+        for (k, v) in &agg.ext_costs {
+            if ignore_costs.contains(k) {
+                continue;
+            }
+            if k == &ext_cost {
+                multiplier = Some(*v);
+                continue;
+            }
+            res -= match result.get(k) {
+                Some(x) => x * (*v),
+                None => panic!(
+                    "While extracting {:?} cost from {:?} metric, {:?} cost was not computed yet",
+                    ext_cost, metric, k
+                ),
+            };
+        }
+        match multiplier {
+            Some(x) => res /= x,
+            None => panic!(
+                "While extracting {:?} cost from {:?} metric the cost was not found",
+                ext_cost, metric
+            ),
+        };
+        res
+    }
+
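+    /// Computes `ext_cost` from `metric` and records it so that later extractions can subtract it.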
+    fn extract(&mut self, metric: Metric, ext_cost: ExtCosts, ignore_costs: &[ExtCosts]) {
+        let value = self.extract_value(metric, ext_cost, ignore_costs);
+        self.result.insert(ext_cost, value);
+    }
+
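+    /// Computes all external costs. The extraction order matters: byte costs that are not yet
+    /// known must be explicitly ignored when the corresponding base cost is extracted.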
+    pub fn compute(&mut self) -> BTreeMap<ExtCosts, f64> {
+        self.result.clear();
+        use ExtCosts::*;
+        use Metric::*;
+        self.extract(base_1M, base, &[]);
+        self.extract(read_memory_10b_10k, read_memory_base, &[read_memory_byte]);
+        self.extract(read_memory_1Mib_10k, read_memory_byte, &[]);
+        self.extract(write_register_10b_10k, write_register_base, &[write_register_byte]);
+        self.extract(write_register_1Mib_10k, write_register_byte, &[]);
+        self.extract(read_register_10b_10k, read_register_base, &[read_register_byte]);
+        self.extract(read_register_1Mib_10k, read_register_byte, &[]);
+        self.extract(write_memory_10b_10k, write_memory_base, &[write_memory_byte]);
+        self.extract(write_memory_1Mib_10k, write_memory_byte, &[]);
+
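+        // The log_* costs are not measured separately; setting them to zero lets the utf8/utf16
+        // decoding costs below absorb the logging overhead.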
+        self.result.insert(log_base, 0f64);
+        self.result.insert(log_byte, 0f64);
+        self.extract(utf8_log_10b_10k, utf8_decoding_base, &[utf8_decoding_byte]);
+        // Charge the maximum of the non-nul-terminated and nul-terminated costs.
+        let utf8_byte = self.extract_value(utf8_log_10kib_10k, utf8_decoding_byte, &[]);
+        let nul_utf8_byte = self.extract_value(nul_utf8_log_10kib_10k, utf8_decoding_byte, &[]);
+        self.result.insert(utf8_decoding_byte, utf8_byte.max(nul_utf8_byte));
+        self.extract(utf16_log_10b_10k, utf16_decoding_base, &[utf16_decoding_byte]);
+        // Charge the maximum of the non-nul-terminated and nul-terminated costs.
+        let utf16_byte = self.extract_value(utf16_log_10kib_10k, utf16_decoding_byte, &[]);
+        let nul_utf16_byte = self.extract_value(nul_utf16_log_10kib_10k, utf16_decoding_byte, &[]);
+        self.result.insert(utf16_decoding_byte, utf16_byte.max(nul_utf16_byte));
+
+        self.extract(sha256_10b_10k, sha256_base, &[sha256_byte]);
+        self.extract(sha256_10kib_10k, sha256_byte, &[]);
+
+        // TODO: Redo storage costs once we have node counting and a way to peek at stored value sizes.
+        self.extract(
+            storage_write_10b_key_10b_value_1k,
+            storage_write_base,
+            &[storage_write_key_byte, storage_write_value_byte, storage_write_evicted_byte],
+        );
+        self.extract(
+            storage_write_10kib_key_10b_value_1k,
+            storage_write_key_byte,
+            &[storage_write_value_byte, storage_write_evicted_byte],
+        );
+        self.extract(
+            storage_write_10b_key_10kib_value_1k,
+            storage_write_value_byte,
+            &[storage_write_evicted_byte],
+        );
+        self.extract(storage_write_10b_key_10kib_value_1k_evict, storage_write_evicted_byte, &[]);
+        self.extract(
+            storage_read_10b_key_10b_value_1k,
+            storage_read_base,
+            &[storage_read_key_byte, storage_read_value_byte],
+        );
+        self.extract(
+            storage_read_10kib_key_10b_value_1k,
+            storage_read_key_byte,
+            &[storage_read_value_byte],
+        );
+        self.extract(storage_read_10b_key_10kib_value_1k, storage_read_value_byte, &[]);
+        self.extract(
+            storage_remove_10b_key_10b_value_1k,
+            storage_remove_base,
+            &[storage_remove_key_byte, storage_remove_ret_value_byte],
+        );
+        self.extract(
+            storage_remove_10kib_key_10b_value_1k,
+            storage_remove_key_byte,
+            &[storage_remove_ret_value_byte],
+        );
+        self.extract(storage_remove_10b_key_10kib_value_1k, storage_remove_ret_value_byte, &[]);
+        self.extract(
+            storage_has_key_10b_key_10b_value_1k,
+            storage_has_key_base,
+            &[storage_has_key_byte],
+        );
+        self.extract(storage_has_key_10kib_key_10b_value_1k, storage_has_key_byte, &[]);
+
+        self.extract(
+            storage_iter_prefix_10b_1k,
+            storage_iter_create_prefix_base,
+            &[storage_iter_create_prefix_byte],
+        );
+        self.extract(storage_iter_prefix_10kib_1k, storage_iter_create_prefix_byte, &[]);
+        self.extract(
+            storage_iter_range_10b_from_10b_to_1k,
+            storage_iter_create_range_base,
+            &[storage_iter_create_from_byte, storage_iter_create_to_byte],
+        );
+        self.extract(
+            storage_iter_range_10kib_from_10b_to_1k,
+            storage_iter_create_from_byte,
+            &[storage_iter_create_to_byte],
+        );
+        self.extract(storage_iter_range_10b_from_10kib_to_1k, storage_iter_create_to_byte, &[]);
+
+        self.extract(
+            storage_next_10b_from_10b_to_1k_10b_key_10b_value,
+            storage_iter_next_base,
+            &[storage_iter_next_key_byte, storage_iter_next_value_byte],
+        );
+        self.extract(
+            storage_next_10kib_from_10b_to_1k_10b_key_10b_value,
+            storage_iter_next_key_byte,
+            &[storage_iter_next_value_byte],
+        );
+        self.extract(
+            storage_next_10b_from_10kib_to_1k_10b_key_10b_value,
+            storage_iter_next_value_byte,
+            &[],
+        );
+
+        self.extract(promise_and_100k, promise_and_base, &[promise_and_per_promise]);
+        self.extract(promise_and_100k_on_1k_and, promise_and_per_promise, &[]);
+        self.extract(promise_return_100k, promise_return, &[]);
+        self.result.clone()
+    }
+}
+impl std::fmt::Display for ExtCostsGenerator {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        for (k, v) in &self.agg {
+            writeln!(f, "{:?}\t\t\t\t{}", k, v)?;
+        }
+        Ok(())
+    }
+}
diff --git a/runtime/runtime-params-estimator/src/lib.rs b/runtime/runtime-params-estimator/src/lib.rs
index 81d7ebae3b8..29f441fffd9 100644
--- a/runtime/runtime-params-estimator/src/lib.rs
+++ b/runtime/runtime-params-estimator/src/lib.rs
@@ -1,5 +1,11 @@
 // Lists all cases that we want to measure.
 pub mod cases;
+// Generates runtime fees from the measurements.
+pub mod runtime_fees_generator;
+// Generates external costs from the measurements.
+pub mod ext_costs_generator;
+// Runs Wasmer on the given contract and measures the time it takes to do a single operation.
+pub mod wasmer_estimator;
 // Collects and processes stats. Prints them on display, plots them, writes them into a file.
 pub mod stats;
 // Encapsulates the runtime so that it can be run separately from the rest of the node.
diff --git a/runtime/runtime-params-estimator/src/main.rs b/runtime/runtime-params-estimator/src/main.rs
index 74f7f833882..b369b445683 100644
--- a/runtime/runtime-params-estimator/src/main.rs
+++ b/runtime/runtime-params-estimator/src/main.rs
@@ -2,6 +2,9 @@ use clap::{App, Arg};
 use near::get_default_home;
 use runtime_params_estimator::cases::run;
 use runtime_params_estimator::testbed_runners::Config;
+use std::fs::File;
+use std::io::Write;
+use std::path::Path;
 
 fn main() {
     let default_home = get_default_home();
@@ -37,38 +40,28 @@ fn main() {
                 .takes_value(true)
                 .help("How many accounts were generated with `genesis-populate`."),
         )
-        .arg(
-            Arg::with_name("smallest-block-size-pow2")
-                .long("smallest-block-size-pow2")
-                .default_value("4")
-                .required(true)
-                .takes_value(true)
-                .help("Smallest size of the block expressed as power of 2."),
-        )
-        .arg(
-            Arg::with_name("largest-block-size-pow2")
-                .long("largest-block-size-pow2")
-                .default_value("11")
-                .required(true)
-                .takes_value(true)
-                .help("Largest size of the block expressed as power of 2."),
-        )
         .get_matches();
 
     let state_dump_path = matches.value_of("home").unwrap().to_string();
     let warmup_iters_per_block = matches.value_of("warmup-iters").unwrap().parse().unwrap();
     let iter_per_block = matches.value_of("iters").unwrap().parse().unwrap();
     let active_accounts = matches.value_of("accounts-num").unwrap().parse().unwrap();
-    let smallest_block_size_pow2 =
-        matches.value_of("smallest-block-size-pow2").unwrap().parse().unwrap();
-    let largest_block_size_pow2 =
-        matches.value_of("largest-block-size-pow2").unwrap().parse().unwrap();
-    run(Config {
+    let runtime_config = run(Config {
         warmup_iters_per_block,
         iter_per_block,
         active_accounts,
-        smallest_block_size_pow2,
-        largest_block_size_pow2,
-        state_dump_path,
+        block_sizes: vec![],
+        state_dump_path: state_dump_path.clone(),
     });
+
+    println!("Generated RuntimeConfig:");
+    println!("{:#?}", runtime_config);
+
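+    // Dump the generated config as pretty-printed JSON next to the state dump.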
+    let json = serde_json::to_string_pretty(&runtime_config)
+        .expect("Failed to serialize the runtime config");
+    let mut file = File::create(Path::new(&state_dump_path).join("runtime_config.json"))
+        .expect("Failed to create file");
+    if let Err(err) = file.write_all(json.as_bytes()) {
+        panic!("Failed to write runtime config to file: {}", err);
+    }
 }
diff --git a/runtime/runtime-params-estimator/src/runtime_fees_generator.rs b/runtime/runtime-params-estimator/src/runtime_fees_generator.rs
new file mode 100644
index 00000000000..e37b39d3c10
--- /dev/null
+++ b/runtime/runtime-params-estimator/src/runtime_fees_generator.rs
@@ -0,0 +1,128 @@
+use crate::cases::Metric;
+use crate::stats::{DataStats, Measurements};
+use std::collections::BTreeMap;
+
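+/// Estimates receipt and action fees from the aggregated runtime measurements.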
+pub struct RuntimeFeesGenerator {
+    aggregated: BTreeMap<Metric, DataStats>,
+}
+
+/// Fees for receipts and actions, expressed in nanoseconds as floats.
+#[derive(Clone, Copy, Hash, PartialEq, Eq, Debug, PartialOrd, Ord)]
+pub enum ReceiptFeesFloat {
+    ActionReceiptCreation,
+    DataReceiptCreationBase,
+    DataReceiptCreationPerByte,
+    ActionCreateAccount,
+    ActionDeployContractBase,
+    ActionDeployContractPerByte,
+    ActionFunctionCallBase,
+    ActionFunctionCallPerByte,
+    ActionTransfer,
+    ActionStake,
+    ActionAddFullAccessKey,
+    ActionAddFunctionAccessKeyBase,
+    ActionAddFunctionAccessKeyPerByte,
+    ActionDeleteKey,
+    ActionDeleteAccount,
+}
+
+impl RuntimeFeesGenerator {
+    pub fn new(measurement: &Measurements) -> Self {
+        let aggregated = measurement.aggregate();
+        Self { aggregated }
+    }
+
+    /// Computes fees for receipts and actions in nanoseconds as floats.
+    pub fn compute(&self) -> BTreeMap<ReceiptFeesFloat, f64> {
+        let mut res: BTreeMap<ReceiptFeesFloat, f64> = Default::default();
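+        // Each action fee is the measured metric minus the cost of processing a bare receipt;
+        // per-byte fees come from the difference between two payload sizes divided by the size delta.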
+        res.insert(
+            ReceiptFeesFloat::ActionReceiptCreation,
+            self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::DataReceiptCreationBase,
+            self.aggregated[&Metric::data_receipt_10b_1000].upper() as f64 / 1000f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::DataReceiptCreationPerByte,
+            self.aggregated[&Metric::data_receipt_100kib_1000].upper() as f64
+                / (1000f64 * 100f64 * 1024f64),
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionCreateAccount,
+            self.aggregated[&Metric::ActionCreateAccount].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionDeployContractBase,
+            // TODO: This is a base cost, so we should not be charging for bytes here.
+            // We ignore the fact that this includes a 10K contract.
+            self.aggregated[&Metric::ActionDeploy10K].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionDeployContractPerByte,
+            (self.aggregated[&Metric::ActionDeploy1M].upper() as f64
+                - self.aggregated[&Metric::ActionDeploy100K].upper() as f64)
+                / (1024f64 * 1024f64 - 100f64 * 1024f64),
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionFunctionCallBase,
+            (self.aggregated[&Metric::noop].upper() - self.aggregated[&Metric::Receipt].upper())
+                as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionFunctionCallPerByte,
+            (self.aggregated[&Metric::noop_1MiB].upper() - self.aggregated[&Metric::noop].upper())
+                as f64
+                / (1024f64 * 1024f64),
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionTransfer,
+            self.aggregated[&Metric::ActionTransfer].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionStake,
+            self.aggregated[&Metric::ActionStake].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionAddFullAccessKey,
+            self.aggregated[&Metric::ActionAddFullAccessKey].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionAddFunctionAccessKeyBase,
+            self.aggregated[&Metric::ActionAddFunctionAccessKey1Method].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionAddFunctionAccessKeyPerByte,
+            // These are 1k methods, each 10 bytes long.
+            (self.aggregated[&Metric::ActionAddFunctionAccessKey1000Methods].upper() as f64
+                - self.aggregated[&Metric::ActionAddFunctionAccessKey1Method].upper() as f64)
+                / (1000f64 * 10f64),
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionDeleteKey,
+            self.aggregated[&Metric::ActionDeleteAccessKey].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res.insert(
+            ReceiptFeesFloat::ActionDeleteAccount,
+            self.aggregated[&Metric::ActionDeleteAccount].upper() as f64
+                - self.aggregated[&Metric::Receipt].upper() as f64,
+        );
+        res
+    }
+}
+
+impl std::fmt::Display for RuntimeFeesGenerator {
+    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
+        for (k, v) in self.compute() {
+            writeln!(f, "{:?}\t\t\t\t{}", k, v)?;
+        }
+        Ok(())
+    }
+}
diff --git a/runtime/runtime-params-estimator/src/stats.rs b/runtime/runtime-params-estimator/src/stats.rs
index b386512e636..ce77e1d5f4a 100644
--- a/runtime/runtime-params-estimator/src/stats.rs
+++ b/runtime/runtime-params-estimator/src/stats.rs
@@ -1,29 +1,15 @@
+use crate::cases::Metric;
 use gnuplot::{AxesCommon, Caption, Color, DotDotDash, Figure, Graph, LineStyle, PointSymbol};
-use std::collections::BTreeMap;
+use near_vm_logic::ExtCosts;
+use rand::Rng;
+use std::collections::{BTreeMap, HashMap};
 use std::path::Path;
 use std::time::Duration;
 
-/// A single measurement data point -- we ran a block that performed a certain operation multiple
-/// times by processing multiple transactions, also a single transaction might have performed the
-/// same operation multiple times.
-pub struct DataPoint {
-    /// The name of the metric that we are measuring.
-    metric_name: &'static str,
-    /// What is the block size in terms of number of transactions per block (excluding receipts from
-    /// the previous blocks).
-    block_size: usize,
-    /// How much time did it take to process this block.
-    block_duration: Duration,
-    /// How many times this operation was repeated within a single transaction.
-    operation_repetitions: usize,
-    /// If operation is parametrized how much did we try to load this operation in bytes?
-    operation_load: Option<usize>,
-}
-
 /// Stores measurements per block.
-#[derive(Default)]
+#[derive(Default, Clone)]
 pub struct Measurements {
-    data: Vec<DataPoint>,
+    data: BTreeMap<Metric, Vec<(usize, Duration, HashMap<ExtCosts, u64>)>>,
 }
 
 impl Measurements {
@@ -33,47 +19,29 @@ impl Measurements {
 
     pub fn record_measurement(
         &mut self,
-        metric_name: &'static str,
+        metric: Metric,
         block_size: usize,
         block_duration: Duration,
-        operation_repetitions: usize,
-        operation_load: Option<usize>,
     ) {
-        self.data.push(DataPoint {
-            metric_name,
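+        // Drain the thread-local counters of host-function calls accumulated while processing this block.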
+        let ext_costs = node_runtime::EXT_COSTS_COUNTER
+            .with(|f| f.borrow_mut().drain().collect::<HashMap<_, _>>());
+        self.data.entry(metric).or_insert_with(Vec::new).push((
             block_size,
             block_duration,
-            operation_repetitions,
-            operation_load,
-        });
+            ext_costs,
+        ));
     }
 
-    /// Groups measurements into stats by:
-    /// `metric_name`, `operation_repetitions`, `operation_load`.
-    pub fn group(&self) -> Vec<(&'static str, usize, Option<usize>, DataStats)> {
-        let mut grouped: BTreeMap<(&'static str, usize, Option<usize>), Vec<u128>> =
-            Default::default();
-        for point in &self.data {
-            grouped
-                .entry((point.metric_name, point.operation_repetitions, point.operation_load))
-                .or_insert_with(Vec::new)
-                .push(point.block_duration.as_nanos() / point.block_size as u128);
-        }
-        grouped
-            .into_iter()
-            .map(|((metric_name, operation_repetitions, operation_load), v)| {
-                (metric_name, operation_repetitions, operation_load, DataStats::from_nanos(v))
-            })
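+    /// Aggregates the raw per-block measurements into per-metric statistics.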
+    pub fn aggregate(&self) -> BTreeMap<Metric, DataStats> {
+        self.data
+            .iter()
+            .map(|(metric, measurements)| (metric.clone(), DataStats::aggregate(measurements)))
             .collect()
     }
 
     pub fn print(&self) {
-        println!("metrics_name\t\toperation_repetitions\t\toperation_load\t\tstats");
-        for (metric_name, operation_repetitions, operation_load, stats) in self.group() {
-            println!(
-                "{}\t\t{}\t\t{:?}\t\t{}",
-                metric_name, operation_repetitions, operation_load, stats
-            );
+        for (metric, stats) in self.aggregate() {
+            println!("{:?}\t\t\t\t{}", metric, stats);
         }
     }
 
@@ -81,21 +49,17 @@ impl Measurements {
         let mut writer = csv::Writer::from_path(path).unwrap();
         writer
             .write_record(&[
-                "metric_name",
-                "operation_repetitions",
-                "operation_load",
+                "metric",
                 "mean_micros",
                 "stddev_micros",
                 "5ile_micros",
                 "95ile_micros",
             ])
             .unwrap();
-        for (metric_name, operation_repetitions, operation_load, stats) in self.group() {
+        for (metric, stats) in self.aggregate() {
             writer
                 .write_record(&[
-                    format!("{}", metric_name),
-                    format!("{}", operation_repetitions),
-                    format!("{:?}", operation_load),
+                    format!("{:?}", metric),
                     format!("{}", stats.mean.as_micros()),
                     format!("{}", stats.stddev.as_micros()),
                     format!("{}", stats.ile5.as_micros()),
@@ -107,112 +71,117 @@ impl Measurements {
     }
 
     pub fn plot(&self, path: &Path) {
-        // metric_name -> (operation_repetitions, operation_size -> [block_size -> Vec<block duration>])
-        let mut grouped_by_metric: BTreeMap<
-            &'static str,
-            BTreeMap<(usize, Option<usize>), BTreeMap<usize, Vec<Duration>>>,
-        > = Default::default();
-
-        for point in &self.data {
-            grouped_by_metric
-                .entry(point.metric_name)
-                .or_insert_with(Default::default)
-                .entry((point.operation_repetitions, point.operation_load))
-                .or_insert_with(Default::default)
-                .entry(point.block_size)
-                .or_insert_with(Default::default)
-                .push(point.block_duration);
-        }
-
-        // Different metrics are displayed with different graph windows.
-
-        for (metric_name, data) in grouped_by_metric {
-            const COLORS: &[&str] = &["red", "orange", "green", "blue", "violet"];
+        // Different metrics are displayed with different colors.
+        let mut fg = Figure::new();
+        let axes = fg
+            .axes2d()
+            .set_title("Metrics in micros", &[])
+            .set_legend(Graph(0.5), Graph(0.9), &[], &[])
+            .set_x_label("Block size", &[])
+            .set_y_label("Duration micros", &[])
+            .set_grid_options(true, &[LineStyle(DotDotDash), Color("black")])
+            .set_x_log(Some(2.0))
+            .set_x_grid(true)
+            .set_y_log(Some(2.0))
+            .set_y_grid(true);
+
+        for (i, (metric, data)) in self.data.iter().enumerate() {
             const POINTS: &[char] = &['o', 'x', '*', 's', 't', 'd', 'r'];
-
-            let mut fg = Figure::new();
-            let axes = fg
-                .axes2d()
-                .set_title(metric_name, &[])
-                .set_legend(Graph(0.5), Graph(0.9), &[], &[])
-                .set_x_label("Block size", &[])
-                .set_y_label("Duration micros", &[])
-                .set_grid_options(true, &[LineStyle(DotDotDash), Color("black")])
-                .set_x_log(Some(2.0))
-                .set_x_grid(true)
-                .set_y_log(Some(2.0))
-                .set_y_grid(true);
-
-            for (i, ((operation_repetitions, operation_load), points)) in
-                data.into_iter().enumerate()
-            {
-                let line_caption = if let Some(operation_load) = operation_load {
-                    format!("{}b x {}", operation_load, operation_repetitions)
-                } else {
-                    format!("x {}", operation_repetitions)
-                };
-                let mut xs = vec![];
-                let mut ys = vec![];
-                let mut mean_xs = vec![];
-                let mut mean_ys = vec![];
-                for (block_size, durations) in points {
-                    for duration in &durations {
-                        xs.push(block_size as u64);
-                        ys.push(duration.as_micros() as u64 / block_size as u64);
-                    }
-                    mean_xs.push(block_size as u64);
-                    mean_ys.push(
-                        durations.iter().map(|d| d.as_micros() as u64).sum::<u64>()
-                            / durations.len() as u64
-                            / block_size as u64,
-                    );
-                }
-                axes.points(
-                    xs.as_slice(),
-                    ys.as_slice(),
-                    &[Color(COLORS[i % COLORS.len()]), PointSymbol(POINTS[i % POINTS.len()])],
-                )
-                .lines_points(
-                    mean_xs.as_slice(),
-                    mean_ys.as_slice(),
-                    &[
-                        Color(COLORS[i % COLORS.len()]),
-                        PointSymbol('.'),
-                        Caption(line_caption.as_str()),
-                    ],
-                );
+            let marker = POINTS[i % POINTS.len()];
+
+            let (xs, ys): (Vec<_>, Vec<_>) = data
+                .iter()
+                .cloned()
+                .map(|(block_size, block_duration, _)| {
+                    (block_size as u64, (block_duration.as_micros() / block_size as u128) as u64)
+                })
+                .unzip();
+
+            // Aggregate per block size.
+            let mut aggregate: BTreeMap<usize, Vec<u64>> = Default::default();
+            for (x, y) in xs.iter().zip(ys.iter()) {
+                aggregate.entry(*x as usize).or_insert_with(Vec::new).push(*y);
             }
-            let mut buf = path.to_path_buf();
-            buf.push(format!("{}.svg", metric_name));
-            fg.save_to_svg(buf.to_str().unwrap(), 800, 800).unwrap();
+            let (mean_xs, mean_ys): (Vec<_>, Vec<_>) = aggregate
+                .into_iter()
+                .map(|(x, ys)| (x, (ys.iter().sum::<u64>() as u64) / (ys.len() as u64)))
+                .unzip();
+
+            let metric_name = format!("{:?}", metric);
+            let color = random_color();
+            axes.points(
+                xs.as_slice(),
+                ys.as_slice(),
+                &[Color(color.as_str()), PointSymbol(marker)],
+            )
+            .lines_points(
+                mean_xs.as_slice(),
+                mean_ys.as_slice(),
+                &[Color(color.as_str()), PointSymbol('.'), Caption(metric_name.as_str())],
+            );
         }
+        fg.save_to_svg(path.join("metrics.svg").to_str().unwrap(), 800, 800).unwrap();
     }
 }
 
+pub fn random_color() -> String {
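+    // Produce a random #RRGGBB color string so that each metric gets a distinct color on the plot.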
+    let res = (0..3)
+        .map(|_| {
+            let b = rand::thread_rng().gen::<u8>();
+            format!("{:02X}", b)
+        })
+        .collect::<Vec<_>>()
+        .join("");
+    format!("#{}", res).to_uppercase()
+}
+
 pub struct DataStats {
     pub mean: Duration,
     pub stddev: Duration,
     pub ile5: Duration,
     pub ile95: Duration,
+    pub ext_costs: BTreeMap<ExtCosts, f64>,
 }
 
 impl DataStats {
-    pub fn from_nanos(mut nanos: Vec<u128>) -> Self {
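+    /// Aggregates raw (block_size, duration, ext-cost counts) samples into per-transaction timing
+    /// statistics and average host-function counts.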
+    pub fn aggregate(un_aggregated: &Vec<(usize, Duration, HashMap<ExtCosts, u64>)>) -> Self {
+        let mut nanos = un_aggregated
+            .iter()
+            .map(|(block_size, duration, _)| duration.as_nanos() / *block_size as u128)
+            .collect::<Vec<_>>();
         nanos.sort();
         let mean = (nanos.iter().sum::<u128>() / (nanos.len() as u128)) as i128;
         let stddev2 = nanos.iter().map(|x| (*x as i128 - mean) * (*x as i128 - mean)).sum::<i128>()
-            / (nanos.len() as i128 - 1);
+            / if nanos.len() > 1 { nanos.len() as i128 - 1 } else { 1 };
         let stddev = (stddev2 as f64).sqrt() as u128;
         let ile5 = nanos[nanos.len() * 5 / 100];
         let ile95 = nanos[nanos.len() * 95 / 100];
 
+        let mut ext_costs: BTreeMap<ExtCosts, f64> = BTreeMap::new();
+        let mut div: BTreeMap<ExtCosts, u64> = BTreeMap::new();
+        for (block_size, _, un_aggregated_ext_costs) in un_aggregated {
+            for (ext_cost, count) in un_aggregated_ext_costs {
+                *ext_costs.entry(*ext_cost).or_default() += *count as f64;
+                *div.entry(*ext_cost).or_default() += *block_size as u64;
+            }
+        }
+        for (k, v) in div {
+            *ext_costs.get_mut(&k).unwrap() /= v as f64;
+        }
+
         Self {
             mean: Duration::from_nanos(mean as u64),
             stddev: Duration::from_nanos(stddev as u64),
             ile5: Duration::from_nanos(ile5 as u64),
             ile95: Duration::from_nanos(ile95 as u64),
+            ext_costs,
         }
     }
+
+    /// Returns mean + 4*sigma, in nanoseconds.
+    pub fn upper(&self) -> u128 {
+        self.mean.as_nanos() + 4u128 * self.stddev.as_nanos()
+    }
 }
 
 impl std::fmt::Display for DataStats {
@@ -225,7 +194,7 @@ impl std::fmt::Display for DataStats {
                 self.stddev.as_secs(),
                 self.ile5.as_secs(),
                 self.ile95.as_secs()
-            )
+            )?;
         } else if self.mean.as_millis() > 100 {
             write!(
                 f,
@@ -234,7 +203,7 @@ impl std::fmt::Display for DataStats {
                 self.stddev.as_millis(),
                 self.ile5.as_millis(),
                 self.ile95.as_millis()
-            )
+            )?;
         } else if self.mean.as_micros() > 100 {
             write!(
                 f,
@@ -243,7 +212,7 @@ impl std::fmt::Display for DataStats {
                 self.stddev.as_micros(),
                 self.ile5.as_micros(),
                 self.ile95.as_micros()
-            )
+            )?;
         } else {
             write!(
                 f,
@@ -252,7 +221,11 @@ impl std::fmt::Display for DataStats {
                 self.stddev.as_nanos(),
                 self.ile5.as_nanos(),
                 self.ile95.as_nanos()
-            )
+            )?;
+        }
+        for (ext_cost, cnt) in &self.ext_costs {
+            write!(f, " {:?}=>{:.2}", ext_cost, cnt)?;
         }
+        Ok(())
     }
 }
diff --git a/runtime/runtime-params-estimator/src/testbed.rs b/runtime/runtime-params-estimator/src/testbed.rs
index 81f2731df89..6238d3c1956 100644
--- a/runtime/runtime-params-estimator/src/testbed.rs
+++ b/runtime/runtime-params-estimator/src/testbed.rs
@@ -2,7 +2,7 @@ use borsh::BorshDeserialize;
 use near::get_store_path;
 use near_primitives::receipt::Receipt;
 use near_primitives::transaction::{ExecutionStatus, SignedTransaction};
-use near_primitives::types::MerkleHash;
+use near_primitives::types::{Gas, MerkleHash, StateRoot};
 use near_store::{create_store, Trie, COL_STATE};
 use node_runtime::config::RuntimeConfig;
 use node_runtime::{ApplyState, Runtime};
@@ -43,13 +43,18 @@ impl RuntimeTestbed {
         let mut file = File::open(roots_files).expect("Failed to open genesis roots file.");
         let mut data = vec![];
         file.read_to_end(&mut data).expect("Failed to read genesis roots file.");
-        let mut state_roots: Vec<MerkleHash> =
+        let state_roots: Vec<StateRoot> =
             BorshDeserialize::try_from_slice(&data).expect("Failed to deserialize genesis roots");
         assert!(state_roots.len() <= 1, "Parameter estimation works with one shard only.");
         assert!(!state_roots.is_empty(), "No state roots found.");
-        let root = state_roots.pop().unwrap();
+        let root = state_roots[0];
 
-        let runtime_config = RuntimeConfig::default();
+        let mut runtime_config = RuntimeConfig::default();
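+        // Lift the VM limits so that heavy measurement workloads are not cut short by the defaults.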
+        runtime_config.wasm_config.max_log_len = std::u64::MAX;
+        runtime_config.wasm_config.max_number_registers = std::u64::MAX;
+        runtime_config.wasm_config.max_gas_burnt = std::u64::MAX;
+        runtime_config.wasm_config.max_register_size = std::u64::MAX;
+        runtime_config.wasm_config.max_number_logs = std::u64::MAX;
         let runtime = Runtime::new(runtime_config);
         let prev_receipts = vec![];
 
@@ -60,11 +65,16 @@ impl RuntimeTestbed {
             epoch_length: 4,
             gas_price: 1,
             block_timestamp: 0,
+            gas_limit: None,
         };
         Self { workdir, trie, root, runtime, prev_receipts, apply_state }
     }
 
-    pub fn process_block(&mut self, transactions: &[SignedTransaction], allow_failures: bool) {
+    pub fn process_block(
+        &mut self,
+        transactions: &[SignedTransaction],
+        allow_failures: bool,
+    ) -> Gas {
         let apply_result = self
             .runtime
             .apply(
@@ -83,8 +93,10 @@ impl RuntimeTestbed {
         store_update.commit().unwrap();
         self.apply_state.block_index += 1;
 
+        let mut total_burnt_gas = 0;
         if !allow_failures {
             for outcome in &apply_result.outcomes {
+                total_burnt_gas += outcome.outcome.gas_burnt;
                 match &outcome.outcome.status {
                     ExecutionStatus::Failure(e) => panic!("Execution failed {:#?}", e),
                     _ => (),
@@ -92,5 +104,12 @@ impl RuntimeTestbed {
             }
         }
         self.prev_receipts = apply_result.new_receipts;
+        total_burnt_gas
+    }
+
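+    /// Processes empty blocks until all outstanding receipts have been applied.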
+    pub fn process_blocks_until_no_receipts(&mut self, allow_failures: bool) {
+        while !self.prev_receipts.is_empty() {
+            self.process_block(&[], allow_failures);
+        }
     }
 }
diff --git a/runtime/runtime-params-estimator/src/testbed_runners.rs b/runtime/runtime-params-estimator/src/testbed_runners.rs
index 41f2db303ea..833c26273d2 100644
--- a/runtime/runtime-params-estimator/src/testbed_runners.rs
+++ b/runtime/runtime-params-estimator/src/testbed_runners.rs
@@ -1,3 +1,4 @@
+use crate::cases::Metric;
 use crate::stats::Measurements;
 use crate::testbed::RuntimeTestbed;
 use indicatif::{ProgressBar, ProgressStyle};
@@ -14,20 +15,13 @@ pub fn get_account_id(account_index: usize) -> String {
     format!("near_{}_{}", account_index, account_index)
 }
 
-/// Block sizes that we are going to try running with.
-fn block_sizes(config: &Config) -> Vec<usize> {
-    (config.smallest_block_size_pow2..=config.largest_block_size_pow2)
-        .map(|x| 2usize.pow(x))
-        .collect()
-}
-
 /// Total number of transactions that we need to prepare.
 fn total_transactions(config: &Config) -> usize {
-    block_sizes(config).iter().sum::<usize>() * config.iter_per_block
+    config.block_sizes.iter().sum::<usize>() * config.iter_per_block
 }
 
 fn warmup_total_transactions(config: &Config) -> usize {
-    block_sizes(config).iter().sum::<usize>() * config.warmup_iters_per_block
+    config.block_sizes.iter().sum::<usize>() * config.warmup_iters_per_block
 }
 
 /// Configuration which we use to run measurements.
@@ -39,19 +33,15 @@ pub struct Config {
     pub iter_per_block: usize,
     /// Total active accounts.
     pub active_accounts: usize,
-    /// Smallest size of the block expressed as power of 2.
-    pub smallest_block_size_pow2: u32,
-    /// Largest size of the block expressed as power of 2.
-    pub largest_block_size_pow2: u32,
+    /// Block sizes (number of transactions per block) to run with.
+    pub block_sizes: Vec<usize>,
     /// Where state dump is located in case we need to create a testbed.
     pub state_dump_path: String,
 }
 
 /// Measure the speed of transactions containing certain simple actions.
 pub fn measure_actions(
-    name: &'static str,
-    operation_repetitions: usize,
-    operation_load: Option<usize>,
+    metric: Metric,
     measurements: &mut Measurements,
     config: &Config,
     testbed: Option<RuntimeTestbed>,
@@ -96,39 +86,30 @@ pub fn measure_actions(
             CryptoHash::default(),
         )
     };
-    measure_transactions(
-        name,
-        operation_repetitions,
-        operation_load,
-        measurements,
-        config,
-        testbed,
-        &mut f,
-    )
+    measure_transactions(metric, measurements, config, testbed, &mut f, false)
 }
 
 /// Measure the speed of the transactions, given a transactions-generator function.
 /// Returns testbed so that it can be reused.
 pub fn measure_transactions<F>(
-    name: &'static str,
-    operation_repetitions: usize,
-    operation_load: Option<usize>,
+    metric: Metric,
     measurements: &mut Measurements,
     config: &Config,
     testbed: Option<RuntimeTestbed>,
     f: &mut F,
+    allow_failures: bool,
 ) -> RuntimeTestbed
 where
     F: FnMut() -> SignedTransaction,
 {
     let mut testbed = match testbed {
         Some(x) => {
-            println!("{}. Reusing testbed.", name);
+            println!("{:?}. Reusing testbed.", metric);
             x
         }
         None => {
             let path = PathBuf::from(config.state_dump_path.as_str());
-            println!("{}. Preparing testbed. Loading state.", name);
+            println!("{:?}. Preparing testbed. Loading state.", metric);
             RuntimeTestbed::from_state_dump(&path)
         }
     };
@@ -137,10 +118,10 @@ where
     bar.set_style(ProgressStyle::default_bar().template(
         "[elapsed {elapsed_precise} remaining {eta_precise}] Warm up {bar} {pos:>7}/{len:7} {msg}",
     ));
-    for block_size in block_sizes(config) {
+    for block_size in config.block_sizes.clone() {
         for _ in 0..config.warmup_iters_per_block {
             let block: Vec<_> = (0..block_size).map(|_| (*f)()).collect();
-            testbed.process_block(&block, false);
+            testbed.process_block(&block, allow_failures);
             bar.inc(block_size as _);
             bar.set_message(format!("Block size: {}", block_size).as_str());
         }
@@ -151,23 +132,21 @@ where
     bar.set_style(ProgressStyle::default_bar().template(
         "[elapsed {elapsed_precise} remaining {eta_precise}] Measuring {bar} {pos:>7}/{len:7} {msg}",
     ));
-    for block_size in block_sizes(config) {
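+    // Reset the thread-local counters of host-function calls before the measured iterations.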
+    node_runtime::EXT_COSTS_COUNTER.with(|f| {
+        f.borrow_mut().clear();
+    });
+    for block_size in config.block_sizes.clone() {
         for _ in 0..config.iter_per_block {
             let block: Vec<_> = (0..block_size).map(|_| (*f)()).collect();
             let start_time = Instant::now();
-            testbed.process_block(&block, false);
+            testbed.process_block(&block, allow_failures);
             let end_time = Instant::now();
-            measurements.record_measurement(
-                name,
-                block_size,
-                end_time - start_time,
-                operation_repetitions,
-                operation_load,
-            );
+            measurements.record_measurement(metric.clone(), block_size, end_time - start_time);
             bar.inc(block_size as _);
             bar.set_message(format!("Block size: {}", block_size).as_str());
         }
     }
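+    // Apply the receipts produced by the measured blocks so the testbed is clean for the next metric.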
+    testbed.process_blocks_until_no_receipts(allow_failures);
     bar.finish();
     measurements.print();
     testbed
diff --git a/runtime/runtime-params-estimator/src/wasmer_estimator.rs b/runtime/runtime-params-estimator/src/wasmer_estimator.rs
new file mode 100644
index 00000000000..d4e3ebe6c0a
--- /dev/null
+++ b/runtime/runtime-params-estimator/src/wasmer_estimator.rs
@@ -0,0 +1,70 @@
+use near_runtime_fees::RuntimeFeesConfig;
+use near_vm_logic::mocks::mock_external::MockedExternal;
+use near_vm_logic::{VMConfig, VMContext, VMOutcome};
+use near_vm_runner::VMError;
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+use std::time::Instant;
+
+const CURRENT_ACCOUNT_ID: &str = "alice";
+const SIGNER_ACCOUNT_ID: &str = "bob";
+const SIGNER_ACCOUNT_PK: [u8; 3] = [0, 1, 2];
+const PREDECESSOR_ACCOUNT_ID: &str = "carol";
+
+fn create_context(input: Vec<u8>) -> VMContext {
+    VMContext {
+        current_account_id: CURRENT_ACCOUNT_ID.to_owned(),
+        signer_account_id: SIGNER_ACCOUNT_ID.to_owned(),
+        signer_account_pk: Vec::from(&SIGNER_ACCOUNT_PK[..]),
+        predecessor_account_id: PREDECESSOR_ACCOUNT_ID.to_owned(),
+        input,
+        block_index: 10,
+        block_timestamp: 42,
+        account_balance: 2u128,
+        storage_usage: 12,
+        attached_deposit: 2u128,
+        prepaid_gas: 10_u64.pow(18),
+        random_seed: vec![0, 1, 2],
+        is_view: false,
+        output_data_receivers: vec![],
+    }
+}
+
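+/// Runs the `cpu_ram_soak_test` method of the large test contract once through the VM runner.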
+fn call() -> (Option<VMOutcome>, Option<VMError>) {
+    let code = include_bytes!("../test-contract/res/large_contract.wasm");
+    let mut fake_external = MockedExternal::new();
+    let context = create_context(vec![]);
+    let config = VMConfig::default();
+    let fees = RuntimeFeesConfig::default();
+
+    let promise_results = vec![];
+
+    let mut hash = DefaultHasher::new();
+    code.hash(&mut hash);
+    let code_hash = hash.finish().to_le_bytes().to_vec();
+    near_vm_runner::run(
+        code_hash,
+        code,
+        b"cpu_ram_soak_test",
+        &mut fake_external,
+        context,
+        &config,
+        &fees,
+        &promise_results,
+    )
+}
+
+const NUM_ITERATIONS: usize = 10;
+
+/// Number of nanoseconds it takes to execute the most CPU-demanding operation.
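+/// Estimated by dividing the wall-clock time of `NUM_ITERATIONS` calls by the gas burnt in a
+/// single call times `NUM_ITERATIONS`, assuming burnt gas is proportional to the number of
+/// executed operations.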
+pub fn nanosec_per_op() -> f64 {
+    // Call once for the warmup.
+    let (outcome, _) = call();
+    let outcome = outcome.unwrap();
+    let start = Instant::now();
+    for _ in 0..NUM_ITERATIONS {
+        call();
+    }
+    let duration = Instant::now().duration_since(start).as_nanos() as f64;
+    duration / (outcome.burnt_gas as f64 * NUM_ITERATIONS as f64)
+}
diff --git a/runtime/runtime-params-estimator/test-contract/Cargo.lock b/runtime/runtime-params-estimator/test-contract/Cargo.lock
index 7ec616155a0..a959532fb4b 100644
--- a/runtime/runtime-params-estimator/test-contract/Cargo.lock
+++ b/runtime/runtime-params-estimator/test-contract/Cargo.lock
@@ -3,3 +3,4 @@
 [[package]]
 name = "test-contract"
 version = "0.1.0"
+
diff --git a/runtime/runtime-params-estimator/test-contract/res/large_contract.wasm b/runtime/runtime-params-estimator/test-contract/res/large_contract.wasm
index 1bb3d48699d..ecb29dea04b 100755
Binary files a/runtime/runtime-params-estimator/test-contract/res/large_contract.wasm and b/runtime/runtime-params-estimator/test-contract/res/large_contract.wasm differ
diff --git a/runtime/runtime-params-estimator/test-contract/res/large_payload b/runtime/runtime-params-estimator/test-contract/res/large_payload
index 045813f75e1..f996d6b131d 100644
Binary files a/runtime/runtime-params-estimator/test-contract/res/large_payload and b/runtime/runtime-params-estimator/test-contract/res/large_payload differ
diff --git a/runtime/runtime-params-estimator/test-contract/res/medium_contract.wasm b/runtime/runtime-params-estimator/test-contract/res/medium_contract.wasm
index 2bb90241a53..96746b42ff8 100755
Binary files a/runtime/runtime-params-estimator/test-contract/res/medium_contract.wasm and b/runtime/runtime-params-estimator/test-contract/res/medium_contract.wasm differ
diff --git a/runtime/runtime-params-estimator/test-contract/res/medium_payload b/runtime/runtime-params-estimator/test-contract/res/medium_payload
index a65080fcbd5..dbbad77aed7 100644
Binary files a/runtime/runtime-params-estimator/test-contract/res/medium_payload and b/runtime/runtime-params-estimator/test-contract/res/medium_payload differ
diff --git a/runtime/runtime-params-estimator/test-contract/res/small_contract.wasm b/runtime/runtime-params-estimator/test-contract/res/small_contract.wasm
index 6102ac3f039..a90ab4aae9b 100755
Binary files a/runtime/runtime-params-estimator/test-contract/res/small_contract.wasm and b/runtime/runtime-params-estimator/test-contract/res/small_contract.wasm differ
diff --git a/runtime/runtime-params-estimator/test-contract/res/small_payload b/runtime/runtime-params-estimator/test-contract/res/small_payload
index 0aaf61d85cd..ccc07db6c01 100644
Binary files a/runtime/runtime-params-estimator/test-contract/res/small_payload and b/runtime/runtime-params-estimator/test-contract/res/small_payload differ
diff --git a/runtime/runtime-params-estimator/test-contract/src/lib.rs b/runtime/runtime-params-estimator/test-contract/src/lib.rs
index 49d7802fc31..d5f6aaad0f6 100644
--- a/runtime/runtime-params-estimator/test-contract/src/lib.rs
+++ b/runtime/runtime-params-estimator/test-contract/src/lib.rs
@@ -9,8 +9,6 @@ pub fn panic(_info: &::core::panic::PanicInfo) -> ! {
     }
 }
 
-use core::mem::size_of;
-
 #[allow(unused)]
 extern "C" {
     // #############
@@ -18,6 +16,7 @@ extern "C" {
     // #############
     fn read_register(register_id: u64, ptr: u64);
     fn register_len(register_id: u64) -> u64;
+    fn write_register(register_id: u64, data_len: u64, data_ptr: u64);
     // ###############
     // # Context API #
     // ###############
@@ -146,8 +145,8 @@ extern "C" {
     fn storage_iter_next(iterator_id: u64, key_register_id: u64, value_register_id: u64) -> u64;
 }
 
-/// This function is not doing anything useful, it is just here to make sure the payload is getting
-/// compiled into Wasm.
+// This function does not do anything useful; it is just here to make sure the payload gets
+// compiled into Wasm.
 #[cfg(feature = "small_payload")]
 #[no_mangle]
 pub fn payload() {
@@ -175,437 +174,560 @@ pub fn payload() {
     }
 }
 
-/// Function that does not do anything at all.
+// Function that does not do anything at all.
 #[no_mangle]
 pub fn noop() {}
 
-/// Just a CPU-heavy function that we can use to correlate gas usage with processing time.
-#[no_mangle]
-pub fn factorization() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, _) = read_u64input(&mut buffer);
-    let mut largest_prime = 1u64;
-    for i in 2..n {
-        let mut is_prime = true;
-        for k in 2..i {
-            if (i / k) as u64 * k == i {
-                is_prime = false;
-                break;
-            }
-        }
-        if is_prime {
-            largest_prime = i;
-        }
-    }
-    let largest_prime = &largest_prime;
-    unsafe {
-        value_return(size_of::<u64>() as u64, largest_prime as *const u64 as u64);
+// Function that we use to measure the `base` cost by calling `block_index` 1M times.
+#[no_mangle]
+pub unsafe fn base_1M() {
+    for _ in 0..1_000_000 {
+        block_index();
     }
 }
 
-/// Returns:
-/// * `n: u64` -- how many times a certain operation should be repeated;
-/// * `blob: &mut [u8]` -- the blob that was also read into the buffer.
-#[inline]
-fn read_u64input(buffer: &mut [u8]) -> (u64, &mut [u8]) {
-    unsafe {
-        input(0);
-        let len = register_len(0);
-        read_register(0, buffer.as_ptr() as u64);
-        let mut data = [0u8; size_of::<u64>()];
-        data.copy_from_slice(&buffer[..size_of::<u64>()]);
-        (
-            u64::from_le_bytes(data),
-            buffer.split_at_mut(len as usize).0.split_at_mut(size_of::<u64>()).1,
-        )
+// Function to measure `read_memory_base` and `read_memory_byte` many times.
+// Reads 10b 10k times from memory.
+#[no_mangle]
+pub unsafe fn read_memory_10b_10k() {
+    let buffer = [0u8; 10];
+    for _ in 0..10_000 {
+        value_return(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
 }
 
-#[inline]
-fn return_u64(value: u64) {
-    unsafe {
-        value_return(size_of::<u64>() as u64, &value as *const u64 as u64);
+// Function to measure `read_memory_base` and `read_memory_byte` many times.
+// Reads 1Mib 10k times from memory.
+#[no_mangle]
+pub unsafe fn read_memory_1Mib_10k() {
+    let buffer = [0u8; 1024 * 1024];
+    for _ in 0..10_000 {
+        value_return(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
 }
 
-const BUFFER_SIZE: usize = 100_000;
-
-/// Call fixture 10 times.
+// Function to measure `write_memory_base` and `write_memory_byte` many times.
+// Writes 10b 10k times into memory. Includes `read_register` costs.
 #[no_mangle]
-pub fn call_fixture10() {
-    for _ in 0..10 {
-        let mut buffer = [0u8; BUFFER_SIZE];
-        let (n, _) = read_u64input(&mut buffer);
-        // Some useful stuff should be happening here.
-        return_u64(n);
+pub unsafe fn write_memory_10b_10k() {
+    let buffer = [0u8; 10];
+    write_register(0, buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
+    for _ in 0..10_000 {
+        read_register(0, buffer.as_ptr() as *const u64 as u64);
     }
 }
 
-/// Call `input` `n` times.
+// Function to measure `write_memory_base` and `write_memory_byte` many times.
+// Writes 1Mib 10k times into memory. Includes `read_register` costs.
 #[no_mangle]
-pub fn call_input() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, _) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            input(0);
-        }
+pub unsafe fn write_memory_1Mib_10k() {
+    let buffer = [0u8; 1024 * 1024];
+    write_register(0, buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
+    for _ in 0..10_000 {
+        read_register(0, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
-/// Call `input`, `register_len` `n` times.
+// Function to measure `read_register_base` and `read_register_byte` many times.
+// Reads 10b 10k times from register.
 #[no_mangle]
-pub fn call_input_register_len() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, _) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            input(0);
-            register_len(0);
-        }
+pub unsafe fn read_register_10b_10k() {
+    let buffer = [0u8; 10];
+    write_register(0, buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
+    for _ in 0..10_000 {
+        value_return(core::u64::MAX, 0);
     }
-    return_u64(n);
 }
 
-/// Call `input`, `read_register` `n` times.
+// Function to measure `read_register_base` and `read_register_byte` many times.
+// Reads 1Mib 10k times from register.
 #[no_mangle]
-pub fn call_input_read_register() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, _) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            input(0);
-            read_register(0, buffer.as_ptr() as *const u64 as u64);
-        }
+pub unsafe fn read_register_1Mib_10k() {
+    let buffer = [0u8; 1024 * 1024];
+    write_register(0, buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
+    for _ in 0..10_000 {
+        value_return(core::u64::MAX, 0);
     }
-    return_u64(n);
 }
 
-macro_rules! call_func {
-    ($exp_name:ident, $call:expr) => {
-        #[no_mangle]
-        pub fn $exp_name() {
-            let mut buffer = [0u8; BUFFER_SIZE];
-            let (n, _) = read_u64input(&mut buffer);
-            for _ in 0..n {
-                unsafe {
-                    $call;
-                }
-            }
-            return_u64(n);
-        }
-    };
+// Function to measure `write_register_base` and `write_register_byte` many times.
+// Writes 10b 10k times to register.
+#[no_mangle]
+pub unsafe fn write_register_10b_10k() {
+    let buffer = [0u8; 10];
+    for _ in 0..10_000 {
+        write_register(0, buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
+    }
 }
 
-// ###############
-// # Context API #
-// ###############
-call_func!(call_current_account_id, current_account_id(0));
-call_func!(call_signer_account_id, signer_account_id(0));
-call_func!(call_signer_account_pk, signer_account_pk(0));
-call_func!(call_predecessor_account_id, predecessor_account_id(0));
-call_func!(call_block_index, block_index());
-call_func!(call_storage_usage, storage_usage());
-
-// #################
-// # Economics API #
-// #################
-#[no_mangle]
-pub fn call_account_balance() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, _) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            account_balance(buffer.as_ptr() as *const u64 as u64);
-        }
+// Function to measure `write_register_base` and `write_register_byte` many times.
+// Writes 1Mib 10k times to register.
+#[no_mangle]
+pub unsafe fn write_register_1Mib_10k() {
+    let buffer = [0u8; 1024 * 1024];
+    for _ in 0..10_000 {
+        write_register(0, buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
+// Function to measure `utf8_decoding_base`, `utf8_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf8 10b 10k times into log.
 #[no_mangle]
-pub fn call_attached_deposit() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, _) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            attached_deposit(buffer.as_ptr() as *const u64 as u64);
-        }
+pub unsafe fn utf8_log_10b_10k() {
+    let buffer = [65u8; 10];
+    for _ in 0..10_000 {
+        log_utf8(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
-call_func!(call_prepaid_gas, prepaid_gas());
-call_func!(call_used_gas, used_gas());
 
-// ############
-// # Math API #
-// ############
-call_func!(call_random_seed, random_seed(0));
+// Function to measure `utf8_decoding_base`, `utf8_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf8 10kib 10k times into log.
 #[no_mangle]
-pub fn call_sha256() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe { sha256(blob.len() as _, blob.as_ptr() as _, 0) }
+pub unsafe fn utf8_log_10kib_10k() {
+    let buffer = [65u8; 10240];
+    for _ in 0..10_000 {
+        log_utf8(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
-// #####################
-// # Miscellaneous API #
-// #####################
+// Nul-terminated versions.
+// Function to measure `utf8_decoding_base`, `utf8_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf8 10b 10k times into log.
 #[no_mangle]
-pub fn call_value_return() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe { value_return(blob.len() as _, blob.as_ptr() as _) }
+pub unsafe fn nul_utf8_log_10b_10k() {
+    let mut buffer = [65u8; 10];
+    buffer[buffer.len() - 1] = 0;
+    for _ in 0..10_000 {
+        log_utf8(core::u64::MAX, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
+// Nul-terminated versions.
+// Function to measure `utf8_decoding_base`, `utf8_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf8 10kib 10k times into log.
 #[no_mangle]
-pub fn call_log_utf8() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        let blob = if blob.len() < 200 { &blob } else { &blob[..200] };
-        unsafe { log_utf8(blob.len() as _, blob.as_ptr() as _) }
+pub unsafe fn nul_utf8_log_10kib_10k() {
+    let mut buffer = [65u8; 10240];
+    buffer[buffer.len() - 1] = 0;
+    for _ in 0..10_000 {
+        log_utf8(core::u64::MAX, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
+// Function to measure `utf16_decoding_base`, `utf16_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf16 10b 10k times into log.
 #[no_mangle]
-pub fn call_log_utf16() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        let blob = if blob.len() < 200 { &blob } else { &blob[..200] };
-        unsafe { log_utf16(blob.len() as _, blob.as_ptr() as _) }
+pub unsafe fn utf16_log_10b_10k() {
+    let buffer = [65u8; 10];
+    for _ in 0..10_000 {
+        log_utf16(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
-// ################
-// # Promises API #
-// ################
-
-// Most of promises API is different in that it converts incoming blobs of data into Rust structures.
-// We don't need to write tests for all of them, and can just test one and extrapolate cost to
-// everything else.
+// Function to measure `utf16_decoding_base`, `utf16_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf16 10kib 10k times into log.
 #[no_mangle]
-pub fn call_promise_batch_create() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            promise_batch_create(blob.len() as _, blob.as_ptr() as _);
-        }
+pub unsafe fn utf16_log_10kib_10k() {
+    let buffer = [65u8; 10240];
+    for _ in 0..10_000 {
+        log_utf16(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
+// Nul-terminated versions.
+// Function to measure `utf16_decoding_base`, `utf16_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf16 10b 10k times into log.
 #[no_mangle]
-pub fn call_promise_batch_create_promise_batch_then() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            let id = promise_batch_create(blob.len() as _, blob.as_ptr() as _);
-            promise_batch_then(id, blob.len() as _, blob.as_ptr() as _);
-        }
+pub unsafe fn nul_utf16_log_10b_10k() {
+    let mut buffer = [65u8; 10];
+    buffer[buffer.len() - 2] = 0;
+    buffer[buffer.len() - 1] = 0;
+    for _ in 0..10_000 {
+        log_utf16(core::u64::MAX, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
 }
 
+// Nul-terminated versions.
+// Function to measure `utf16_decoding_base`, `utf16_decoding_byte`, `log_base`, and `log_byte`;
+// It actually measures them together with `read_memory_base` and `read_memory_byte`.
+// Write utf16 10kib 10k times into log.
 #[no_mangle]
-pub fn call_promise_batch_create_promise_batch_action_create_account() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            if blob[0] > b'z' {
-                blob[0] = b'a';
-                blob[1] += 1;
-                if blob[1] > b'z' {
-                    blob[1] = b'a';
-                }
-            }
-            let acc_name = if blob.len() < 64 { &blob } else { &blob[..64] };
-            let id = promise_batch_create(acc_name.len() as _, acc_name.as_ptr() as _);
-            promise_batch_action_create_account(id);
-            blob[0] += 1;
-        }
+pub unsafe fn nul_utf16_log_10kib_10k() {
+    let mut buffer = [65u8; 10240];
+    buffer[buffer.len() - 2] = 0;
+    buffer[buffer.len() - 1] = 0;
+    for _ in 0..10_000 {
+        log_utf16(core::u64::MAX, buffer.as_ptr() as *const u64 as u64);
     }
-    return_u64(n);
-}
-
-#[no_mangle]
-pub fn call_promise_batch_create_promise_batch_action_create_account_batch_action_deploy_contract()
-{
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            if blob[0] > b'z' {
-                blob[0] = b'a';
-                blob[1] += 1;
-                if blob[1] > b'z' {
-                    blob[1] = b'a';
-                }
-            }
-            let acc_name = if blob.len() < 64 { &blob } else { &blob[..64] };
-            let id = promise_batch_create(acc_name.len() as _, acc_name.as_ptr() as _);
-            promise_batch_action_create_account(id);
-            promise_batch_action_deploy_contract(id, blob.len() as _, blob.as_ptr() as _);
-            blob[0] += 1;
-        }
-    }
-    return_u64(n);
 }
 
-// #######################
-// # Promise API results #
-// #######################
-call_func!(call_promise_results_count, promise_results_count());
-
+// Function to measure `sha256_base` and `sha256_byte`. Also measures `base`, `write_register_base`,
+// and `write_register_byte`. However, `sha256` computation is more expensive than register writing,
+// so we are okay with overcharging it.
+// Compute sha256 on 10b 10k times.
 #[no_mangle]
-pub fn call_promise_batch_create_promise_return() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for _ in 0..n {
-        unsafe {
-            let id = promise_batch_create(blob.len() as _, blob.as_ptr() as _);
-            promise_return(id);
-        }
+pub unsafe fn sha256_10b_10k() {
+    let buffer = [65u8; 10];
+    for _ in 0..10_000 {
+        sha256(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64, 0);
+    }
+}
+// Function to measure `sha256_base` and `sha256_byte`. Also measures `base`, `write_register_base`,
+// and `write_register_byte`. However, `sha256` computation is more expensive than register writing,
+// so we are okay with overcharging it.
+// Compute sha256 on 10kib 10k times.
+#[no_mangle]
+pub unsafe fn sha256_10kib_10k() {
+    let buffer = [65u8; 10240];
+    for _ in 0..10_000 {
+        sha256(buffer.len() as u64, buffer.as_ptr() as *const u64 as u64, 0);
     }
-    return_u64(n);
 }
 
 // ###############
 // # Storage API #
 // ###############
 
-// We need to measure cost of operation for large&small blobs. Also, we need to measure the
-// cost of operation for different depths of the trie.
-#[no_mangle]
-pub fn call_storage_write() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for i in 0..n {
-        unsafe {
-            // Modify blob so that we write different content.
-            blob[0] = (i % 256) as u8;
-            blob[1] = ((i / 256) % 256) as u8;
-            storage_write(
-                blob.len() as _,
-                blob.as_ptr() as _,
-                blob.len() as _,
-                blob.as_ptr() as _,
-                0,
+macro_rules! storage_bench {
+    ($key_buf:ident, $key_len:expr, $value_buf:ident, $value_len:expr, $loop_n:expr, $exp_name:ident,  $call:block) => {
+        #[no_mangle]
+        pub unsafe fn $exp_name() {
+            let mut $key_buf = [0u8; $key_len];
+            let mut $value_buf = [0u8; $value_len];
+            for i in 0..$loop_n {
+                // Modify blob so that we write different content.
+                $key_buf[0] = (i % 256) as u8;
+                $key_buf[1] = ((i / 256) % 256) as u8;
+                $key_buf[2] = ((i / 256 / 256) % 256) as u8;
+
+                $value_buf[0] = (i % 256) as u8;
+                $value_buf[1] = ((i / 256) % 256) as u8;
+                $value_buf[2] = ((i / 256 / 256) % 256) as u8;
+                $call
+            }
+        }
+    };
+}
+
+// Storage writing.
+
+// Function to measure `storage_write_base`.
+// Writes to storage 1k times.
+storage_bench!(key, 10, value, 10, 1000, storage_write_10b_key_10b_value_1k, {
+    storage_write(10, key.as_ptr() as _, 10, value.as_ptr() as _, 0);
+});
+
+// Function to measure `storage_write_base + storage_write_key_byte`.
+// Writes to storage with 10kib key 1000 times.
+storage_bench!(key, 10240, value, 10, 1000, storage_write_10kib_key_10b_value_1k, {
+    storage_write(10240, key.as_ptr() as _, 10, value.as_ptr() as _, 0);
+});
+
+// Function to measure `storage_write_base + storage_write_value_byte`.
+// Writes to storage with 10kib value 1000 times.
+storage_bench!(key, 10, value, 10240, 1000, storage_write_10b_key_10kib_value_1k, {
+    storage_write(10, key.as_ptr() as _, 10240, value.as_ptr() as _, 0);
+});
+
+// Storage reading.
+
+// Function to measure `storage_read_base`.
+// Reads from storage 1k times.
+storage_bench!(key, 10, value, 10, 1000, storage_read_10b_key_10b_value_1k, {
+    storage_read(10, key.as_ptr() as _, 0);
+});
+
+// Function to measure `storage_read_base + storage_read_key_byte`.
+// Reads from storage with 10kib key 1000 times.
+storage_bench!(key, 10240, value, 10, 1000, storage_read_10kib_key_10b_value_1k, {
+    storage_read(10240, key.as_ptr() as _, 0);
+});
+
+// Function to measure `storage_read_base + storage_read_value_byte`.
+// Reads from storage with 10kib value 1000 times.
+storage_bench!(key, 10, value, 10240, 1000, storage_read_10b_key_10kib_value_1k, {
+    storage_read(10, key.as_ptr() as _, 0);
+});
+
+// Storage removing.
+
+// Function to measure `storage_remove_base`.
+// Removes from storage 1k times.
+storage_bench!(key, 10, value, 10, 1000, storage_remove_10b_key_10b_value_1k, {
+    storage_remove(10, key.as_ptr() as _, 0);
+});
+
+// Function to measure `storage_remove_base + storage_remove_key_byte`.
+// Removes from storage with 10kib key 1000 times.
+storage_bench!(key, 10240, value, 10, 1000, storage_remove_10kib_key_10b_value_1k, {
+    storage_remove(10240, key.as_ptr() as _, 0);
+});
+
+// Function to measure `storage_remove_base + storage_remove_value_byte`.
+// Removes from storage with 10kib value 1000 times.
+storage_bench!(key, 10, value, 10240, 1000, storage_remove_10b_key_10kib_value_1k, {
+    storage_remove(10, key.as_ptr() as _, 0);
+});
+
+// Storage has key.
+
+// Function to measure `storage_has_key_base`.
+// Checks the existence of a key in storage 1k times.
+storage_bench!(key, 10, value, 10, 1000, storage_has_key_10b_key_10b_value_1k, {
+    storage_has_key(10, key.as_ptr() as _);
+});
+
+// Function to measure `storage_has_key_base + storage_has_key_key_byte`.
+// Checks the existence of a 10kib key in storage 1000 times.
+storage_bench!(key, 10240, value, 10, 1000, storage_has_key_10kib_key_10b_value_1k, {
+    storage_has_key(10240, key.as_ptr() as _);
+});
+
+// Function to measure `storage_has_key_base + storage_has_key_value_byte`.
+// Checks the existence of a key with 10kib value in storage 1000 times.
+storage_bench!(key, 10, value, 10240, 1000, storage_has_key_10b_key_10kib_value_1k, {
+    storage_has_key(10, key.as_ptr() as _);
+});
+
+macro_rules! storage_iter_bench {
+    ($from_buf:ident, $from_len:expr, $to_buf:ident, $to_len:expr, $loop_n:expr, $exp_name:ident, $call:block) => {
+        #[no_mangle]
+        pub unsafe fn $exp_name() {
+            let mut $from_buf = [0u8; $from_len];
+            let mut $to_buf = [0u8; $to_len];
+            for i in 0..$loop_n {
+                // Modify blob so that we write different content.
+                $from_buf[1] = (i % 256) as u8;
+                $from_buf[2] = ((i / 256) % 256) as u8;
+                $from_buf[3] = ((i / 256 / 256) % 256) as u8;
+
+                $to_buf[0] = 255;
+                $to_buf[1] = (i % 256) as u8;
+                $to_buf[2] = ((i / 256) % 256) as u8;
+                $to_buf[3] = ((i / 256 / 256) % 256) as u8;
+                $call
+            }
+        }
+    };
+}
+
+// Storage prefix.
+
+// Function to measure `storage_iter_create_prefix_base`.
+// Create prefix iterator 1k times.
+storage_iter_bench!(from, 10, to, 10, 1000, storage_iter_prefix_10b_1k, {
+    storage_iter_prefix(10, from.as_ptr() as _);
+});
+
+// Function to measure `storage_iter_create_prefix_base + storage_iter_create_prefix_byte`.
+// Create prefix iterator with 10kib prefix 1000 times.
+storage_iter_bench!(from, 10240, to, 10, 1000, storage_iter_prefix_10kib_1k, {
+    storage_iter_prefix(10240, from.as_ptr() as _);
+});
+
+// Storage range.
+
+// Function to measure `storage_iter_create_range_base`.
+// Create range iterator 1k times.
+storage_iter_bench!(from, 10, to, 10, 1000, storage_iter_range_10b_from_10b_to_1k, {
+    storage_iter_range(10, from.as_ptr() as _, 10, to.as_ptr() as _);
+});
+
+// Function to measure `storage_iter_create_range_base + storage_iter_create_from_byte`.
+// Create range iterator with 10kib from prefix 1000 times.
+storage_iter_bench!(from, 10240, to, 10, 1000, storage_iter_range_10kib_from_10b_to_1k, {
+    storage_iter_range(10240, from.as_ptr() as _, 10, to.as_ptr() as _);
+});
+
+// Function to measure `storage_iter_create_range_base + storage_iter_create_to_byte`.
+// Create range iterator with 10kib to prefix 1000 times.
+storage_iter_bench!(from, 10, to, 10240, 1000, storage_iter_range_10b_from_10kib_to_1k, {
+    storage_iter_range(10, from.as_ptr() as _, 10240, to.as_ptr() as _);
+});
+
+// Storage iter next.
+
+// Needs to be run after the corresponding write benchmarks.
+macro_rules! storage_next {
+    ($from_buf:ident, $from_len:expr, $to_buf:ident, $to_len:expr, $loop_n:expr, $exp_name:ident) => {
+        #[no_mangle]
+        pub unsafe fn $exp_name() {
+            let $from_buf = [0u8; $from_len];
+            let $to_buf = [255u8; $to_len];
+            let it = storage_iter_range(
+                $from_len,
+                $from_buf.as_ptr() as _,
+                $to_len,
+                $to_buf.as_ptr() as _,
             );
+            for _ in 0..$loop_n {
+                storage_iter_next(it, 1, 2);
+            }
         }
-    }
-    return_u64(n);
+    };
 }
 
+// Function to measure `storage_iter_next_base`, `storage_iter_next_key_byte`, `storage_iter_next_value_byte`.
+// Iterate 1k times.
+storage_next!(from, 10, to, 10, 1000, storage_next_10b_from_10b_to_1k);
+
+// Similar function to the one above, used to check for hidden parameters.
+// Iterate 1k times.
+storage_next!(from, 10240, to, 10, 1000, storage_next_10kib_from_10b_to_1k);
+
+// Similar function to the one above, used to check for hidden parameters.
+// Iterate 1k times.
+storage_next!(from, 10, to, 10240, 1000, storage_next_10b_from_10kib_to_1k);
+
+// Function to measure `promise_and_base`.
 #[no_mangle]
-pub fn call_storage_read() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for i in 0..n {
-        unsafe {
-            // Modify blob so that we read different content.
-            blob[0] = (i % 256) as u8;
-            blob[1] = ((i / 256) % 256) as u8;
-            storage_read(blob.len() as _, blob.as_ptr() as _, 0);
-        }
+pub unsafe fn promise_and_100k() {
+    let account = b"alice_near";
+    let id0 = promise_batch_create(account.len() as _, account.as_ptr() as _);
+    let id1 = promise_batch_create(account.len() as _, account.as_ptr() as _);
+    let ids = [id0, id1];
+    for _ in 0..100_000 {
+        promise_and(ids.as_ptr() as _, 2);
     }
-    return_u64(n);
 }
 
+// Function to measure `promise_and_per_promise`.
 #[no_mangle]
-pub fn call_storage_remove() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for i in 0..n {
-        unsafe {
-            // Modify blob so that we remove different content.
-            blob[0] = (i % 256) as u8;
-            blob[1] = ((i / 256) % 256) as u8;
-            storage_remove(blob.len() as _, blob.as_ptr() as _, 0);
-        }
+pub unsafe fn promise_and_100k_on_1k_and() {
+    let account = b"alice_near";
+    let mut ids = [0u64; 1000];
+    for i in 0..1000 {
+        ids[i] = promise_batch_create(account.len() as _, account.as_ptr() as _);
+    }
+    for _ in 0..100_000 {
+        promise_and(ids.as_ptr() as _, ids.len() as _);
     }
-    return_u64(n);
 }
 
+// Function to measure `promise_return`.
 #[no_mangle]
-pub fn call_storage_has_key() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for i in 0..n {
-        unsafe {
-            // Modify blob so that we remove different content.
-            blob[0] = (i % 256) as u8;
-            blob[1] = ((i / 256) % 256) as u8;
-            storage_has_key(blob.len() as _, blob.as_ptr() as _);
-        }
+pub unsafe fn promise_return_100k() {
+    let account = b"alice_near";
+    let id = promise_batch_create(account.len() as _, account.as_ptr() as _);
+    for _ in 0..100_000 {
+        promise_return(id);
     }
-    return_u64(n);
 }
 
+// Measuring cost for data_receipt_creation_config.
+
+// Function that emits 10b of data.
 #[no_mangle]
-pub fn call_storage_iter_prefix() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for i in 0..n {
-        unsafe {
-            storage_iter_prefix(blob.len() as _, blob.as_ptr() as _);
-        }
-    }
-    return_u64(n);
+pub unsafe fn data_producer_10b() {
+    let data = [0u8; 10];
+    value_return(data.len() as _, data.as_ptr() as _);
 }
 
+// Function that emits 100kib of data.
 #[no_mangle]
-pub fn call_storage_iter_range() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
-    for i in 0..n {
-        unsafe {
-            storage_iter_range(
-                blob.len() as _,
-                blob.as_ptr() as _,
-                blob.len() as _,
-                blob.as_ptr() as _,
-            );
-        }
-    }
-    return_u64(n);
+pub unsafe fn data_producer_100kib() {
+    let data = [0u8; 102400];
+    value_return(data.len() as _, data.as_ptr() as _);
 }
 
+// Function to measure `data_receipt_creation_config`; note that we measure the send and execution fees at the same time.
+// Produces 1000 10b data receipts.
 #[no_mangle]
-pub fn call_storage_iter_next() {
-    let mut buffer = [0u8; BUFFER_SIZE];
-    let (n, blob) = read_u64input(&mut buffer);
+pub unsafe fn data_receipt_10b_1000() {
+    let buf = [0u8; 1000];
+    current_account_id(0);
+    let buf_len = register_len(0);
+    read_register(0, buf.as_ptr() as _);
+
+    let method_name = b"data_producer_10b";
+    let args = b"";
+    let mut ids = [0u64; 1000];
+    let amount = 0u128;
+    let gas = prepaid_gas();
+    for i in 0..1000 {
+        ids[i] = promise_create(
+            buf_len,
+            buf.as_ptr() as _,
+            method_name.len() as _,
+            method_name.as_ptr() as _,
+            args.len() as _,
+            args.as_ptr() as _,
+            &amount as *const u128 as *const u64 as u64,
+            gas / 2000,
+        );
+    }
+    let id = promise_and(ids.as_ptr() as _, ids.len() as _);
+    let method_name = b"noop";
+    promise_then(
+        id,
+        buf_len,
+        buf.as_ptr() as _,
+        method_name.len() as _,
+        method_name.as_ptr() as _,
+        args.len() as _,
+        args.as_ptr() as _,
+        &amount as *const u128 as *const u64 as u64,
+        gas / 3,
+    );
+}
 
-    let end = [255u8, 255u8];
-    unsafe {
-        let mut id = storage_iter_range(
-            blob.len() as _,
-            blob.as_ptr() as _,
-            end.len() as _,
-            end.as_ptr() as _,
+// Function to measure `data_receipt_creation_config`; note that we measure the send and execution fees at the same time.
+// Produces 1000 100kib data receipts.
+#[no_mangle]
+pub unsafe fn data_receipt_100kib_1000() {
+    let buf = [0u8; 1000];
+    current_account_id(0);
+    let buf_len = register_len(0);
+    read_register(0, buf.as_ptr() as _);
+
+    let method_name = b"data_producer_100kib";
+    let args = b"";
+    let mut ids = [0u64; 1000];
+    let amount = 0u128;
+    let gas = prepaid_gas();
+    for i in 0..1000 {
+        ids[i] = promise_create(
+            buf_len,
+            buf.as_ptr() as _,
+            method_name.len() as _,
+            method_name.as_ptr() as _,
+            args.len() as _,
+            args.as_ptr() as _,
+            &amount as *const u128 as *const u64 as u64,
+            gas / 2000,
         );
-        for i in 0..n {
-            if storage_iter_next(id, 1, 2) == 0 {
-                id = storage_iter_range(
-                    blob.len() as _,
-                    blob.as_ptr() as _,
-                    end.len() as _,
-                    end.as_ptr() as _,
-                );
-            }
-        }
     }
-    return_u64(n);
+    let id = promise_and(ids.as_ptr() as _, ids.len() as _);
+    let method_name = b"noop";
+    promise_then(
+        id,
+        buf_len,
+        buf.as_ptr() as _,
+        method_name.len() as _,
+        method_name.as_ptr() as _,
+        args.len() as _,
+        args.as_ptr() as _,
+        &amount as *const u128 as *const u64 as u64,
+        gas / 3,
+    );
+}
+
+#[no_mangle]
+pub unsafe fn cpu_ram_soak_test() {
+    let mut buf = [0u8; 100 * 1024];
+    let len = buf.len();
+    for i in 0..10_000_000 {
+        let j = (i * 7 + len / 2) % len;
+        let k = (i * 3) % len;
+        let tmp = buf[k];
+        buf[k] = buf[j];
+        buf[j] = tmp;
+    }
 }
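
For context, the benchmark functions in the hunk above call NEAR host functions (`write_register`, `log_utf8`, `sha256`, `storage_write`, `promise_create`, and so on) whose `extern` declarations sit above the portion of the file shown in this diff. Below is a minimal sketch of what those imports look like; the signatures are inferred from the call sites above and should be treated as an illustration rather than the canonical near-vm-logic import list.

    // Sketch only: host-function imports assumed by the benchmarks above.
    // Arguments follow the convention of (length, pointer) pairs into wasm
    // linear memory plus register ids, all passed as u64.
    extern "C" {
        fn read_register(register_id: u64, ptr: u64);
        fn register_len(register_id: u64) -> u64;
        fn write_register(register_id: u64, data_len: u64, data_ptr: u64);
        fn current_account_id(register_id: u64);
        fn prepaid_gas() -> u64;
        fn value_return(value_len: u64, value_ptr: u64);
        fn log_utf8(len: u64, ptr: u64);
        fn log_utf16(len: u64, ptr: u64);
        fn sha256(value_len: u64, value_ptr: u64, register_id: u64);
        fn promise_create(
            account_id_len: u64,
            account_id_ptr: u64,
            method_name_len: u64,
            method_name_ptr: u64,
            arguments_len: u64,
            arguments_ptr: u64,
            amount_ptr: u64,
            gas: u64,
        ) -> u64;
        fn promise_then(
            promise_index: u64,
            account_id_len: u64,
            account_id_ptr: u64,
            method_name_len: u64,
            method_name_ptr: u64,
            arguments_len: u64,
            arguments_ptr: u64,
            amount_ptr: u64,
            gas: u64,
        ) -> u64;
        fn promise_and(promise_idx_ptr: u64, promise_idx_count: u64) -> u64;
        fn promise_batch_create(account_id_len: u64, account_id_ptr: u64) -> u64;
        fn promise_return(promise_id: u64);
        fn storage_write(key_len: u64, key_ptr: u64, value_len: u64, value_ptr: u64, register_id: u64) -> u64;
        fn storage_read(key_len: u64, key_ptr: u64, register_id: u64) -> u64;
        fn storage_remove(key_len: u64, key_ptr: u64, register_id: u64) -> u64;
        fn storage_has_key(key_len: u64, key_ptr: u64) -> u64;
        fn storage_iter_prefix(prefix_len: u64, prefix_ptr: u64) -> u64;
        fn storage_iter_range(start_len: u64, start_ptr: u64, end_len: u64, end_ptr: u64) -> u64;
        fn storage_iter_next(iterator_id: u64, key_register_id: u64, value_register_id: u64) -> u64;
    }

Throughout the benchmarks, register 0 is used as scratch space, and a length of `core::u64::MAX` tells the host to take the value from a register instead of memory.
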
diff --git a/runtime/runtime/Cargo.toml b/runtime/runtime/Cargo.toml
index af06965e55b..81ddbcf9604 100644
--- a/runtime/runtime/Cargo.toml
+++ b/runtime/runtime/Cargo.toml
@@ -29,7 +29,10 @@ near-vm-runner = { path = "../../runtime/near-vm-runner" }
 near-vm-errors = { path = "../../runtime/near-vm-errors" }
 
 [features]
-test-utils = []
+default = []
+
+# Use this feature to enable counting of fees and costs applied.
+costs_counting = ["near-vm-logic/costs_counting", "near-vm-runner/costs_counting"]
 
 [dev-dependencies]
 tempdir = "0.3"
@@ -40,5 +43,6 @@ rayon = "1.1"
 assert_matches = "1.3.0"
 
 testlib = { path = "../../test-utils/testlib" }
+near = { path = "../../near" }
 
 genesis-populate = { path = "../../genesis-tools/genesis-populate"}
diff --git a/runtime/runtime/src/balance_checker.rs b/runtime/runtime/src/balance_checker.rs
index 99555f74783..15c4266333c 100644
--- a/runtime/runtime/src/balance_checker.rs
+++ b/runtime/runtime/src/balance_checker.rs
@@ -2,12 +2,15 @@ use crate::config::{
     safe_add_balance, safe_add_gas, safe_gas_to_balance, total_deposit, total_exec_fees,
     total_prepaid_gas,
 };
-use crate::{ApplyStats, ValidatorAccountsUpdate, OVERFLOW_CHECKED_ERR};
+use crate::{ApplyStats, DelayedReceiptIndices, ValidatorAccountsUpdate, OVERFLOW_CHECKED_ERR};
 use near_primitives::errors::{BalanceMismatchError, InvalidTxError, RuntimeError, StorageError};
 use near_primitives::receipt::{Receipt, ReceiptEnum};
 use near_primitives::transaction::SignedTransaction;
 use near_primitives::types::{AccountId, Balance};
-use near_primitives::utils::{key_for_postponed_receipt_id, system_account};
+use near_primitives::utils::col::DELAYED_RECEIPT_INDICES;
+use near_primitives::utils::{
+    key_for_delayed_receipt, key_for_postponed_receipt_id, system_account,
+};
 use near_runtime_fees::RuntimeFeesConfig;
 use near_store::{get, get_account, get_receipt, TrieUpdate};
 use std::collections::HashSet;
@@ -24,11 +27,42 @@ pub(crate) fn check_balance(
     new_receipts: &[Receipt],
     stats: &ApplyStats,
 ) -> Result<(), RuntimeError> {
+    // Delayed receipts
+    let initial_delayed_receipt_indices: DelayedReceiptIndices =
+        get(&initial_state, DELAYED_RECEIPT_INDICES)?.unwrap_or_default();
+    let final_delayed_receipt_indices: DelayedReceiptIndices =
+        get(&final_state, DELAYED_RECEIPT_INDICES)?.unwrap_or_default();
+    let get_delayed_receipts = |from_index, to_index, state| {
+        (from_index..to_index)
+            .map(|index| {
+                get(state, &key_for_delayed_receipt(index))?.ok_or_else(|| {
+                    StorageError::StorageInconsistentState(format!(
+                        "Delayed receipt #{} should be in the state",
+                        index
+                    ))
+                })
+            })
+            .collect::<Result<Vec<Receipt>, StorageError>>()
+    };
+    // Previously delayed receipts that were processed this time.
+    let processed_delayed_receipts = get_delayed_receipts(
+        initial_delayed_receipt_indices.first_index,
+        final_delayed_receipt_indices.first_index,
+        &initial_state,
+    )?;
+    // Receipts that were not processed this time and are delayed now.
+    let new_delayed_receipts = get_delayed_receipts(
+        initial_delayed_receipt_indices.next_available_index,
+        final_delayed_receipt_indices.next_available_index,
+        &final_state,
+    )?;
+
     // Accounts
     let mut all_accounts_ids: HashSet<AccountId> = transactions
         .iter()
         .map(|tx| tx.transaction.signer_id.clone())
         .chain(prev_receipts.iter().map(|r| r.receiver_id.clone()))
+        .chain(processed_delayed_receipts.iter().map(|r| r.receiver_id.clone()))
         .collect();
     let incoming_validator_rewards =
         if let Some(validator_accounts_update) = validator_accounts_update {
@@ -86,12 +120,15 @@ pub(crate) fn check_balance(
     };
     let incoming_receipts_balance = receipts_cost(prev_receipts);
     let outgoing_receipts_balance = receipts_cost(new_receipts);
+    let processed_delayed_receipts_balance = receipts_cost(&processed_delayed_receipts);
+    let new_delayed_receipts_balance = receipts_cost(&new_delayed_receipts);
     // Postponed actions receipts. The receipts can be postponed and stored with the receiver's
     // account ID when the input data is not received yet.
     // We calculate all potential receipts IDs that might be postponed initially or after the
     // execution.
     let all_potential_postponed_receipt_ids = prev_receipts
         .iter()
+        .chain(processed_delayed_receipts.iter())
         .map(|receipt| {
             let account_id = &receipt.receiver_id;
             match &receipt.receipt {
@@ -132,9 +169,11 @@ pub(crate) fn check_balance(
     let initial_balance = incoming_validator_rewards
         + initial_accounts_balance
         + incoming_receipts_balance
+        + processed_delayed_receipts_balance
         + initial_postponed_receipts_balance;
     let final_balance = final_accounts_balance
         + outgoing_receipts_balance
+        + new_delayed_receipts_balance
         + final_postponed_receipts_balance
         + stats.total_rent_paid
         + stats.total_validator_reward
@@ -142,12 +181,16 @@ pub(crate) fn check_balance(
         + stats.total_balance_slashed;
     if initial_balance != final_balance {
         Err(BalanceMismatchError {
+            // Inputs
             incoming_validator_rewards,
             initial_accounts_balance,
-            final_accounts_balance,
             incoming_receipts_balance,
-            outgoing_receipts_balance,
+            processed_delayed_receipts_balance,
             initial_postponed_receipts_balance,
+            // Outputs
+            final_accounts_balance,
+            outgoing_receipts_balance,
+            new_delayed_receipts_balance,
             final_postponed_receipts_balance,
             total_rent_paid: stats.total_rent_paid,
             total_validator_reward: stats.total_validator_reward,
diff --git a/runtime/runtime/src/ext.rs b/runtime/runtime/src/ext.rs
index ee0aba9c544..b450364f260 100644
--- a/runtime/runtime/src/ext.rs
+++ b/runtime/runtime/src/ext.rs
@@ -339,4 +339,12 @@ impl<'a> External for RuntimeExt<'a> {
         let value_hash = sodiumoxide::crypto::hash::sha256::hash(data);
         Ok(value_hash.as_ref().to_vec())
     }
+
+    fn get_touched_nodes_count(&self) -> u64 {
+        self.trie_update.trie.counter.get()
+    }
+
+    fn reset_touched_nodes_counter(&mut self) {
+        self.trie_update.trie.counter.reset()
+    }
 }
diff --git a/runtime/runtime/src/lib.rs b/runtime/runtime/src/lib.rs
index 8faaf2a687d..3d204556707 100644
--- a/runtime/runtime/src/lib.rs
+++ b/runtime/runtime/src/lib.rs
@@ -10,9 +10,16 @@ use std::collections::{HashMap, HashSet};
 use std::convert::TryInto;
 use std::sync::Arc;
 
-use borsh::BorshSerialize;
+use borsh::{BorshDeserialize, BorshSerialize};
 use kvdb::DBValue;
 
+use crate::actions::*;
+use crate::balance_checker::check_balance;
+use crate::config::{
+    exec_fee, safe_add_balance, safe_add_gas, safe_gas_to_balance, total_deposit, total_exec_fees,
+    total_prepaid_gas, total_send_fees, RuntimeConfig,
+};
+pub use crate::store::StateRecord;
 use near_crypto::PublicKey;
 use near_primitives::account::{AccessKey, AccessKeyPermission, Account};
 use near_primitives::contract::ContractCode;
@@ -28,10 +35,11 @@ use near_primitives::transaction::{
 use near_primitives::types::{
     AccountId, Balance, BlockIndex, Gas, Nonce, StateRoot, ValidatorStake,
 };
+use near_primitives::utils::col::DELAYED_RECEIPT_INDICES;
 use near_primitives::utils::{
-    create_nonce_with_nonce, is_valid_account_id, key_for_pending_data_count,
-    key_for_postponed_receipt, key_for_postponed_receipt_id, key_for_received_data, system_account,
-    ACCOUNT_DATA_SEPARATOR,
+    create_nonce_with_nonce, is_valid_account_id, key_for_delayed_receipt,
+    key_for_pending_data_count, key_for_postponed_receipt, key_for_postponed_receipt_id,
+    key_for_received_data, system_account, ACCOUNT_DATA_SEPARATOR,
 };
 use near_runtime_fees::RuntimeFeesConfig;
 use near_store::{
@@ -42,14 +50,6 @@ use near_store::{
 use near_vm_logic::types::PromiseResult;
 use near_vm_logic::ReturnData;
 
-use crate::actions::*;
-use crate::balance_checker::check_balance;
-use crate::config::{
-    exec_fee, safe_add_balance, safe_add_gas, safe_gas_to_balance, total_deposit, total_exec_fees,
-    total_prepaid_gas, total_send_fees, RuntimeConfig,
-};
-pub use crate::store::StateRecord;
-
 mod actions;
 pub mod adapter;
 mod balance_checker;
@@ -60,6 +60,9 @@ mod metrics;
 pub mod state_viewer;
 mod store;
 
+#[cfg(feature = "costs_counting")]
+pub use near_vm_runner::EXT_COSTS_COUNTER;
+
 const OVERFLOW_CHECKED_ERR: &str = "Overflow has already been checked.";
 
 #[derive(Debug)]
@@ -72,6 +75,9 @@ pub struct ApplyState {
     pub gas_price: Balance,
     /// A block timestamp
     pub block_timestamp: u64,
+    /// Gas limit for a given chunk.
+    /// If None is given, assumes there is no gas limit.
+    pub gas_limit: Option<Gas>,
 }
 
 /// Contains information to update validators accounts at the first block of a new epoch.
@@ -114,6 +120,15 @@ pub struct ApplyResult {
     pub stats: ApplyStats,
 }
 
+/// Stores indices for a persistent queue for delayed receipts that didn't fit into a block.
+#[derive(Default, BorshSerialize, BorshDeserialize)]
+pub struct DelayedReceiptIndices {
+    // First inclusive index in the queue.
+    first_index: u64,
+    // Exclusive end index of the queue.
+    next_available_index: u64,
+}
+
 #[derive(Debug)]
 pub struct ActionResult {
     pub gas_burnt: Gas,
@@ -914,6 +929,7 @@ impl Runtime {
 
         Ok(())
     }
+
     /// Applies new singed transactions and incoming receipts for some chunk/shard on top of
     /// given trie and the given state root.
     /// If the validator accounts update is provided, updates validators accounts.
@@ -950,21 +966,32 @@ impl Runtime {
         let mut validator_proposals = vec![];
         let mut local_receipts = vec![];
         let mut outcomes = vec![];
+        let mut total_gas_burnt = 0;
 
         for signed_transaction in transactions {
-            outcomes.push(self.process_transaction(
+            let outcome_with_id = self.process_transaction(
                 &mut state_update,
                 apply_state,
                 signed_transaction,
                 &mut local_receipts,
                 &mut new_receipts,
                 &mut stats,
-            )?);
+            )?;
+            total_gas_burnt += outcome_with_id.outcome.gas_burnt;
+
+            outcomes.push(outcome_with_id);
         }
 
-        for receipt in local_receipts.iter().chain(prev_receipts.iter()) {
+        let mut delayed_receipts_indices: DelayedReceiptIndices =
+            get(&state_update, DELAYED_RECEIPT_INDICES)?.unwrap_or_default();
+        let mut delayed_receipts_changed = false;
+
+        let mut process_receipt = |receipt: &Receipt,
+                                   state_update: &mut TrieUpdate,
+                                   total_gas_burnt: &mut Gas|
+         -> Result<_, StorageError> {
             self.process_receipt(
-                &mut state_update,
+                state_update,
                 apply_state,
                 receipt,
                 &mut new_receipts,
@@ -972,7 +999,49 @@ impl Runtime {
                 &mut stats,
             )?
             .into_iter()
-            .for_each(|outcome_with_id| outcomes.push(outcome_with_id));
+            .for_each(|outcome_with_id| {
+                *total_gas_burnt += outcome_with_id.outcome.gas_burnt;
+                outcomes.push(outcome_with_id);
+            });
+            Ok(())
+        };
+
+        let gas_limit = apply_state.gas_limit.unwrap_or(Gas::max_value());
+
+        while delayed_receipts_indices.first_index < delayed_receipts_indices.next_available_index {
+            if total_gas_burnt >= gas_limit {
+                break;
+            }
+            let key = key_for_delayed_receipt(delayed_receipts_indices.first_index);
+            let receipt: Receipt = get(&state_update, &key)?.ok_or_else(|| {
+                StorageError::StorageInconsistentState(format!(
+                    "Delayed receipt #{} should be in the state",
+                    delayed_receipts_indices.first_index
+                ))
+            })?;
+            state_update.remove(&key);
+            delayed_receipts_indices.first_index += 1;
+            process_receipt(&receipt, &mut state_update, &mut total_gas_burnt)?;
+            delayed_receipts_changed = true;
+        }
+
+        for receipt in local_receipts.iter().chain(prev_receipts.iter()) {
+            if total_gas_burnt < gas_limit {
+                process_receipt(&receipt, &mut state_update, &mut total_gas_burnt)?;
+            } else {
+                // Saving to the state as a delayed receipt.
+                set(
+                    &mut state_update,
+                    key_for_delayed_receipt(delayed_receipts_indices.next_available_index),
+                    receipt,
+                );
+                delayed_receipts_indices.next_available_index += 1;
+                delayed_receipts_changed = true;
+            }
+        }
+
+        if delayed_receipts_changed {
+            set(&mut state_update, DELAYED_RECEIPT_INDICES.to_vec(), &delayed_receipts_indices);
         }
 
         check_balance(
@@ -990,8 +1059,9 @@ impl Runtime {
         let key_value_changes = state_update.get_prefix_changes(subscribed_prefixes)?;
 
         let trie_changes = state_update.finalize()?;
+        let state_root = trie_changes.new_root;
         Ok(ApplyResult {
-            state_root: StateRoot { hash: trie_changes.new_root, num_parts: 9 }, /* TODO MOO */
+            state_root,
             trie_changes,
             validator_proposals,
             new_receipts,
@@ -1139,27 +1209,28 @@ impl Runtime {
             set_account(&mut state_update, account_id, &account);
         }
         let trie = state_update.trie.clone();
-        let state_update_state = state_update
+        let (store_update, state_root) = state_update
             .finalize()
             .expect("Genesis state update failed")
             .into(trie)
             .expect("Genesis state update failed");
-        (
-            state_update_state.0,
-            StateRoot { hash: state_update_state.1, num_parts: 9 /* TODO MOO */ },
-        )
+        (store_update, state_root)
     }
 }
 
 #[cfg(test)]
 mod tests {
+    use super::*;
+
+    use near::config::INITIAL_GAS_PRICE;
+    use near_crypto::KeyType;
     use near_primitives::hash::hash;
+    use near_primitives::transaction::TransferAction;
     use near_primitives::types::MerkleHash;
     use near_store::test_utils::create_trie;
+    use testlib::fees_utils::gas_burnt_to_reward;
     use testlib::runtime_utils::{alice_account, bob_account};
 
-    use super::*;
-
     #[test]
     fn test_get_and_set_accounts() {
         let trie = create_trie();
@@ -1190,56 +1261,53 @@ mod tests {
     /* Apply tests */
     /***************/
 
-    #[test]
-    fn test_apply_no_op() {
+    fn setup_runtime(
+        initial_balance: Balance,
+        initial_locked: Balance,
+        gas_limit: Gas,
+    ) -> (Runtime, Arc<Trie>, CryptoHash, ApplyState) {
         let trie = create_trie();
         let root = MerkleHash::default();
         let runtime = Runtime::new(RuntimeConfig::default());
 
         let account_id = alice_account();
 
-        let initial_balance = 1_000_000;
-
         let mut initial_state = TrieUpdate::new(trie.clone(), root);
-        let initial_account = Account::new(initial_balance, hash(&[]), 0);
+        let mut initial_account = Account::new(initial_balance, hash(&[]), 0);
+        initial_account.locked = initial_locked;
         set_account(&mut initial_state, &account_id, &initial_account);
         let trie_changes = initial_state.finalize().unwrap();
         let (store_update, root) = trie_changes.into(trie.clone()).unwrap();
         store_update.commit().unwrap();
 
-        let apply_state =
-            ApplyState { block_index: 0, epoch_length: 3, gas_price: 100, block_timestamp: 100 };
+        let apply_state = ApplyState {
+            block_index: 0,
+            epoch_length: 3,
+            gas_price: INITIAL_GAS_PRICE,
+            block_timestamp: 100,
+            gas_limit: Some(gas_limit),
+        };
+
+        (runtime, trie, root, apply_state)
+    }
 
+    #[test]
+    fn test_apply_no_op() {
+        let (runtime, trie, root, apply_state) = setup_runtime(1_000_000, 0, 10_000_000);
         runtime.apply(trie, root, &None, &apply_state, &[], &[], &HashSet::new()).unwrap();
     }
 
     #[test]
     fn test_apply_check_balance_validation_rewards() {
-        let trie = create_trie();
-        let root = MerkleHash::default();
-        let runtime = Runtime::new(RuntimeConfig::default());
-
-        let account_id = alice_account();
-
-        let initial_balance = 1_000_000;
         let initial_locked = 500_000;
         let reward = 10_000_000;
         let small_refund = 500;
-
-        let mut initial_state = TrieUpdate::new(trie.clone(), root);
-        let mut initial_account = Account::new(initial_balance, hash(&[]), 0);
-        initial_account.locked = initial_locked;
-        set_account(&mut initial_state, &account_id, &initial_account);
-        let trie_changes = initial_state.finalize().unwrap();
-        let (store_update, root) = trie_changes.into(trie.clone()).unwrap();
-        store_update.commit().unwrap();
-
-        let apply_state =
-            ApplyState { block_index: 0, epoch_length: 3, gas_price: 100, block_timestamp: 100 };
+        let (runtime, trie, root, apply_state) =
+            setup_runtime(1_000_000, initial_locked, 10_000_000);
 
         let validator_accounts_update = ValidatorAccountsUpdate {
-            stake_info: vec![(account_id.clone(), initial_locked)].into_iter().collect(),
-            validator_rewards: vec![(account_id.clone(), reward)].into_iter().collect(),
+            stake_info: vec![(alice_account(), initial_locked)].into_iter().collect(),
+            validator_rewards: vec![(alice_account(), reward)].into_iter().collect(),
             last_proposals: Default::default(),
             protocol_treasury_account_id: None,
             slashed_accounts: HashSet::default(),
@@ -1251,10 +1319,161 @@ mod tests {
                 root,
                 &Some(validator_accounts_update),
                 &apply_state,
-                &[Receipt::new_refund(&account_id, small_refund)],
+                &[Receipt::new_refund(&alice_account(), small_refund)],
                 &[],
                 &HashSet::new(),
             )
             .unwrap();
     }
+
+    #[test]
+    fn test_apply_delayed_receipts_feed_all_at_once() {
+        let initial_balance = 1_000_000;
+        let initial_locked = 500_000;
+        let small_transfer = 10_000;
+        let gas_limit = 1;
+        let (runtime, trie, mut root, apply_state) =
+            setup_runtime(initial_balance, initial_locked, gas_limit);
+
+        let n = 10;
+        let receipts = generate_receipts(small_transfer, n);
+
+        let reward_per_receipt = gas_burnt_to_reward(
+            runtime.config.transaction_costs.action_receipt_creation_config.exec_fee()
+                + runtime.config.transaction_costs.action_creation_config.transfer_cost.exec_fee(),
+        );
+
+        // Checking n receipts delayed by 1 + 3 extra
+        for i in 1..=n + 3 {
+            let prev_receipts: &[Receipt] = if i == 1 { &receipts } else { &[] };
+            let apply_result = runtime
+                .apply(trie.clone(), root, &None, &apply_state, prev_receipts, &[], &HashSet::new())
+                .unwrap();
+            let (store_update, new_root) = apply_result.trie_changes.into(trie.clone()).unwrap();
+            root = new_root;
+            store_update.commit().unwrap();
+            let state = TrieUpdate::new(trie.clone(), root);
+            let account = get_account(&state, &alice_account()).unwrap().unwrap();
+            let capped_i = std::cmp::min(i, n);
+            assert_eq!(
+                account.amount,
+                initial_balance
+                    + (small_transfer + reward_per_receipt) * Balance::from(capped_i)
+                    + Balance::from(capped_i * (capped_i - 1) / 2)
+            );
+        }
+    }
+
+    #[test]
+    fn test_apply_delayed_receipts_add_more_using_chunks() {
+        let initial_balance = 1_000_000;
+        let initial_locked = 500_000;
+        let small_transfer = 10_000;
+        let (runtime, trie, mut root, mut apply_state) =
+            setup_runtime(initial_balance, initial_locked, 1);
+
+        let receipt_gas_cost =
+            runtime.config.transaction_costs.action_receipt_creation_config.exec_fee()
+                + runtime.config.transaction_costs.action_creation_config.transfer_cost.exec_fee();
+        apply_state.gas_limit = Some(receipt_gas_cost * 3);
+
+        let n = 40;
+        let receipts = generate_receipts(small_transfer, n);
+        let mut receipt_chunks = receipts.chunks_exact(4);
+
+        let reward_per_receipt = gas_burnt_to_reward(receipt_gas_cost);
+
+        // Each apply processes 3 receipts, so we need n / 3 iterations, rounded up. Then we do 3 extra.
+        for i in 1..=n / 3 + 3 {
+            let prev_receipts: &[Receipt] = receipt_chunks.next().unwrap_or_default();
+            let apply_result = runtime
+                .apply(trie.clone(), root, &None, &apply_state, prev_receipts, &[], &HashSet::new())
+                .unwrap();
+            let (store_update, new_root) = apply_result.trie_changes.into(trie.clone()).unwrap();
+            root = new_root;
+            store_update.commit().unwrap();
+            let state = TrieUpdate::new(trie.clone(), root);
+            let account = get_account(&state, &alice_account()).unwrap().unwrap();
+            let capped_i = std::cmp::min(i * 3, n);
+            assert_eq!(
+                account.amount,
+                initial_balance
+                    + (small_transfer + reward_per_receipt) * Balance::from(capped_i)
+                    + Balance::from(capped_i * (capped_i - 1) / 2)
+            );
+        }
+    }
+
+    #[test]
+    fn test_apply_delayed_receipts_adjustable_gas_limit() {
+        let initial_balance = 1_000_000;
+        let initial_locked = 500_000;
+        let small_transfer = 10_000;
+        let (runtime, trie, mut root, mut apply_state) =
+            setup_runtime(initial_balance, initial_locked, 1);
+
+        let receipt_gas_cost =
+            runtime.config.transaction_costs.action_receipt_creation_config.exec_fee()
+                + runtime.config.transaction_costs.action_creation_config.transfer_cost.exec_fee();
+
+        let n = 120;
+        let receipts = generate_receipts(small_transfer, n);
+        let mut receipt_chunks = receipts.chunks_exact(4);
+
+        let reward_per_receipt = gas_burnt_to_reward(receipt_gas_cost);
+
+        let mut num_receipts_given = 0;
+        let mut num_receipts_processed = 0;
+        let mut num_receipts_per_block = 1;
+        // The test adjusts the gas limit based on the number of receipts given and the number of receipts processed.
+        while num_receipts_processed < n {
+            if num_receipts_given > num_receipts_processed {
+                num_receipts_per_block += 1;
+            } else if num_receipts_per_block > 1 {
+                num_receipts_per_block -= 1;
+            }
+            apply_state.gas_limit = Some(num_receipts_per_block * receipt_gas_cost);
+            let prev_receipts: &[Receipt] = receipt_chunks.next().unwrap_or_default();
+            num_receipts_given += prev_receipts.len() as u64;
+            let apply_result = runtime
+                .apply(trie.clone(), root, &None, &apply_state, prev_receipts, &[], &HashSet::new())
+                .unwrap();
+            let (store_update, new_root) = apply_result.trie_changes.into(trie.clone()).unwrap();
+            root = new_root;
+            store_update.commit().unwrap();
+            let state = TrieUpdate::new(trie.clone(), root);
+            num_receipts_processed += apply_result.outcomes.len() as u64;
+            let account = get_account(&state, &alice_account()).unwrap().unwrap();
+            assert_eq!(
+                account.amount,
+                initial_balance
+                    + (small_transfer + reward_per_receipt) * Balance::from(num_receipts_processed)
+                    + Balance::from(num_receipts_processed * (num_receipts_processed - 1) / 2)
+            );
+            println!(
+                "{} processed out of {} given. With limit {} receipts per block",
+                num_receipts_processed, num_receipts_given, num_receipts_per_block
+            );
+        }
+    }
+
+    fn generate_receipts(small_transfer: u128, n: u64) -> Vec<Receipt> {
+        (0..n)
+            .map(|i| Receipt {
+                predecessor_id: bob_account(),
+                receiver_id: alice_account(),
+                receipt_id: create_nonce_with_nonce(&CryptoHash::default(), i),
+                receipt: ReceiptEnum::Action(ActionReceipt {
+                    signer_id: bob_account(),
+                    signer_public_key: PublicKey::empty(KeyType::ED25519),
+                    gas_price: 100,
+                    output_data_receivers: vec![],
+                    input_data_ids: vec![],
+                    actions: vec![Action::Transfer(TransferAction {
+                        deposit: small_transfer + Balance::from(i),
+                    })],
+                }),
+            })
+            .collect()
+    }
 }
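
The changes to `apply` above turn receipts that do not fit under the chunk gas limit into a persistent FIFO queue: overflow receipts are written under `key_for_delayed_receipt(next_available_index)`, and later applies drain entries starting from `first_index` while gas remains, persisting both counters under `DELAYED_RECEIPT_INDICES` whenever they change. Below is a self-contained sketch of that index bookkeeping, using an in-memory map as a hypothetical stand-in for the trie; it is illustration only, not the runtime's actual storage layer.

    use std::collections::HashMap;

    // Toy model of DelayedReceiptIndices plus its backing storage. The real
    // code keeps the two counters and the receipts themselves in the trie.
    #[derive(Default)]
    struct DelayedQueue {
        first_index: u64,          // first inclusive index in the queue
        next_available_index: u64, // exclusive end index of the queue
        storage: HashMap<u64, String>,
    }

    impl DelayedQueue {
        // Called when a receipt does not fit under the chunk gas limit.
        fn delay(&mut self, receipt: String) {
            self.storage.insert(self.next_available_index, receipt);
            self.next_available_index += 1;
        }

        // Called at the start of a later apply while gas remains.
        fn pop_delayed(&mut self) -> Option<String> {
            if self.first_index >= self.next_available_index {
                return None;
            }
            let receipt = self.storage.remove(&self.first_index);
            self.first_index += 1;
            receipt
        }
    }

    fn main() {
        let mut queue = DelayedQueue::default();
        queue.delay("transfer #1".to_string());
        queue.delay("transfer #2".to_string());
        assert_eq!(queue.pop_delayed().as_deref(), Some("transfer #1"));
        assert_eq!(queue.pop_delayed().as_deref(), Some("transfer #2"));
        assert_eq!(queue.pop_delayed(), None);
    }

The same two counters are what `check_balance` reads back: the advance of `first_index` identifies previously delayed receipts processed in this apply, and the advance of `next_available_index` identifies receipts newly delayed, so both can be included in the balance equation.
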
diff --git a/runtime/runtime/src/state_viewer.rs b/runtime/runtime/src/state_viewer.rs
index c8cea9a4f31..8ea3fc81615 100644
--- a/runtime/runtime/src/state_viewer.rs
+++ b/runtime/runtime/src/state_viewer.rs
@@ -229,7 +229,7 @@ mod tests {
     #[test]
     fn test_view_state() {
         let (_, trie, root) = get_runtime_and_trie();
-        let mut state_update = TrieUpdate::new(trie.clone(), root.hash);
+        let mut state_update = TrieUpdate::new(trie.clone(), root);
         state_update.set(key_for_data(&alice_account(), b"test123"), DBValue::from_slice(b"123"));
         let (db_changes, new_root) = state_update.finalize().unwrap().into(trie.clone()).unwrap();
         db_changes.commit().unwrap();
diff --git a/runtime/runtime/tests/runtime_group_tools/mod.rs b/runtime/runtime/tests/runtime_group_tools/mod.rs
index 9758c5b1218..13c96093f21 100644
--- a/runtime/runtime/tests/runtime_group_tools/mod.rs
+++ b/runtime/runtime/tests/runtime_group_tools/mod.rs
@@ -51,9 +51,10 @@ impl StandaloneRuntime {
             epoch_length: 4,
             gas_price: 100,
             block_timestamp: 0,
+            gas_limit: None,
         };
 
-        Self { apply_state, runtime, trie, signer, root: root.hash }
+        Self { apply_state, runtime, trie, signer, root: root }
     }
 
     pub fn process_block(
diff --git a/runtime/runtime/tests/test_evil_contracts.rs b/runtime/runtime/tests/test_evil_contracts.rs
index 821e00b3b49..e223d1afd75 100644
--- a/runtime/runtime/tests/test_evil_contracts.rs
+++ b/runtime/runtime/tests/test_evil_contracts.rs
@@ -55,8 +55,11 @@ fn test_evil_deep_trie() {
                 0,
             )
             .unwrap();
+        println!("Gas burnt: {}", res.receipts[0].outcome.gas_burnt);
         assert_eq!(res.status, FinalExecutionStatus::SuccessValue(to_base64(&[])), "{:?}", res);
     });
+    let mut first_gas_burnt = 0;
+    let mut last_gas_burnt = 0;
     (0..50).rev().for_each(|i| {
         println!("deleteStrings #{}", i);
         let from = i * 10 as u64;
@@ -75,8 +78,18 @@ fn test_evil_deep_trie() {
                 0,
             )
             .unwrap();
+        if i == 0 {
+            first_gas_burnt = res.receipts[0].outcome.gas_burnt;
+        }
+        if i == 49 {
+            last_gas_burnt = res.receipts[0].outcome.gas_burnt;
+        }
+        println!("Gas burnt: {}", res.receipts[0].outcome.gas_burnt);
         assert_eq!(res.status, FinalExecutionStatus::SuccessValue(to_base64(&[])), "{:?}", res);
     });
+    // storage_remove also has to read the previous value from the trie, which is expensive.
+    // ExtCostsConfig.touching_trie_node should be high enough to be more noticeable than CPU costs.
+    assert!(last_gas_burnt > first_gas_burnt * 15);
 }
 
 #[test]
@@ -98,7 +111,7 @@ fn test_evil_deep_recursion() {
                 0,
             )
             .unwrap();
-        if n <= 10000 {
+        if n <= 1000 {
             assert_eq!(
                 res.status,
                 FinalExecutionStatus::SuccessValue(to_base64(&n_bytes)),
diff --git a/scripts/nodelib.py b/scripts/nodelib.py
index e859d8aeb46..d5189be5411 100755
--- a/scripts/nodelib.py
+++ b/scripts/nodelib.py
@@ -54,7 +54,7 @@ def compile_package(package_name, is_release):
 
 
 """Checks if there is already everything setup on this machine, otherwise sets up NEAR node."""
-def check_and_setup(nodocker, is_release, image, home_dir, init_flags):
+def check_and_setup(nodocker, is_release, image, home_dir, init_flags, no_gas_price=False):
     if nodocker:
         compile_package('near', is_release)
 
@@ -89,6 +89,11 @@ def check_and_setup(nodocker, is_release, image, home_dir, init_flags):
         nodocker_init(home_dir, is_release, init_flags)
     else:
         docker_init(image, home_dir, init_flags)
+    if no_gas_price:
+        filename = os.path.join(home_dir, 'genesis.json')
+        genesis_config = json.load(open(filename))
+        genesis_config['gas_price'] = 0
+        json.dump(genesis_config, open(filename, 'w'))
 
 
 def print_staking_key(home_dir):
@@ -162,7 +167,7 @@ def run_nodocker(home_dir, is_release, boot_nodes, telemetry_url, verbose):
         print("\nStopping NEARCore.")
 
 
-def setup_and_run(nodocker, is_release, image, home_dir, init_flags, boot_nodes, telemetry_url, verbose=False):
+def setup_and_run(nodocker, is_release, image, home_dir, init_flags, boot_nodes, telemetry_url, verbose=False, no_gas_price=False):
     if nodocker:
         install_cargo()
     else:
@@ -173,7 +178,7 @@ def setup_and_run(nodocker, is_release, image, home_dir, init_flags, boot_nodes,
             print("Failed to fetch docker containers: %s" % exc)
             exit(1)
 
-    check_and_setup(nodocker, is_release, image, home_dir, init_flags)
+    check_and_setup(nodocker, is_release, image, home_dir, init_flags, no_gas_price)
 
     print_staking_key(home_dir)
 
diff --git a/scripts/start_unittest.py b/scripts/start_unittest.py
index 37dab3d6289..a50085f5ace 100755
--- a/scripts/start_unittest.py
+++ b/scripts/start_unittest.py
@@ -24,4 +24,5 @@
                   init_flags=['--chain-id=', '--test-seed=alice.near', '--account-id=test.near', '--fast'],
                   boot_nodes='',
                   telemetry_url='',
-                  verbose=args.verbose)
+                  verbose=args.verbose,
+                  no_gas_price=True)
diff --git a/scripts/state/migrate-from-0.3.py b/scripts/state/migrate-from-0.3.py
index 1c8525070b6..a90aa8ebcb7 100755
--- a/scripts/state/migrate-from-0.3.py
+++ b/scripts/state/migrate-from-0.3.py
@@ -144,7 +144,7 @@
           "promise_results_count": 1,
           "promise_return": 1,
           "log_base": 1,
-          "log_per_byte": 1
+          "log_byte": 1
         }''')
 burnt_gas_reward = json.loads('''{
           "denominator": 10,
diff --git a/test-utils/state-viewer/src/main.rs b/test-utils/state-viewer/src/main.rs
index 025bf71228b..6bc18a207c7 100644
--- a/test-utils/state-viewer/src/main.rs
+++ b/test-utils/state-viewer/src/main.rs
@@ -233,6 +233,7 @@ fn replay_chain(
                     header.inner.prev_hash,
                     header.hash(),
                     header.inner.height,
+                    chain_store.get_block_height(&header.inner.last_quorum_pre_commit).unwrap(),
                     header.inner.validator_proposals,
                     vec![],
                     header.inner.chunk_mask,
@@ -322,7 +323,7 @@ fn main() {
             let (runtime, state_roots, height) = load_trie(store, &home_dir, &near_config);
             println!("Storage roots are {:?}, block height is {}", state_roots, height);
             for state_root in state_roots {
-                let trie = TrieIterator::new(&runtime.trie, &state_root.hash).unwrap();
+                let trie = TrieIterator::new(&runtime.trie, &state_root).unwrap();
                 for item in trie {
                     let (key, value) = item.unwrap();
                     print_state_entry(key, value);
@@ -340,7 +341,7 @@ fn main() {
             );
             near_config.genesis_config.records = vec![];
             for state_root in state_roots {
-                let trie = TrieIterator::new(&runtime.trie, &state_root.hash).unwrap();
+                let trie = TrieIterator::new(&runtime.trie, &state_root).unwrap();
                 for item in trie {
                     let (key, value) = item.unwrap();
                     if let Some(sr) = kv_to_state_record(key, value) {
diff --git a/test-utils/testlib/src/node/runtime_node.rs b/test-utils/testlib/src/node/runtime_node.rs
index c3d639e969e..8284468aa98 100644
--- a/test-utils/testlib/src/node/runtime_node.rs
+++ b/test-utils/testlib/src/node/runtime_node.rs
@@ -27,7 +27,7 @@ impl RuntimeNode {
         let client = Arc::new(RwLock::new(MockClient {
             runtime,
             trie,
-            state_root: root.hash,
+            state_root: root,
             epoch_length: genesis_config.epoch_length,
         }));
         RuntimeNode { signer, client }
diff --git a/test-utils/testlib/src/runtime_utils.rs b/test-utils/testlib/src/runtime_utils.rs
index 9ea66dac796..d0e7688911a 100644
--- a/test-utils/testlib/src/runtime_utils.rs
+++ b/test-utils/testlib/src/runtime_utils.rs
@@ -61,7 +61,7 @@ pub fn get_runtime_and_trie() -> (Runtime, Arc<Trie>, StateRoot) {
 pub fn get_test_trie_viewer() -> (TrieViewer, TrieUpdate) {
     let (_, trie, root) = get_runtime_and_trie();
     let trie_viewer = TrieViewer::new();
-    let state_update = TrieUpdate::new(trie, root.hash);
+    let state_update = TrieUpdate::new(trie, root);
     (trie_viewer, state_update)
 }
 
diff --git a/test-utils/testlib/src/user/runtime_user.rs b/test-utils/testlib/src/user/runtime_user.rs
index daddb921ceb..6820f13ae96 100644
--- a/test-utils/testlib/src/user/runtime_user.rs
+++ b/test-utils/testlib/src/user/runtime_user.rs
@@ -95,7 +95,7 @@ impl RuntimeUser {
                 );
             }
             apply_result.trie_changes.into(client.trie.clone()).unwrap().0.commit().unwrap();
-            client.state_root = apply_result.state_root.hash;
+            client.state_root = apply_result.state_root;
             if apply_result.new_receipts.is_empty() {
                 return Ok(());
             }
@@ -114,6 +114,7 @@ impl RuntimeUser {
             block_timestamp: 0,
             epoch_length: client.epoch_length,
             gas_price: INITIAL_GAS_PRICE,
+            gas_limit: None,
         }
     }
 
diff --git a/tests/test_tps_regression.rs b/tests/test_tps_regression.rs
index d3872c0bd8a..e55edd40094 100644
--- a/tests/test_tps_regression.rs
+++ b/tests/test_tps_regression.rs
@@ -140,10 +140,17 @@ mod test {
                                 .map(|idx| node.read().unwrap().user().get_block(idx).unwrap())
                                 .collect::<Vec<_>>();
                             for b in &blocks {
+                                let gas_used = b.chunks.iter().fold(0, |acc, chunk| {
+                                    if chunk.height_included == b.header.height {
+                                        acc + chunk.gas_used
+                                    } else {
+                                        acc
+                                    }
+                                });
                                 observed_transactions
                                     .write()
                                     .unwrap()
-                                    .push((b.header.gas_used as u64, Instant::now()));
+                                    .push((gas_used, Instant::now()));
                             }
                             prev_ind = new_ind;
                         }
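The TPS test above no longer reads a per-block gas_used field from the header; it sums chunk gas instead, and only for chunks whose height_included equals the block's own height, so chunks merely carried forward from earlier heights are not counted again. The same aggregation restated over plain dicts (the field layout is an assumption, mirroring only the fields used in the fold above):

def block_gas_used(block):
    # Count a chunk's gas only if the chunk was actually included at this block's
    # height; otherwise the block just carries the previous chunk forward.
    height = block['header']['height']
    return sum(chunk['gas_used']
               for chunk in block['chunks']
               if chunk['height_included'] == height)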