diff --git a/src/Paprika.Runner/Program.cs b/src/Paprika.Runner/Program.cs
index 47563a43..d8162647 100644
--- a/src/Paprika.Runner/Program.cs
+++ b/src/Paprika.Runner/Program.cs
@@ -16,7 +16,7 @@ namespace Paprika.Runner;
public static class Program
{
- private const int BlockCount = PersistentDb ? 20_000 : 3_000;
+ private const int BlockCount = PersistentDb ? 25_000 : 3_000;
private const int AccountsPerBlock = 1000;
private const int MaxReorgDepth = 64;
private const int FinalizeEvery = 32;
@@ -28,7 +28,7 @@ public static class Program
private const long DbFileSize = PersistentDb ? 256 * Gb : 16 * Gb;
private const long Gb = 1024 * 1024 * 1024L;
- private static readonly TimeSpan FlushEvery = TimeSpan.FromSeconds(30);
+ private static readonly TimeSpan FlushEvery = TimeSpan.FromSeconds(5);
private const int LogEvery = BlockCount / NumberOfLogs;
@@ -81,7 +81,8 @@ public static async Task Main(String[] args)
}
Console.WriteLine("Initializing db of size {0}GB", DbFileSize / Gb);
- Console.WriteLine("Starting benchmark. Flush buffer every: {0}ms", ((int)FlushEvery.TotalMilliseconds).ToString());
+ Console.WriteLine("Starting benchmark. Flush buffer every: {0}ms",
+ ((int)FlushEvery.TotalMilliseconds).ToString());
PagedDb db = PersistentDb
? PagedDb.MemoryMappedDb(DbFileSize, MaxReorgDepth, dataPath)
@@ -147,8 +148,9 @@ public static async Task Main(String[] args)
if (actual != expected)
{
- throw new InvalidOperationException($"Invalid account state for account number {i} with address {key.ToString()}. " +
- $"The expected value is {expected} while the actual is {actual}!");
+ throw new InvalidOperationException(
+ $"Invalid account state for account number {i} with address {key.ToString()}. " +
+ $"The expected value is {expected} while the actual is {actual}!");
}
if (UseStorageEveryNAccounts > 0 && i % UseStorageEveryNAccounts == 0)
@@ -185,6 +187,9 @@ public static async Task Main(String[] args)
// the final report
ReportReading(counter);
+ // statistics
+ layout[info].Update(new Panel("Gathering statistics...").Header("Paprika tree statistics").Expand());
+
var stats = new StatisticsReporter();
read.Report(stats);
var table = new Table();
@@ -202,9 +207,18 @@ public static async Task Main(String[] args)
}
var mb = (long)stats.PageCount * Page.PageSize / 1024 / 1024;
- var report = new Layout().SplitRows(
- new Layout(new Paragraph($"General stats:\n1. Size of this Paprika tree: {mb}MB")).Size(3),
- new Layout(table.Expand()));
+
+ var types = string.Join(", ", stats.PageTypes.Select(kvp => $"{kvp.Key}: {kvp.Value}"));
+ var report = new Layout()
+ .SplitRows(
+ new Layout(
+ new Rows(
+ new Text("General stats:"),
+ new Text($"1. Size of this Paprika tree: {mb}MB"),
+ new Text($"2. Types of pages: {types}"),
+ WriteHistogram(stats.PageAge, "3. Age of pages: ")))
+ .Size(5),
+ new Layout(table.Expand()));
layout[info].Update(new Panel(report).Header("Paprika tree statistics").Expand());
@@ -244,7 +258,7 @@ void ReportReading(int i)
private static Random BuildRandom() => new(RandomSeed);
- private static IRenderable WriteHistogram(HistogramBase histogram)
+ private static IRenderable WriteHistogram(HistogramBase histogram, string prefix = "")
{
string Percentile(int percentile, string color)
{
@@ -252,7 +266,7 @@ string Percentile(int percentile, string color)
return $"[{color}]P{percentile}: {value,2}[/] ";
}
- return new Markup(Percentile(50, "green") + Percentile(90, "yellow") + Percentile(95, "red"));
+ return new Markup(prefix + Percentile(50, "green") + Percentile(90, "yellow") + Percentile(95, "red"));
}
private static int Writer(Blockchain blockchain, Keccak bigStorageAccount, Random random,
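A minimal sketch of how the new `prefix` parameter of `WriteHistogram` is meant to be used, assuming it runs inside `Program` where the method is accessible; the histogram and recorded values are illustrative only, not taken from the benchmark:

```csharp
using HdrHistogram;
using Spectre.Console;

// Illustrative histogram; in the runner the data comes from StatisticsReporter.PageAge.
var ages = new IntHistogram(1_000_000_000, 5);
ages.RecordValue(1);
ages.RecordValue(120);
ages.RecordValue(500);

// Renders something like "Age of pages: P50: ... P90: ... P95: ..." with colored markup.
AnsiConsole.Write(WriteHistogram(ages, "Age of pages: "));
```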
diff --git a/src/Paprika/Store/DataPage.cs b/src/Paprika/Store/DataPage.cs
index adb2a29f..35bafa4e 100644
--- a/src/Paprika/Store/DataPage.cs
+++ b/src/Paprika/Store/DataPage.cs
@@ -267,11 +267,11 @@ public void Report(IReporter reporter, IPageResolver resolver, int level)
if (emptyBuckets == 0)
{
// all filled
- reporter.Report(level, Payload.BucketCount, new HashingMap(Data.DataSpan).Count);
+ reporter.ReportDataUsage(level, Payload.BucketCount, new HashingMap(Data.DataSpan).Count);
}
else
{
- reporter.Report(level, Payload.BucketCount - emptyBuckets,
+ reporter.ReportDataUsage(level, Payload.BucketCount - emptyBuckets,
new NibbleBasedMap(Data.DataSpan).Count);
}
}
diff --git a/src/Paprika/Store/IReporter.cs b/src/Paprika/Store/IReporter.cs
index 24673f60..09f15fc5 100644
--- a/src/Paprika/Store/IReporter.cs
+++ b/src/Paprika/Store/IReporter.cs
@@ -1,4 +1,5 @@
-using HdrHistogram;
+using System.Runtime.InteropServices;
+using HdrHistogram;
namespace Paprika.Store;
@@ -7,15 +8,23 @@ namespace Paprika.Store;
/// </summary>
public interface IReporter
{
- void Report(int level, int filledBuckets, int entriesPerPage);
+ void ReportDataUsage(int level, int filledBuckets, int entriesPerPage);
+
+ /// <summary>
+ /// Reports the type of a page and how many batches ago it was last updated.
+ /// </summary>
+ void ReportPage(uint ageInBatches, PageType type);
}
public class StatisticsReporter : IReporter
{
public readonly SortedDictionary<int, Level> Levels = new();
+ public readonly Dictionary<PageType, int> PageTypes = new();
public int PageCount = 0;
- public void Report(int level, int filledBuckets, int entriesPerPage)
+ public readonly IntHistogram PageAge = new(1_000_000_000, 5);
+
+ public void ReportDataUsage(int level, int filledBuckets, int entriesPerPage)
{
if (Levels.TryGetValue(level, out var lvl) == false)
{
@@ -28,6 +37,13 @@ public void Report(int level, int filledBuckets, int entriesPerPage)
lvl.Entries.RecordValue(entriesPerPage);
}
+ public void ReportPage(uint ageInBatches, PageType type)
+ {
+ PageAge.RecordValue(ageInBatches);
+ var value = PageTypes.GetValueOrDefault(type);
+ PageTypes[type] = value + 1;
+ }
+
public class Level
{
public readonly IntHistogram ChildCount = new(1000, 5);
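For reference, a rough sketch of how the new `PageTypes` and `PageAge` members might be consumed after a report pass; `readOnlyBatch` is a placeholder for an `IReadOnlyBatch` obtained from the db, as done in Program.cs:

```csharp
var stats = new StatisticsReporter();
readOnlyBatch.Report(stats);            // walks the tree and the raw page range

// Count of pages per page type, accumulated by ReportPage.
foreach (var (type, count) in stats.PageTypes)
    Console.WriteLine($"{type}: {count} pages");

// HdrHistogram exposes percentiles directly; age is measured in batches.
Console.WriteLine($"P95 page age: {stats.PageAge.GetValueAtPercentile(95)} batches");
```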
diff --git a/src/Paprika/Store/PagedDb.cs b/src/Paprika/Store/PagedDb.cs
index 902926d0..b5e4418b 100644
--- a/src/Paprika/Store/PagedDb.cs
+++ b/src/Paprika/Store/PagedDb.cs
@@ -1,7 +1,6 @@
using System.Diagnostics;
using System.Diagnostics.Metrics;
using System.Runtime.InteropServices;
-using Nethermind.Int256;
using Paprika.Crypto;
using Paprika.Data;
using Paprika.Store.PageManagers;
@@ -44,7 +43,9 @@ public class PagedDb : IPageResolver, IDb, IDisposable
private readonly Counter _writes;
private readonly Counter _commits;
private readonly Histogram _commitDuration;
- private readonly Histogram _commitPageCount;
+ private readonly Histogram _commitPageCountTotal;
+ private readonly Histogram _commitPageCountReused;
+ private readonly Histogram _commitPageCountNewlyAllocated;
private readonly MetricsExtensions.IAtomicIntGauge _dbSize;
// pool
@@ -76,7 +77,11 @@ private PagedDb(IPageManager manager, byte historyDepth)
_writes = _meter.CreateCounter("Writes", "Writes", "The number of writes db handles");
_commits = _meter.CreateCounter("Commits", "Commits", "The number of batch commits db handles");
_commitDuration = _meter.CreateHistogram("Commit duration", "ms", "The time it takes to perform a commit");
- _commitPageCount = _meter.CreateHistogram("Commit page count", "pages",
+ _commitPageCountTotal = _meter.CreateHistogram("Commit page count (total)", "pages",
+ "The number of pages flushed during the commit");
+ _commitPageCountReused = _meter.CreateHistogram("Commit page count (reused)", "pages",
+ "The number of reused pages flushed during the commit");
+ _commitPageCountNewlyAllocated = _meter.CreateHistogram("Commit page count (new)", "pages",
"The number of pages flushed during the commit");
}
@@ -96,7 +101,12 @@ private void ReportCommit(TimeSpan elapsed)
private void ReportDbSize(int megabytes) => _dbSize.Set(megabytes);
- private void ReportPageCountPerCommit(int pageCount) => _commitPageCount.Record(pageCount);
+ private void ReportPageCountPerCommit(int totalPageCount, int reused, int newlyAllocated)
+ {
+ _commitPageCountTotal.Record(totalPageCount);
+ _commitPageCountReused.Record(reused);
+ _commitPageCountNewlyAllocated.Record(newlyAllocated);
+ }
private void RootInit()
{
@@ -143,7 +153,7 @@ public IReadOnlyBatch BeginReadOnlyBatch()
lock (_batchLock)
{
var batchId = Root.Header.BatchId;
- var batch = new ReadOnlyBatch(this, batchId, Root.Data.AccountPages.ToArray(), Root.Data.Metadata);
+ var batch = new ReadOnlyBatch(this, batchId, Root.Data.AccountPages.ToArray(), Root.Data.Metadata, Root.Data.NextFreePage);
_batchesReadOnly.Add(batch);
return batch;
}
@@ -234,11 +244,14 @@ class ReadOnlyBatch : IReadOnlyBatch, IReadOnlyBatchContext
private bool _disposed;
private readonly DbAddress[] _rootDataPages;
+ private readonly DbAddress _nextFreePage;
- public ReadOnlyBatch(PagedDb db, uint batchId, DbAddress[] rootDataPages, Metadata metadata)
+ public ReadOnlyBatch(PagedDb db, uint batchId, DbAddress[] rootDataPages, Metadata metadata,
+ DbAddress nextFreePage)
{
_db = db;
_rootDataPages = rootDataPages;
+ _nextFreePage = nextFreePage;
BatchId = batchId;
Metadata = metadata;
}
@@ -279,6 +292,13 @@ public void Report(IReporter reporter)
new DataPage(GetAt(addr)).Report(reporter, this, 1);
}
}
+
+ for (uint i = _db._historyDepth; i < _nextFreePage.Raw; i++)
+ {
+ ref readonly var header = ref GetAt(DbAddress.Page(i)).Header;
+ var pageBatchId = header.BatchId;
+ reporter.ReportPage(BatchId - pageBatchId, header.PageType);
+ }
}
public uint BatchId { get; }
@@ -401,13 +421,7 @@ private void CheckDisposed()
public void Report(IReporter reporter)
{
- foreach (var addr in _root.Data.AccountPages)
- {
- if (addr.IsNull == false)
- {
- new DataPage(GetAt(addr)).Report(reporter, this, 1);
- }
- }
+ throw new NotImplementedException();
}
public async ValueTask Commit(CommitOptions options)
@@ -419,7 +433,7 @@ public async ValueTask Commit(CommitOptions options)
// memoize the abandoned so that it's preserved for future uses
MemoizeAbandoned();
- _db.ReportPageCountPerCommit(_written.Count);
+ _db.ReportPageCountPerCommit(_written.Count, _metrics.PagesReused, _metrics.PagesAllocated);
await _db._manager.FlushPages(_written, options);
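The age reported by `ReportPage` is plain batch-id arithmetic: every page header carries the `BatchId` of the commit that last wrote it, and the scan (which presumably starts at `_historyDepth` to skip the root pages at the start of the file) subtracts it from the reporting batch's id. A trivial sketch with made-up numbers:

```csharp
// Made-up values; in the diff above both come from the page header and the read-only batch.
uint batchId = 1_000;    // ReadOnlyBatch.BatchId at report time
uint pageBatchId = 937;  // Header.BatchId stamped when the page was last written

uint ageInBatches = batchId - pageBatchId; // 63: the page was last touched 63 commits ago
```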