towards bigger fanout
Scooletz committed Jun 28, 2024
1 parent 271e62c · commit bfe308a
Showing 3 changed files with 34 additions and 11 deletions.
src/Paprika.Tests/Store/AbandonedTests.cs (2 changes: 1 addition & 1 deletion)
@@ -91,7 +91,7 @@ public void Properly_handles_page_addresses_that_are_packed_2()
[TestCase(68421, 10_000, 50, false,
    TestName = "Accounts - 10000 to breach the AbandonedPage",
    Category = Categories.LongRunning)]
-[TestCase(98579, 20_000, 50, true,
+[TestCase(97664, 20_000, 50, true,
    TestName = "Storage - 20_000 accounts with a single storage slot",
    Category = Categories.LongRunning)]
public async Task Reuse_in_limited_environment(int pageCount, int accounts, int repeats, bool isStorage)
src/Paprika/Store/FanOutList.cs (41 changes: 32 additions & 9 deletions)
@@ -35,30 +35,53 @@ public void ClearCowVector()

public void Set(in NibblePath key, in ReadOnlySpan<byte> data, IBatchContext batch)
{
-   ref var addr = ref GetBucket(key, out var index);
+   ref var addr = ref GetBucket(key, out var index, out var bucket);
+   var cowFlag = 1L << bucket;

-   // Ensure the first page is properly set
+   // The page that contains the buckets requires manual management as it has no header.
    Page page;
    if (addr.IsNull)
    {
-       page = batch.GetNewPage(out addr, true);
-       page.Header.PageType = PageType.Standard;
-       page.Header.Level = 0;
+       // The page did not exist before.
+       // Get a new one, but remember that manual clearing is required to destroy the assigned metadata.
+       page = batch.GetNewPage(out addr, false);
+       page.Clear();
    }
    else
    {
-       page = batch.EnsureWritableCopy(ref addr);
+       if ((_data.CowBitVector & cowFlag) != cowFlag)
+       {
+           // This page has not been COWed during this batch.
+           // This must be done manually, as the header is overwritten.
+           var prev = batch.GetAt(addr);
+           page = batch.GetNewPage(out addr, false);
+           prev.CopyTo(page);
+           batch.RegisterForFutureReuse(prev);
+
+           // Mark the flag so that the next write does not COW again.
+           _data.CowBitVector |= cowFlag;
+       }
+       else
+       {
+           // This page has been COWed already, just retrieve it.
+           page = batch.GetAt(addr);
+       }
    }

    ref var descendant = ref GetDescendantAddress(page, index);
    if (descendant.IsNull)
    {
        batch.GetNewPage(out descendant, true);
    }

    // The page exists, update it.
    var updated = TPage.Wrap(batch.GetAt(descendant)).Set(key.SliceFrom(ConsumedNibbles), data, batch);
    descendant = batch.GetAddress(updated);
}

public bool TryGet(IReadOnlyBatchContext batch, scoped in NibblePath key, out ReadOnlySpan<byte> result)
{
-   ref var addr = ref GetBucket(key, out var index);
+   ref var addr = ref GetBucket(key, out var index, out _);

    if (addr.IsNull)
    {
@@ -81,7 +104,7 @@ public void Accept(IPageVisitor visitor, IPageResolver resolver)
    }
}

-private ref DbAddress GetBucket(in NibblePath key, out int index)
+private ref DbAddress GetBucket(in NibblePath key, out int index, out int pageNo)
{
    Debug.Assert(key.Length > ConsumedNibbles);

@@ -94,7 +117,7 @@ private ref DbAddress GetBucket(in NibblePath key, out int index)
        (key.GetAt(3) << (3 * shift));

    Debug.Assert(bucket < DbAddressesPerPage * PageCount);
-   (index, var pageNo) = Math.DivRem(bucket, PageCount);
+   (index, pageNo) = Math.DivRem(bucket, PageCount);
    return ref _data.Addresses[pageNo];
}

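For readers skimming the diff: the GetBucket hunk above folds the key's first four nibbles into a single bucket value and then splits it with Math.DivRem, so the remainder picks which top-level DbAddress (page) to follow and the quotient becomes the slot inside that page. A minimal standalone sketch of that mapping follows; the constants PageCount, DbAddressesPerPage and NibbleShift are illustrative assumptions, not Paprika's actual values.

// Sketch only: the constants below are assumed for illustration.
using System;
using System.Diagnostics;

static class FanOutBucketSketch
{
    const int PageCount = 64;            // assumed: one bit per page would fit a 64-bit COW vector
    const int DbAddressesPerPage = 1024; // assumed: child addresses stored per page
    const int NibbleShift = 4;           // a nibble is 4 bits wide

    // Mirrors the shape of GetBucket: four nibbles -> bucket -> (pageNo, index).
    static (int PageNo, int Index) Map(byte n0, byte n1, byte n2, byte n3)
    {
        var bucket = n0 +
                     (n1 << NibbleShift) +
                     (n2 << (2 * NibbleShift)) +
                     (n3 << (3 * NibbleShift));

        Debug.Assert(bucket < DbAddressesPerPage * PageCount);

        // Math.DivRem returns (quotient, remainder): the quotient indexes into the page,
        // the remainder selects the top-level page, spreading consecutive buckets across pages.
        (var index, var pageNo) = Math.DivRem(bucket, PageCount);
        return (pageNo, index);
    }

    static void Main()
    {
        // bucket = 0xA + (0x3 << 4) + (0x0 << 8) + (0x1 << 12) = 4154 -> pageNo 58, index 64
        var (pageNo, index) = Map(0xA, 0x3, 0x0, 0x1);
        Console.WriteLine($"pageNo={pageNo}, index={index}");
    }
}

Four nibbles can address at most 16^4 = 65,536 buckets, so the Debug.Assert in the hunk holds as long as DbAddressesPerPage * PageCount is at least that large.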
src/Paprika/Store/RootPage.cs (2 changes: 1 addition & 1 deletion)
@@ -84,7 +84,7 @@ public struct Payload
    [FieldOffset(FanOutsStart + FanOutList.Size + FanOutList.Size)]
    private FanOutList StorageMerklePayload;

-   public FanOutList.Of<DataPage, StandardType> StorageMerkle => new(ref StoragePayload);
+   public FanOutList.Of<DataPage, StandardType> StorageMerkle => new(ref StorageMerklePayload);


    public DbAddress GetNextFreePage()
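The RootPage.cs change above is a one-line bug fix: the StorageMerkle accessor wrapped the neighbouring StoragePayload field instead of its own StorageMerklePayload, so storage-Merkle data would have landed in the storage fan-out's payload. With an explicitly laid out struct and hand-computed FieldOffset values, adjacent fields are easy to mix up; a reduced sketch of the pattern is below, with sizes, offsets and the long stand-ins chosen purely for illustration.

// Reduced sketch of an explicit-layout payload with two adjacent fan-out slots.
// Offsets, sizes and field types are illustrative, not RootPage's actual layout.
using System.Runtime.InteropServices;

[StructLayout(LayoutKind.Explicit, Size = 144)]
struct PayloadSketch
{
    const int FanOutsStart = 16; // assumed header area before the fan-out slots
    const int FanOutSize = 64;   // assumed stand-in for FanOutList.Size

    [FieldOffset(FanOutsStart)]
    private long StoragePayload;               // stands in for the storage FanOutList

    [FieldOffset(FanOutsStart + FanOutSize)]
    private long StorageMerklePayload;         // stands in for the storage-Merkle FanOutList

    public long Storage => StoragePayload;

    // The fix in the diff: wrap StorageMerklePayload here, not StoragePayload,
    // otherwise both properties read and write the same slot.
    public long StorageMerkle => StorageMerklePayload;
}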
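Stepping back to the FanOutList.Set change further up: instead of relying on batch.EnsureWritableCopy, the page that holds the buckets is now copied by hand (per the new comment, it carries no page header), and a per-batch bit vector records which pages were already copied so that only the first write in a batch pays for the copy. The following is a rough sketch of that gating pattern; SketchPage, GetWritable and the reuse list are invented stand-ins, not Paprika's IBatchContext API.

// Illustrative sketch of per-batch copy-on-write gating with a bit vector.
using System.Collections.Generic;

sealed class SketchPage
{
    public byte[] Data = new byte[4096];
    public SketchPage Copy() => new SketchPage { Data = (byte[])Data.Clone() };
}

sealed class FanOutSketch
{
    readonly SketchPage[] _pages = new SketchPage[64]; // one entry per top-level page
    long _cowBitVector;                                // one bit per page, like CowBitVector

    // Called at the start of every batch, mirroring ClearCowVector in the hunk header above.
    public void ClearCowVector() => _cowBitVector = 0;

    public SketchPage GetWritable(int pageNo, List<SketchPage> toReuse)
    {
        var flag = 1L << pageNo;
        var existing = _pages[pageNo];

        if (existing is null)
        {
            // The page did not exist before: allocate a fresh one.
            return _pages[pageNo] = new SketchPage();
        }

        if ((_cowBitVector & flag) != flag)
        {
            // First write to this page in the current batch: copy it, retire the old page
            // (this stands in for RegisterForFutureReuse) and mark the bit.
            var copy = existing.Copy();
            toReuse.Add(existing);
            _pages[pageNo] = copy;
            _cowBitVector |= flag;
            return copy;
        }

        // Already copied during this batch: write in place.
        return existing;
    }
}

Clearing the bit vector once per batch is what bounds the cost: every later Set that touches the same page within that batch takes the cheap branch and writes in place.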
