Re-simplify SnapshotHashes incremental list
The potential memory savings were minimal at best, and came at a
significant cost in complexity: a simple tagged union is sufficient
to avoid the overhead of an allocation, while retaining simplicity
across the rest of the code (especially with respect to generic comparisons).
InKryption committed Oct 31, 2024
1 parent ef7e868 commit 2beb3c5
Showing 4 changed files with 64 additions and 186 deletions.
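
For context, here is a condensed sketch of the two in-memory layouts this commit trades between. The shapes are taken from the diff below, but the `SlotAndHash` field types and the list names are simplified stand-ins for illustration, not the repository's exact definitions:

```zig
const std = @import("std");

// Simplified stand-in for the repository's SlotAndHash; exact fields are illustrative only.
const SlotAndHash = struct { slot: u64, hash: [32]u8 };

// Before: the tag is implicit in `len`, so every access must branch by hand, and
// generic comparison helpers (std.meta.eql, std.testing.expectEqualDeep) refuse
// the untagged union.
const OldIncrementalList = struct {
    len: usize,
    items: union {
        single: SlotAndHash, // active when len == 1
        list: [*]const SlotAndHash, // active when len != 1
    },
};

// After: a plain tagged union. The one-element case is still stored inline, so the
// allocation is still avoided, but switches and deep comparisons work directly.
const NewIncrementalList = union(enum) {
    single: SlotAndHash,
    list: []const SlotAndHash,
};

test "the single-item case stays inline in both layouts" {
    const item: SlotAndHash = .{ .slot = 10, .hash = [_]u8{0} ** 32 };
    const old = OldIncrementalList{ .len = 1, .items = .{ .single = item } };
    const new = NewIncrementalList{ .single = item };
    try std.testing.expectEqual(item.slot, old.items.single.slot);
    try std.testing.expectEqual(item.slot, new.single.slot);
}
```
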
32 changes: 20 additions & 12 deletions src/accountsdb/db.zig
@@ -4184,18 +4184,26 @@ test "generate snapshot & update gossip snapshot hashes" {
try std.testing.expectEqual(.SnapshotHashes, std.meta.activeTag(queue_item_0.data));
try std.testing.expectEqual(.SnapshotHashes, std.meta.activeTag(queue_item_1.data));

try queue_item_0.data.SnapshotHashes.asComparable(.ignore_wallclock).expectEqual(.{
.from = Pubkey.fromPublicKey(&my_keypair.public_key),
.full = .{ .slot = full_slot, .hash = full_hash },
.incremental = &.{},
.wallclock = null,
});
try queue_item_1.data.SnapshotHashes.asComparable(.ignore_wallclock).expectEqual(.{
.from = Pubkey.fromPublicKey(&my_keypair.public_key),
.full = .{ .slot = full_slot, .hash = full_hash },
.incremental = &.{.{ .slot = inc_slot, .hash = inc_hash }},
.wallclock = null,
});
const SnapshotHashes = sig.gossip.data.SnapshotHashes;

try std.testing.expectEqualDeep(
SnapshotHashes{
.from = Pubkey.fromPublicKey(&my_keypair.public_key),
.full = .{ .slot = full_slot, .hash = full_hash },
.incremental = SnapshotHashes.IncrementalSnapshotsList.EMPTY,
.wallclock = queue_item_0.data.SnapshotHashes.wallclock,
},
queue_item_0.data.SnapshotHashes,
);
try std.testing.expectEqualDeep(
SnapshotHashes{
.from = Pubkey.fromPublicKey(&my_keypair.public_key),
.full = .{ .slot = full_slot, .hash = full_hash },
.incremental = SnapshotHashes.IncrementalSnapshotsList.initSingle(.{ .slot = inc_slot, .hash = inc_hash }),
.wallclock = queue_item_1.data.SnapshotHashes.wallclock,
},
queue_item_1.data.SnapshotHashes,
);
}

pub const BenchmarkAccountsDBSnapshotLoad = struct {
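
Because the incremental list is now an ordinary tagged union holding either a value or a slice, the test above (and the `message.zig` test further down) can lean on `std.testing.expectEqualDeep`, which descends through union payloads and slices and compares elements, instead of the hand-rolled `asComparable`/`Comparable` scaffolding; the wallclock field is copied from the actual value so it stays out of the comparison. A minimal, self-contained illustration of that behaviour follows; the types here are simplified stand-ins, not sig's definitions:

```zig
const std = @import("std");

const SlotAndHash = struct { slot: u64, hash: [32]u8 };

const Incremental = union(enum) {
    single: SlotAndHash,
    list: []const SlotAndHash,
};

test "expectEqualDeep follows the slice payload of a tagged union" {
    const expected: Incremental = .{ .list = &.{
        .{ .slot = 1, .hash = [_]u8{1} ** 32 },
        .{ .slot = 2, .hash = [_]u8{2} ** 32 },
    } };
    const actual_items = [_]SlotAndHash{
        .{ .slot = 1, .hash = [_]u8{1} ** 32 },
        .{ .slot = 2, .hash = [_]u8{2} ** 32 },
    };
    const actual: Incremental = .{ .list = &actual_items };

    // expectEqual compares slices as pointer + length; expectEqualDeep follows the
    // pointers and compares the SlotAndHash elements themselves.
    try std.testing.expectEqualDeep(expected, actual);
}
```
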
193 changes: 41 additions & 152 deletions src/gossip/data.zig
@@ -212,12 +212,6 @@ pub const SignedGossipData = struct {
// zig fmt: on
};
}

pub fn equals(self: *const Self, other: *const Self) bool {
if (!self.signature.eql(&other.signature)) return false;
if (!self.data.equals(&other.data)) return false;
return true;
}
};

/// Analogous to [CrdsValueLabel](https://github.com/solana-labs/solana/blob/e0203f22dc83cb792fa97f91dbe6e924cbd08af1/gossip/src/crds_value.rs#L500)
@@ -428,30 +422,6 @@ pub const GossipData = union(GossipDataTag) {
else => null,
};
}

pub fn equals(self: *const GossipData, other: *const GossipData) bool {
if (self.* != std.meta.activeTag(other.*)) return false;
return switch (self.*) {
inline //
.LegacyContactInfo,
.Vote,
.LowestSlot,
.LegacySnapshotHashes,
.AccountsHashes,
.EpochSlots,
.LegacyVersion,
.Version,
.NodeInstance,
.DuplicateShred,
.ContactInfo,
.RestartLastVotedForkSlots,
.RestartHeaviestFork,
=> |self_payload, tag| std.meta.eql(self_payload, @field(other, @tagName(tag))),

.SnapshotHashes,
=> |*snap_hashes| snap_hashes.equals(&other.SnapshotHashes),
};
}
};

/// analogous to [LegacyContactInfo](https://github.com/anza-xyz/agave/blob/0d34a1a160129c4293dac248e14231e9e773b4ce/gossip/src/legacy_contact_info.rs#L26)
@@ -1114,116 +1084,51 @@ pub const SnapshotHashes = struct {
}
}

pub fn equals(
self: *const SnapshotHashes,
other: *const SnapshotHashes,
) bool {
if (self.wallclock != other.wallclock) return false;
if (self.from.equals(&other.from)) return false;
if (self.full.equals(&other.full)) return false;
if (!self.incremental.equals(&other.incremental)) return false;
return true;
}

pub const Comparable = struct {
from: Pubkey,
full: SlotAndHash,
incremental: []const SlotAndHash,
wallclock: ?u64,

pub fn expectEqual(actual: Comparable, expected: Comparable) !void {
try std.testing.expectEqual(expected.from, actual.from);
try std.testing.expectEqual(expected.full, actual.full);

wallclock: {
const actual_wallclock = actual.wallclock orelse break :wallclock;
const expected_wallclock = expected.wallclock orelse break :wallclock;
try std.testing.expectEqual(expected_wallclock, actual_wallclock);
}

try std.testing.expectEqualSlices(SlotAndHash, expected.incremental, actual.incremental);
}
};
pub fn asComparable(
self: *const SnapshotHashes,
wallclock_compare: enum {
compare_wallclock,
ignore_wallclock,
},
) Comparable {
return .{
.from = self.from,
.full = self.full,
.incremental = self.incremental.getSlice(),
.wallclock = switch (wallclock_compare) {
.compare_wallclock => self.wallclock,
.ignore_wallclock => null,
},
};
}

/// List of incremental `SlotAndHash`es.
/// Can be thought of as a tagged union, where the tag is a boolean derived from `.len == 1`.
/// When the tag is `true`, the single item is represented inline in the `items` union.
/// When the tag is `false`, the list of items is pointed to by the `items` union.
///
/// This optimizes the case where we only have a single incremental snapshot.
pub const IncrementalSnapshotsList = struct {
len: usize,
items: SingleOrList,
pub const IncrementalSnapshotsList = union(enum) {
single: SlotAndHash,
list: []const SlotAndHash,

pub const @"!bincode-config": bincode.FieldConfig(IncrementalSnapshotsList) = .{
.serializer = bincode_impl.serializeFn,
.deserializer = bincode_impl.deserializeFn,
.free = bincode_impl.freeFn,
.serializer = bincodeSerializeFn,
.deserializer = bincodeDeserializeFn,
.free = bincodeFreeFn,
.skip = false,
.post_deserialize_fn = null,
};

pub const SingleOrList = union {
/// Active when `.len == 1`.
single: SlotAndHash,
/// Active when `.len != 1`.
list: [*]const SlotAndHash,
};

pub fn getSlice(inc: *const IncrementalSnapshotsList) []const SlotAndHash {
if (inc.len == 1) {
return (&inc.items.single)[0..1];
} else {
return inc.items.list[0..inc.len];
}
return switch (inc.*) {
.single => |*single| single[0..1],
.list => |list| list,
};
}

pub fn deinit(self: *const IncrementalSnapshotsList, allocator: std.mem.Allocator) void {
if (self.len == 1) {
_ = &self.items.single; // safety check
} else {
allocator.free(self.items.list[0..self.len]);
switch (self.*) {
.single => {},
.list => |list| allocator.free(list),
}
}

pub const EMPTY: IncrementalSnapshotsList = .{
.len = 0,
.items = .{ .list = &[_]SlotAndHash{} },
};
/// Can optionally and safely have `.deinit` called.
pub const EMPTY: IncrementalSnapshotsList = .{ .list = &.{} };

/// The returned snapshot collection can optionally and safely have `.deinit` called.
pub fn initSingle(single: SlotAndHash) IncrementalSnapshotsList {
return .{
.len = 1,
.items = .{ .single = single },
};
return .{ .single = single };
}

/// Responsibility to `.deinit` the returned snapshot collection falls to the caller in order to free `list`, if `list` was allocated.
/// Responsibility to `.deinit` the returned snapshot list falls to the caller in order to free `list`, if `list` was allocated.
/// Asserts `list.len != 1`.
pub fn initList(list: []const SlotAndHash) IncrementalSnapshotsList {
std.debug.assert(list.len != 1);
return .{
.len = list.len,
.items = .{ .list = list.ptr },
};
return .{ .list = list };
}

/// Responsibility to `.deinit` the returned snapshot collection with the specified allocator falls to the caller.
@@ -1235,52 +1140,36 @@ pub const SnapshotHashes = struct {
}

pub fn clone(inc: *const IncrementalSnapshotsList, allocator: std.mem.Allocator) !IncrementalSnapshotsList {
return .{
.len = inc.len,
.items = if (inc.len == 1)
.{ .single = inc.items.single }
else
.{ .list = (try allocator.dupe(SlotAndHash, inc.items.list[0..inc.len])).ptr },
return switch (inc.*) {
.single => |single| .{ .single = single },
.list => |list| .{ .list = try allocator.dupe(SlotAndHash, list) },
};
}

pub fn equals(
self: *const IncrementalSnapshotsList,
other: *const IncrementalSnapshotsList,
) bool {
if (self.len != other.len) return false;
for (self.getSlice(), other.getSlice()) |*a, *b| {
if (!a.equals(b)) return false;
}
return true;
fn bincodeSerializeFn(writer: anytype, inc_list: anytype, params: bincode.Params) !void {
try bincode.write(writer, inc_list.getSlice(), params);
}

const bincode_impl = struct {
fn serializeFn(writer: anytype, inc_list: anytype, params: bincode.Params) !void {
try bincode.write(writer, inc_list.getSlice(), params);
}

fn deserializeFn(allocator: std.mem.Allocator, reader: anytype, params: bincode.Params) !IncrementalSnapshotsList {
const faililng_allocator = sig.utils.allocators.failing.allocator(.{});

const maybe_len = try bincode.readIntAsLength(usize, reader, params);
const len = maybe_len orelse return error.IncrementalListTooBig;
switch (len) {
0 => return EMPTY,
1 => return initSingle(try bincode.read(faililng_allocator, SlotAndHash, reader, params)),
else => {
const list = try allocator.alloc(SlotAndHash, len);
errdefer allocator.free(list);
for (list) |*sah| sah.* = try bincode.read(faililng_allocator, SlotAndHash, reader, params);
return initList(list);
},
}
fn bincodeDeserializeFn(allocator: std.mem.Allocator, reader: anytype, params: bincode.Params) !IncrementalSnapshotsList {
const faililng_allocator = sig.utils.allocators.failing.allocator(.{});

const maybe_len = try bincode.readIntAsLength(usize, reader, params);
const len = maybe_len orelse return error.IncrementalListTooBig;
switch (len) {
0 => return EMPTY,
1 => return initSingle(try bincode.read(faililng_allocator, SlotAndHash, reader, params)),
else => {
const list = try allocator.alloc(SlotAndHash, len);
errdefer allocator.free(list);
for (list) |*sah| sah.* = try bincode.read(faililng_allocator, SlotAndHash, reader, params);
return initList(list);
},
}
}

fn freeFn(allocator: std.mem.Allocator, inc_list: anytype) void {
IncrementalSnapshotsList.deinit(&inc_list, allocator);
}
};
fn bincodeFreeFn(allocator: std.mem.Allocator, inc_list: anytype) void {
IncrementalSnapshotsList.deinit(&inc_list, allocator);
}
};
};

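
Note that the wire format is untouched by this change: both before and after, the custom serializer writes whatever `getSlice()` yields as a length-prefixed sequence, and the deserializer branches on the decoded length so that the zero- and one-element cases never touch the allocator (the single item is read through a failing allocator, which would trip on any allocation attempt). Below is a rough, self-contained sketch of that branching; the fixed-width little-endian layout and the helper names are assumptions made purely for illustration, since the real decoding goes through sig's bincode module shown above:

```zig
const std = @import("std");

const SlotAndHash = struct { slot: u64, hash: [32]u8 }; // simplified stand-in

const IncrementalSnapshotsList = union(enum) {
    single: SlotAndHash,
    list: []const SlotAndHash,
};

// Mirrors the branching in bincodeDeserializeFn: lengths 0 and 1 never allocate.
fn readIncremental(allocator: std.mem.Allocator, reader: anytype) !IncrementalSnapshotsList {
    const raw_len = try reader.readInt(u64, .little);
    const len = std.math.cast(usize, raw_len) orelse return error.IncrementalListTooBig;
    switch (len) {
        0 => return .{ .list = &.{} },
        1 => return .{ .single = try readSlotAndHash(reader) },
        else => {
            const list = try allocator.alloc(SlotAndHash, len);
            errdefer allocator.free(list);
            for (list) |*sah| sah.* = try readSlotAndHash(reader);
            return .{ .list = list };
        },
    }
}

fn readSlotAndHash(reader: anytype) !SlotAndHash {
    var sah: SlotAndHash = undefined;
    sah.slot = try reader.readInt(u64, .little);
    try reader.readNoEof(&sah.hash);
    return sah;
}

test "a one-element list decodes into the inline representation" {
    var bytes: [8 + 8 + 32]u8 = undefined;
    std.mem.writeInt(u64, bytes[0..8], 1, .little); // sequence length
    std.mem.writeInt(u64, bytes[8..16], 123, .little); // slot
    @memset(bytes[16..], 0xaa); // hash
    var fbs = std.io.fixedBufferStream(&bytes);
    const decoded = try readIncremental(std.testing.allocator, fbs.reader());
    try std.testing.expectEqual(@as(u64, 123), decoded.single.slot);
}
```
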
4 changes: 1 addition & 3 deletions src/gossip/message.zig
@@ -296,9 +296,7 @@ test "gossip.message: pull request serializes and deserializes" {
try testing.expectEqualSlices(u8, rust_bytes[0..], serialized);

const deserialized = try bincode.readFromSlice(testing.allocator, GossipMessage, serialized, bincode.Params.standard);
try std.testing.expectEqual(std.meta.activeTag(pull), std.meta.activeTag(deserialized));
try std.testing.expectEqual(pull.PullRequest[0], deserialized.PullRequest[0]);
try std.testing.expect(pull.PullRequest[1].equals(&deserialized.PullRequest[1]));
try std.testing.expectEqualDeep(pull, deserialized);
}

test "gossip.message: push message serializes and deserializes correctly" {
21 changes: 2 additions & 19 deletions src/gossip/table.zig
@@ -140,23 +140,6 @@ pub const GossipTable = struct {
pub fn wasInserted(self: InsertResult) bool {
return self == .InsertedNewEntry or self == .OverwroteExistingEntry;
}

pub fn equals(self: InsertResult, other: InsertResult) bool {
if (self != std.meta.activeTag(other)) return false;
return switch (self) {
.InsertedNewEntry,
.IgnoredOldValue,
.IgnoredDuplicateValue,
.IgnoredTimeout,
.GossipTableFull,
=> |_| true,

.OverwroteExistingEntry => |*a| blk: {
const b = &other.OverwroteExistingEntry;
break :blk a.equals(b);
},
};
}
};

pub fn insert(self: *Self, value: SignedGossipData, now: u64) !InsertResult {
@@ -1075,7 +1058,7 @@ test "insert and get contact_info" {

// test insertion
const result = try table.insert(gossip_value, 0);
try std.testing.expect(result.equals(.InsertedNewEntry));
try std.testing.expectEqual(.InsertedNewEntry, result);

// test retrieval
var buf: [100]ContactInfo = undefined;
@@ -1085,7 +1068,7 @@

// test re-insertion
const result2 = try table.insert(gossip_value, 0);
try std.testing.expect(result2.equals(.IgnoredDuplicateValue));
try std.testing.expectEqual(.IgnoredDuplicateValue, result2);

// test re-insertion with greater wallclock
gossip_value.data.LegacyContactInfo.wallclock += 2;
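
With the hand-written `InsertResult.equals` gone, the table tests above rely on `std.testing.expectEqual` comparing tagged unions directly: it checks the active tag and then the payload, and an enum literal such as `.InsertedNewEntry` coerces to the union because that variant carries no payload. A small stand-alone illustration follows; the union here is a made-up stand-in, not sig's actual `InsertResult`:

```zig
const std = @import("std");

// Hypothetical stand-in for GossipTable.InsertResult; only the shape matters here.
const InsertResult = union(enum) {
    InsertedNewEntry,
    OverwroteExistingEntry: u32,
    IgnoredDuplicateValue,
};

test "expectEqual compares tagged unions by tag and payload" {
    const inserted: InsertResult = .InsertedNewEntry;
    // The enum literal coerces to the union because the variant has a void payload.
    try std.testing.expectEqual(.InsertedNewEntry, inserted);

    const overwrote: InsertResult = .{ .OverwroteExistingEntry = 7 };
    try std.testing.expectEqual(InsertResult{ .OverwroteExistingEntry = 7 }, overwrote);
}
```
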
