From db9ad0df2baded4ca478663013f823491bddcd7c Mon Sep 17 00:00:00 2001 From: Odysseas Gabrielides Date: Tue, 5 Nov 2024 12:31:11 +0200 Subject: [PATCH] refactor: block serialisation and addition of tests (#47) * added: write_compact_size and test * more work * suggestions * suggestions * safety check on write_fixed_bitset --- dash/src/blockdata/block.rs | 16 +- .../special_transaction/coinbase.rs | 23 +- .../special_transaction/quorum_commitment.rs | 135 ++++------ dash/src/consensus/encode.rs | 254 +++++++++++++++++- 4 files changed, 328 insertions(+), 100 deletions(-) diff --git a/dash/src/blockdata/block.rs b/dash/src/blockdata/block.rs index 6a754bb439..52ad6dc576 100644 --- a/dash/src/blockdata/block.rs +++ b/dash/src/blockdata/block.rs @@ -476,11 +476,11 @@ mod tests { "010000004ddccd549d28f385ab457e98d1b11ce80bfea2c5ab93015ade4973e400000000bf4473e53794beae34e64fccc471dace6ae544180816f89591894e0f417a914cd74d6e49ffff001d323b3a7b0201000000010000000000000000000000000000000000000000000000000000000000000000ffffffff0804ffff001d026e04ffffffff0100f2052a0100000043410446ef0102d1ec5240f0d061a4246c1bdef63fc3dbab7733052fbbf0ecd8f41fc26bf049ebb4f9527f374280259e7cfa99c48b0e3f39c51347a19a5819651503a5ac00000000010000000321f75f3139a013f50f315b23b0c9a2b6eac31e2bec98e5891c924664889942260000000049483045022100cb2c6b346a978ab8c61b18b5e9397755cbd17d6eb2fe0083ef32e067fa6c785a02206ce44e613f31d9a6b0517e46f3db1576e9812cc98d159bfdaf759a5014081b5c01ffffffff79cda0945903627c3da1f85fc95d0b8ee3e76ae0cfdc9a65d09744b1f8fc85430000000049483045022047957cdd957cfd0becd642f6b84d82f49b6cb4c51a91f49246908af7c3cfdf4a022100e96b46621f1bffcf5ea5982f88cef651e9354f5791602369bf5a82a6cd61a62501fffffffffe09f5fe3ffbf5ee97a54eb5e5069e9da6b4856ee86fc52938c2f979b0f38e82000000004847304402204165be9a4cbab8049e1af9723b96199bfd3e85f44c6b4c0177e3962686b26073022028f638da23fc003760861ad481ead4099312c60030d4cb57820ce4d33812a5ce01ffffffff01009d966b01000000434104ea1feff861b51fe3f5f8a3b12d0f4712db80e919548a80839fc47c6a21e66d957e9c5d8cd108c7a2d2324bad71f9904ac0ae7336507d785b17a2c115e427a32fac" ); - let prevhash = hex!("85691f6a060e65346c281ed25b99dbd18c139053562ccd001d00000000000000"); - let merkle = hex!("377b6aa24658b7a0ae7b73f0673d047a291de5cbc06907038b288b2ebf491c2c"); + let prevhash = hex!("0ec684405b58b3a0f0144c9a92c7d4296587ba6fc71041fff2130a038a000000"); + let merkle = hex!("78e259b490cfc8a8e50e1933afde3f777a47bdac8b61d504a51b68dede2ac181"); let work_bytes: [u8; 32] = - hex!("000000000000000000000000000000000000000000000000050ec30af44bf25e") + hex!("0000000000000000000000000000000000000000000000000000000000f7b6f1") .try_into() .unwrap(); let work = Work::from_be_bytes(work_bytes); @@ -493,16 +493,16 @@ mod tests { assert_eq!(serialize(&real_decode.header.prev_blockhash), prevhash); assert_eq!(real_decode.header.merkle_root, real_decode.compute_merkle_root().unwrap()); assert_eq!(serialize(&real_decode.header.merkle_root), merkle); - assert_eq!(real_decode.header.time, 1685447065); - assert_eq!(real_decode.header.bits, CompactTarget::from_consensus(422747587)); - assert_eq!(real_decode.header.nonce, 2456102546); + assert_eq!(real_decode.header.time, 1730283725); + assert_eq!(real_decode.header.bits, CompactTarget::from_consensus(503384208)); + assert_eq!(real_decode.header.nonce, 394542); assert_eq!(real_decode.header.work(), work); assert_eq!( real_decode.header.validate_pow(real_decode.header.target()).unwrap(), real_decode.block_hash() ); - assert_eq!(real_decode.header.difficulty(), 84852220); - 
assert_eq!(real_decode.header.difficulty_float(), 84852220.19239795);
+        assert_eq!(real_decode.header.difficulty(), 0);
+        assert_eq!(real_decode.header.difficulty_float(), 0.0037797675075301206);
         // [test] TODO: check the transaction data
         assert_eq!(real_decode.size(), some_block.len());
diff --git a/dash/src/blockdata/transaction/special_transaction/coinbase.rs b/dash/src/blockdata/transaction/special_transaction/coinbase.rs
index e26b9d3144..921d6d4924 100644
--- a/dash/src/blockdata/transaction/special_transaction/coinbase.rs
+++ b/dash/src/blockdata/transaction/special_transaction/coinbase.rs
@@ -23,6 +23,7 @@
 use crate::hash_types::{MerkleRootMasternodeList, MerkleRootQuorums};
 use crate::io::{Error, ErrorKind};
 use crate::{VarInt, io};
 use crate::bls_sig_utils::BLSSignature;
+use crate::consensus::encode::{compact_size_len, read_compact_size, write_compact_size};
 /// A Coinbase payload. This is contained as the payload of a coinbase special transaction.
 /// The Coinbase payload is described in DIP4.
@@ -43,15 +44,16 @@ pub struct CoinbasePayload {
 impl CoinbasePayload {
     /// The size of the payload in bytes.
     /// version(2) + height(4) + merkle_root_masternode_list(32) + merkle_root_quorums(32)
-    /// in addition to the above, if version >= 3: asset_locked_amount(8) + best_cl_height(4) +
-    /// best_cl_signature(VarInt(len) + len)
+    /// in addition to the above, if version >= 3: asset_locked_amount(8) + best_cl_height(compact_size) +
+    /// best_cl_signature(96)
     pub fn size(&self) -> usize {
         let mut size: usize = 2 + 4 + 32 + 32;
         if self.version >= 3 {
-            size += 4 + 8;
-            if let Some(sig) = &self.best_cl_signature {
-                size += VarInt(sig.len() as u64).len() + sig.len()
+            size += 96;
+            if let Some(best_cl_height) = self.best_cl_height {
+                size += compact_size_len(best_cl_height);
             }
+            size += 8;
         }
         size
     }
@@ -66,7 +68,7 @@ impl Encodable for CoinbasePayload {
         len += self.merkle_root_quorums.consensus_encode(w)?;
         if self.version >= 3 {
             if let Some(best_cl_height) = self.best_cl_height {
-                len += best_cl_height.consensus_encode(w)?;
+                len += write_compact_size(w, best_cl_height)?;
             } else {
                 return Err(Error::new(ErrorKind::InvalidInput, "best_cl_height is not set"));
             }
@@ -94,12 +96,7 @@ impl Decodable for CoinbasePayload {
         let merkle_root_masternode_list = MerkleRootMasternodeList::consensus_decode(r)?;
         let merkle_root_quorums = MerkleRootQuorums::consensus_decode(r)?;
         let best_cl_height = if version >= 3 {
-            let value = u8::consensus_decode(r)?;
-            match value {
-                253 => Some(u16::consensus_decode(r)? as u32),
-                254 => Some(u32::consensus_decode(r)?),
-                _ => Some(value as u32)
-            }
+            Some(read_compact_size(r)?)
         } else { None };
         let best_cl_signature =
             if version >= 3 { Some(BLSSignature::consensus_decode(r)?) } else { None };
@@ -126,7 +123,7 @@ mod tests {
     #[test]
     fn size() {
-        let test_cases: &[(usize, u16)] = &[(70, 2), (179, 3)];
+        let test_cases: &[(usize, u16)] = &[(70, 2), (177, 3)];
         for (want, version) in test_cases.iter() {
             let payload = CoinbasePayload {
                 height: 1000,
diff --git a/dash/src/blockdata/transaction/special_transaction/quorum_commitment.rs b/dash/src/blockdata/transaction/special_transaction/quorum_commitment.rs
index 16450ec843..23dbdc8256 100644
--- a/dash/src/blockdata/transaction/special_transaction/quorum_commitment.rs
+++ b/dash/src/blockdata/transaction/special_transaction/quorum_commitment.rs
@@ -23,6 +23,7 @@
 use crate::consensus::{Decodable, Encodable, encode};
 use crate::hash_types::{QuorumHash, QuorumVVecHash};
 use crate::prelude::*;
 use crate::{VarInt, io};
+use crate::consensus::encode::{compact_size_len, fixed_bitset_len, read_compact_size, read_fixed_bitset, write_compact_size, write_fixed_bitset};
 /// A Quorum Finalization Commitment. It is described in the finalization section of DIP6:
 /// [dip-0006.md#6-finalization-phase](https://github.com/dashpay/dips/blob/master/dip-0006.md#6-finalization-phase)
@@ -35,8 +36,8 @@ pub struct QuorumFinalizationCommitment {
     pub llmq_type: u8,
     pub quorum_hash: QuorumHash,
     pub quorum_index: Option<i16>,
-    pub signers: Vec<u8>,
-    pub valid_members: Vec<u8>,
+    pub signers: Vec<bool>,
+    pub valid_members: Vec<bool>,
     pub quorum_public_key: BLSPublicKey,
     pub quorum_vvec_hash: QuorumVVecHash,
     pub quorum_sig: BLSSignature,
@@ -47,8 +48,10 @@ impl QuorumFinalizationCommitment {
     /// The size of the payload in bytes.
     pub fn size(&self) -> usize {
         let mut size = 2 + 1 + 32 + 48 + 32 + 96 + 96;
-        size += VarInt(self.signers.len() as u64).len() + self.signers.len();
-        size += VarInt(self.valid_members.len() as u64).len() + self.valid_members.len();
+        size += compact_size_len(self.signers.len() as u32);
+        size += fixed_bitset_len(self.signers.as_slice(), self.signers.len());
+        size += compact_size_len(self.valid_members.len() as u32);
+        size += fixed_bitset_len(self.valid_members.as_slice(), self.valid_members.len());
         if self.version == 2 || self.version == 4 {
             size += 2;
         }
@@ -67,8 +70,8 @@
                 len += q_index.consensus_encode(w)?;
             }
         }
-        len += self.signers.consensus_encode(w)?;
-        len += self.valid_members.consensus_encode(w)?;
+        len += write_compact_size(w, self.signers.len() as u32)?;
+        len += write_fixed_bitset(w, self.signers.as_slice(), self.signers.iter().len())?;
+        len += write_compact_size(w, self.valid_members.len() as u32)?;
+        len += write_fixed_bitset(w, self.valid_members.as_slice(), self.valid_members.iter().len())?;
         len += self.quorum_public_key.consensus_encode(w)?;
         len += self.quorum_vvec_hash.consensus_encode(w)?;
         len += self.quorum_sig.consensus_encode(w)?;
@@ -96,8 +101,8 @@
             llmq_type,
             quorum_hash,
             quorum_index,
-            signers: signers.iter().map(|&b| b as u8).collect(),
-            valid_members: valid_members.iter().map(|&b| b as u8).collect(),
+            signers,
+            valid_members,
             quorum_public_key,
             quorum_vvec_hash,
             quorum_sig,
@@ -144,54 +149,6 @@ impl Decodable for QuorumCommitmentPayload {
     }
 }

-fn read_compact_size<R: io::Read>(r: &mut R) -> io::Result<u64> {
-    let mut marker = [0u8; 1];
-    r.read_exact(&mut marker)?;
-    match marker[0] {
-        0xFD => {
-            // Read the next 2 bytes as a little-endian u16
-            let mut buf = [0u8; 2];
-            r.read_exact(&mut buf)?;
-            Ok(u16::from_le_bytes(buf) as u64)
-        }
-        0xFE => {
-            // Read the next 4 bytes as a little-endian u32
-            let mut buf = [0u8; 4];
-            r.read_exact(&mut buf)?;
-            Ok(u32::from_le_bytes(buf) as u64)
-        }
-        0xFF => {
-            // Read the next 8 bytes as a little-endian u64
-            let mut buf = [0u8; 8];
-            r.read_exact(&mut buf)?;
-            Ok(u64::from_le_bytes(buf))
-        }
-        value => {
-            // For values less than 253, the value is stored directly in the marker byte
-            Ok(value as u64)
-        }
-    }
-}
-
-fn read_fixed_bitset<R: io::Read>(r: &mut R, size: usize) -> std::io::Result<Vec<bool>> {
-    // Calculate the number of bytes needed
-    let num_bytes = (size + 7) / 8;
-    let mut bytes = vec![0u8; num_bytes];
-
-    // Read bytes from the reader
-    r.read_exact(&mut bytes)?;
-
-    // Unpack bits into a vector of bools
-    let mut bits = Vec::with_capacity(size);
-    for p in 0..size {
-        let byte = bytes[p / 8];
-        let bit = (byte >> (p % 8)) & 1;
-        bits.push(bit != 0);
-    }
-
-    Ok(bits)
-}
-
 #[cfg(test)]
 mod tests {
     use hashes::Hash;
@@ -199,31 +156,53 @@
     use crate::bls_sig_utils::{BLSPublicKey, BLSSignature};
     use crate::consensus::Encodable;
     use crate::hash_types::{QuorumHash, QuorumVVecHash};
-    use crate::transaction::special_transaction::quorum_commitment::{
-        QuorumCommitmentPayload, QuorumFinalizationCommitment,
-    };
+    use crate::transaction::special_transaction::quorum_commitment::{QuorumCommitmentPayload, QuorumFinalizationCommitment};
     #[test]
     fn size() {
-        let want = 325;
-        let payload = QuorumCommitmentPayload {
-            version: 0,
-            height: 0,
-            finalization_commitment: QuorumFinalizationCommitment {
+        {
+            let want = 317;
+            let payload = QuorumCommitmentPayload {
+                version: 0,
+                height: 0,
+                finalization_commitment: QuorumFinalizationCommitment {
                 version: 1,
+                    llmq_type: 0,
+                    quorum_hash: QuorumHash::all_zeros(),
+                    quorum_index: None,
+                    signers: vec![true, false, true, true, false],
+                    valid_members: vec![false, true, false, true],
+                    quorum_public_key: BLSPublicKey::from([0; 48]),
+                    quorum_vvec_hash: QuorumVVecHash::all_zeros(),
+                    quorum_sig: BLSSignature::from([0; 96]),
+                    sig: BLSSignature::from([0; 96]),
+                },
+            };
+            let actual = payload.consensus_encode(&mut Vec::new()).unwrap();
+            assert_eq!(payload.size(), want);
+            assert_eq!(actual, want);
+        }
+
+        {
+            let want = 319;
+            let payload = QuorumCommitmentPayload {
            version: 0,
-            llmq_type: 0,
-            quorum_hash: QuorumHash::all_zeros(),
-            quorum_index: None,
-            signers: vec![1, 2, 3, 4, 5],
-            valid_members: vec![6, 7, 8, 9, 0],
-            quorum_public_key: BLSPublicKey::from([0; 48]),
-            quorum_vvec_hash: QuorumVVecHash::all_zeros(),
-            quorum_sig: BLSSignature::from([0; 96]),
-            sig: BLSSignature::from([0; 96]),
-            },
-        };
-        let actual = payload.consensus_encode(&mut Vec::new()).unwrap();
-        assert_eq!(payload.size(), want);
-        assert_eq!(actual, want);
+                height: 0,
+                finalization_commitment: QuorumFinalizationCommitment {
+                    version: 2,
+                    llmq_type: 0,
+                    quorum_hash: QuorumHash::all_zeros(),
+                    quorum_index: Some(1),
+                    signers: vec![true, false, true, true, false, true, false],
+                    valid_members: vec![false, true, false, true, false, true],
+                    quorum_public_key: BLSPublicKey::from([0; 48]),
+                    quorum_vvec_hash: QuorumVVecHash::all_zeros(),
+                    quorum_sig: BLSSignature::from([0; 96]),
+                    sig: BLSSignature::from([0; 96]),
+                },
+            };
+            let actual = payload.consensus_encode(&mut Vec::new()).unwrap();
+            assert_eq!(payload.size(), want);
+            assert_eq!(actual, want);
+        }
     }
 }
diff --git a/dash/src/consensus/encode.rs b/dash/src/consensus/encode.rs
index 268e9bc3dd..8b3cf7f9bc 100644
--- a/dash/src/consensus/encode.rs
+++ b/dash/src/consensus/encode.rs
@@ -32,7 +32,7 @@
 use core::convert::From;
 use core::{fmt, mem, u32};
-
+use std::io::Write;
 #[cfg(feature = "core-block-hash-use-x11")]
 use hashes::hash_x11;
 use hashes::{Hash, hash160, sha256, sha256d};
@@ -905,6 +905,149 @@ impl Decodable for TapLeafHash {
     }
 }

+pub fn read_compact_size<R: Read + ?Sized>(r: &mut R) -> io::Result<u32> {
+    let mut marker = [0u8; 1];
+    r.read_exact(&mut marker)?;
+    match marker[0] {
+        0xFD => {
+            let mut buf = [0u8; 2];
+            r.read_exact(&mut buf)?;
+            let value = u16::from_le_bytes(buf) as u32;
+            if value < 0xFD {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidData,
+                    "Non-minimal compact size encoding",
+                ));
+            }
+            Ok(value)
+        }
+        0xFE => {
+            let mut buf = [0u8; 4];
+            r.read_exact(&mut buf)?;
+            let value = u32::from_le_bytes(buf);
+            if value <= 0xFFFF {
+                return Err(io::Error::new(
+                    io::ErrorKind::InvalidData,
+                    "Non-minimal compact size encoding",
+                ));
+            }
+            Ok(value)
+        }
+        0xFF => {
+            // Value is too large to fit in u32
+            Err(io::Error::new(
+                io::ErrorKind::InvalidData,
+                "CompactSize value exceeds u32::MAX",
+            ))
+        }
+        value => Ok(value as u32),
+    }
+}
+
+pub fn write_compact_size<W: Write + ?Sized>(w: &mut W, value: u32) -> io::Result<usize> {
+    let bytes_written = if value < 253 {
+        // For values less than 253, write the value as a single byte.
+        w.write_all(&[value as u8])?;
+        1 // 1 byte written
+    } else if value <= 0xFFFF {
+        // For values from 253 to 65535, write 0xFD followed by the value as a little-endian u16.
+        w.write_all(&[0xFDu8])?;
+        w.write_all(&(value as u16).to_le_bytes())?;
+        3 // 1 byte marker + 2 bytes for u16
+    } else {
+        // For values from 65536 to 0xFFFFFFFF, write 0xFE followed by the value as a little-endian u32.
+        w.write_all(&[0xFEu8])?;
+        w.write_all(&value.to_le_bytes())?;
+        5 // 1 byte marker + 4 bytes for u32
+    };
+    Ok(bytes_written)
+}
+
+pub fn compact_size_len(value: u32) -> usize {
+    let mut size: usize = 0;
+    if value < 253 {
+        size += 1;
+    }
+    else if value < 65536 {
+        size += 3;
+    }
+    else {
+        size += 5;
+    }
+    size
+}
+
+pub fn read_fixed_bitset<R: Read + ?Sized>(r: &mut R, size: usize) -> std::io::Result<Vec<bool>> {
+    // Define a reasonable maximum size to prevent excessive memory allocation
+    const MAX_BITSET_SIZE: usize = 1_000_000;
+    if size > MAX_BITSET_SIZE {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidInput,
+            "Bitset size exceeds maximum allowed value",
+        ));
+    }
+    // Calculate the number of bytes needed
+    let num_bytes = (size + 7) / 8;
+    let mut bytes = vec![0u8; num_bytes];
+
+    // Read bytes from the reader
+    r.read_exact(&mut bytes)?;
+
+    // Unpack bits into a vector of bools
+    let mut bits = Vec::with_capacity(size);
+    for p in 0..size {
+        let byte = bytes[p / 8];
+        let bit = (byte >> (p % 8)) & 1;
+        bits.push(bit != 0);
+    }
+
+    Ok(bits)
+}
+
+pub fn write_fixed_bitset<W: Write + ?Sized>(w: &mut W, bits: &[bool], size: usize) -> io::Result<usize> {
+    if bits.len() < size {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidInput,
+            "Bits length is less than the specified size",
+        ));
+    }
+    // Define a reasonable maximum size to prevent excessive memory allocation
+    const MAX_BITSET_SIZE: usize = 1_000_000;
+    if size > MAX_BITSET_SIZE {
+        return Err(io::Error::new(
+            io::ErrorKind::InvalidInput,
+            "Bitset size exceeds maximum allowed value",
+        ));
+    }
+    // Calculate the number of bytes needed to represent 'size' bits
+    let num_bytes = (size + 7) / 8;
+    let mut bytes = vec![0u8; num_bytes];
+
+    // Determine the minimum size to handle cases where bits.len() < size
+    let ms = std::cmp::min(size, bits.len());
+
+    // Pack the bits into the byte buffer
+    for p in 0..ms {
+        if bits[p] {
+            bytes[p / 8] |= 1 << (p % 8);
+        }
+    }
+
+    // Write the bytes to the writer
+    w.write_all(&bytes)?;
+
+    // Return the number of bytes written
+    Ok(bytes.len())
+}
+
+pub fn fixed_bitset_len(bits: &[bool], size: usize) -> usize {
+    // Calculate the minimum size between `size` and `bits.len()`
+    let ms = std::cmp::min(size, bits.len());
+
+    // Calculate the number of bytes needed to represent `ms` bits
+    (ms + 7) / 8
+}
+
 #[cfg(test)]
 mod tests {
     use core::fmt;
@@ -1306,4 +1449,113 @@
             );
         }
     }
+
+    #[test]
+    fn test_compact_size_round_trip() {
+        let test_values = vec![
+            0u32,
+            1,
+            252,
+            253,
+            254,
+            255,
+            300,
+            5000,
+            65535,
+            65536,
+            70000,
+            1_000_000,
+            u32::MAX,
+        ];
+
+        for &value in &test_values {
+            let mut buffer = Vec::new();
+            // Write the value to the buffer
+            let bytes_written = write_compact_size(&mut buffer, value).expect("Failed to write");
+            // Read the value back from the buffer
+            let mut cursor = Cursor::new(&buffer);
+            let read_value = read_compact_size(&mut cursor).expect("Failed to read");
+
+            // Assert that the original value matches the deserialized value
+            assert_eq!(
+                value, read_value,
+                "Deserialized value does not match original for value {}",
+                value
+            );
+
+            // Ensure that we've consumed all bytes (no extra bytes left)
+            let position = cursor.position();
+            assert_eq!(
+                position as usize,
+                buffer.len(),
+                "Not all bytes were consumed for value {}",
+                value
+            );
+        }
+    }
+
+    #[test]
+    fn test_fixed_bitset_round_trip() {
+        let test_cases = vec![
+            (vec![], 0, true), // (bits, size, expect_success)
+            (vec![true, false, true, false, true, false, true, false], 8, true),
+            (vec![true; 10], 10, true),
+            (vec![false; 15], 15, true),
+            (vec![true, false, true], 16, false), // size greater than bits.len()
+            (
+                vec![
+                    true, false, true, false, true, false, true, false, true, false, true, false,
+                    true, false, true, false, true, false, true, false, true, false, true, false,
+                ],
+                24,
+                true,
+            ),
+        ];
+
+        for (bits, size, expect_success) in test_cases {
+            let mut buffer = Vec::new();
+            // Attempt to write the bitset to the buffer
+            let result = write_fixed_bitset(&mut buffer, &bits, size);
+
+            if expect_success {
+                // Expect the write to succeed
+                let bytes_written = result.expect("Failed to write");
+                // Calculate expected bytes written
+                let expected_bytes = (size + 7) / 8;
+                assert_eq!(
+                    bytes_written, expected_bytes,
+                    "Incorrect number of bytes written for bitset with size {}",
+                    size
+                );
+
+                // Read the bitset back from the buffer
+                let mut cursor = Cursor::new(&buffer);
+                let read_bits = read_fixed_bitset(&mut cursor, size).expect("Failed to read");
+
+                // Assert that the original bits match the deserialized bits
+                assert_eq!(
+                    read_bits, bits,
+                    "Deserialized bits do not match original for size {}",
+                    size
+                );
+
+                // Ensure that we've consumed all bytes (no extra bytes left)
+                let position = cursor.position();
+                assert_eq!(
+                    position as usize,
+                    buffer.len(),
+                    "Not all bytes were consumed for size {}",
+                    size
+                );
+            } else {
+                // Expect the write to fail
+                assert!(
+                    result.is_err(),
+                    "Expected write to fail for bits.len() < size (size: {}, bits.len(): {})",
+                    size,
+                    bits.len()
+                );
+            }
+        }
+    }
 }
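
For reference, a minimal caller-side sketch of how the new helpers compose (hypothetical example, not part of the patch; it assumes the crate is consumed as `dashcore`, that `consensus::encode` exposes these functions publicly as above, and that the crate's io error type is std's): bits are packed least-significant-bit first within each byte and are preceded by their count as a compact size, so a five-entry signers vector with bits 0, 2 and 3 set serializes to the two bytes 0x05, 0x0D.

use dashcore::consensus::encode::{
    read_compact_size, read_fixed_bitset, write_compact_size, write_fixed_bitset,
};
use std::io::Cursor;

fn main() -> std::io::Result<()> {
    // Bits 0, 2 and 3 set, packed LSB-first: 0b0000_1101 = 0x0D.
    let signers = vec![true, false, true, true, false];

    let mut buf = Vec::new();
    write_compact_size(&mut buf, signers.len() as u32)?; // member count as a compact size
    write_fixed_bitset(&mut buf, &signers, signers.len())?; // packed bitset
    assert_eq!(buf, vec![0x05, 0x0D]);

    // Round trip: read the count back, then unpack exactly that many bits.
    let mut cursor = Cursor::new(&buf);
    let count = read_compact_size(&mut cursor)? as usize;
    assert_eq!(read_fixed_bitset(&mut cursor, count)?, signers);
    Ok(())
}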