contrib: support reading XORed blocks in linearize-data.py script
Partly fixes issue bitcoin#30599.
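
Since v28, Bitcoin Core obfuscates the files in its blocks directory: every byte is XORed with a random 8-byte key that the node generates on first start and stores in blocks/xor.dat, and the key index is the byte's absolute offset within the file. The script therefore has to de-obfuscate everything it reads from blk*.dat; block directories created by pre-v28 versions have no xor.dat, and falling back to an all-zero key turns the XOR into a no-op. A minimal sketch of the scheme the patch below implements (function names here are illustrative, not part of the script):

```python
import os

XOR_KEY_SIZE = 8  # matches InitBlocksdirXorKey::xor_key.size() in Bitcoin Core

def load_xor_key(blocks_dir):
    """Return the blocksdir obfuscation key, or all zeros for pre-v28 dirs."""
    try:
        with open(os.path.join(blocks_dir, "xor.dat"), "rb") as f:
            return f.read(XOR_KEY_SIZE)
    except FileNotFoundError:
        return bytes(XOR_KEY_SIZE)  # XOR with zeros is a no-op

def deobfuscate(data, key, offset):
    """XOR bytes that were read at absolute file offset `offset` against the rolling key."""
    return bytes(b ^ key[(offset + i) % len(key)] for i, b in enumerate(data))
```

Note that the key stream is aligned to the file, not to the individual read, which is why the absolute offset has to enter the index calculation.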
Sebastian Falbesoner committed Aug 7, 2024
1 parent 676abd1 commit 6e93928
Showing 2 changed files with 22 additions and 9 deletions.
contrib/linearize/linearize-data.py (22 additions, 5 deletions)
@@ -76,6 +76,15 @@ def getFirstBlockFileId(block_dir_path):
     blkId = int(firstBlkFn[3:8])
     return blkId
 
+def read_xor_key(blocks_path):
+    NUM_XOR_BYTES = 8 # From InitBlocksdirXorKey::xor_key.size()
+    try:
+        with open(os.path.join(blocks_path, "xor.dat"), "rb") as xor_f:
+            return xor_f.read(NUM_XOR_BYTES)
+    # support also blockdirs created with pre-v28 versions, where no xor key exists yet
+    except FileNotFoundError:
+        return bytes([0] * NUM_XOR_BYTES)
+
 # Block header and extent on disk
 BlockExtent = namedtuple('BlockExtent', ['fn', 'offset', 'inhdr', 'blkhdr', 'size'])
 
@@ -95,6 +104,7 @@ def __init__(self, settings, blkindex, blkmap):
         self.outFname = None
         self.blkCountIn = 0
         self.blkCountOut = 0
+        self.xor_key = read_xor_key(self.settings['input'])
 
         self.lastDate = datetime.datetime(2000, 1, 1)
         self.highTS = 1408893517 - 315360000
@@ -113,6 +123,13 @@ def __init__(self, settings, blkindex, blkmap):
         self.outOfOrderData = {}
         self.outOfOrderSize = 0 # running total size for items in outOfOrderData
 
+    def read_xored(self, f, size):
+        offset = f.tell()
+        data = bytearray(f.read(size))
+        for i in range(len(data)):
+            data[i] ^= self.xor_key[(i + offset) % len(self.xor_key)]
+        return bytes(data)
+
     def writeBlock(self, inhdr, blk_hdr, rawblock):
         blockSizeOnDisk = len(inhdr) + len(blk_hdr) + len(rawblock)
         if not self.fileOutput and ((self.outsz + blockSizeOnDisk) > self.maxOutSz):
@@ -165,7 +182,7 @@ def fetchBlock(self, extent):
         '''Fetch block contents from disk given extents'''
         with open(self.inFileName(extent.fn), "rb") as f:
             f.seek(extent.offset)
-            return f.read(extent.size)
+            return self.read_xored(f, extent.size)
 
     def copyOneBlock(self):
         '''Find the next block to be written in the input, and copy it to the output.'''
@@ -190,7 +207,7 @@ def run(self):
                     print("Premature end of block data")
                     return
 
-            inhdr = self.inF.read(8)
+            inhdr = self.read_xored(self.inF, 8)
             if (not inhdr or (inhdr[0] == "\0")):
                 self.inF.close()
                 self.inF = None
@@ -207,7 +224,7 @@ def run(self):
             inLenLE = inhdr[4:]
             su = struct.unpack("<I", inLenLE)
             inLen = su[0] - 80 # length without header
-            blk_hdr = self.inF.read(80)
+            blk_hdr = self.read_xored(self.inF, 80)
             inExtent = BlockExtent(self.inFn, self.inF.tell(), inhdr, blk_hdr, inLen)
 
             self.hash_str = calc_hash_str(blk_hdr)
@@ -224,7 +241,7 @@ def run(self):
 
             if self.blkCountOut == blkHeight:
                 # If in-order block, just copy
-                rawblock = self.inF.read(inLen)
+                rawblock = self.read_xored(self.inF, inLen)
                 self.writeBlock(inhdr, blk_hdr, rawblock)
 
                 # See if we can catch up to prior out-of-order blocks
@@ -237,7 +254,7 @@ def run(self):
                     # If there is space in the cache, read the data
                     # Reading the data in file sequence instead of seeking and fetching it later is preferred,
                     # but we don't want to fill up memory
-                    self.outOfOrderData[blkHeight] = self.inF.read(inLen)
+                    self.outOfOrderData[blkHeight] = self.read_xored(self.inF, inLen)
                     self.outOfOrderSize += inLen
                 else: # If no space in cache, seek forward
                     self.inF.seek(inLen, os.SEEK_CUR)
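
Because XOR is its own inverse, the same read_xored path works for both obfuscated v28 data and plain pre-v28 data (zero key). The easy thing to get wrong is the offset bookkeeping, since reads rarely start at a multiple of the key length. A self-contained sanity check of that logic (xor_at is a stand-in for read_xored, written here just for the test):

```python
import io

def xor_at(data, key, offset):
    """De-obfuscate bytes that were read starting at absolute file offset `offset`."""
    return bytes(b ^ key[(offset + i) % len(key)] for i, b in enumerate(data))

key = bytes(range(1, 9))               # any non-zero 8-byte key
plain = b"eighty-byte block header..." * 3
f = io.BytesIO(xor_at(plain, key, 0))  # obfuscating and de-obfuscating are the same operation

first = xor_at(f.read(13), key, 0)     # chunk sizes deliberately not multiples of 8
second = xor_at(f.read(), key, 13)     # key index must resume at absolute offset 13
assert first + second == plain
```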
test/functional/feature_loadblock.py (0 additions, 4 deletions)
@@ -26,10 +26,6 @@ def set_test_params(self):
         self.setup_clean_chain = True
         self.num_nodes = 2
         self.supports_cli = False
-        self.extra_args = [
-            ["-blocksxor=0"], # TODO: The linearize scripts should be adjusted to apply any XOR
-            [],
-        ]
 
     def run_test(self):
         self.nodes[1].setnetworkactive(state=False)
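
With linearize-data.py now applying the XOR itself, node 0 in this test can run with default settings, so the -blocksxor=0 workaround and its TODO can be dropped and the test exercises the obfuscated-blocksdir path. A hedged simulation of why the old raw reads failed and the de-XORed reads succeed (regtest network magic; the key is generated here rather than read from xor.dat):

```python
import os

REGTEST_MAGIC = bytes.fromhex("fabfb5da")

# Simulate what a v28 node writes: a random 8-byte key, and a blk file whose
# plaintext starts with the network magic but is stored XOR-obfuscated.
key = os.urandom(8)
stored = bytes(b ^ key[i % 8] for i, b in enumerate(REGTEST_MAGIC + b"...block data..."))

# The old script compared the raw stored bytes against the magic, which fails
# for any key that doesn't happen to start with four zero bytes. The fixed
# script de-XORs first, so the magic check passes:
magic = bytes(b ^ key[i % 8] for i, b in enumerate(stored[:4]))
assert magic == REGTEST_MAGIC
```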
