From 9845b64bc898f23130d77fd4fbf7014c630c5ff8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?S=C3=A9bastien=20Bernery?=
Date: Thu, 6 Jun 2024 17:03:15 +0200
Subject: [PATCH] [zip] Bugfix for zip files that have no data descriptor
 after the file data. (Zip files generated with the 7zip tool on Windows were
 corrupted when read with Haxe)

---
 std/haxe/zip/Reader.hx | 103 +++++++++++++++++++++--------------------
 1 file changed, 53 insertions(+), 50 deletions(-)

diff --git a/std/haxe/zip/Reader.hx b/std/haxe/zip/Reader.hx
index cdb6115a432..6b1abc91637 100644
--- a/std/haxe/zip/Reader.hx
+++ b/std/haxe/zip/Reader.hx
@@ -127,55 +127,59 @@ class Reader {
 			if (e == null)
 				break;
 			// do we have a data descriptor? (see readEntryHeader)
-			if (e.crc32 == null) {
-				if (e.compressed) {
-					#if neko
-					// enter progressive mode : we use a different input which has
-					// a temporary buffer, this is necessary since we have to uncompress
-					// progressively, and after that we might have pending read data
-					// that needs to be processed
-					var bufSize = 65536;
-					if (buf == null) {
-						buf = new haxe.io.BufferInput(i, haxe.io.Bytes.alloc(bufSize));
-						tmp = haxe.io.Bytes.alloc(bufSize);
-						i = buf;
-					}
-					var out = new haxe.io.BytesBuffer();
-					var z = new neko.zip.Uncompress(-15);
-					z.setFlushMode(neko.zip.Flush.SYNC);
-					while (true) {
-						if (buf.available == 0)
-							buf.refill();
-						var p = bufSize - buf.available;
-						if (p != buf.pos) {
-							// because of lack of "srcLen" in zip api, we need to always be stuck to the buffer end
-							buf.buf.blit(p, buf.buf, buf.pos, buf.available);
-							buf.pos = p;
-						}
-						var r = z.execute(buf.buf, buf.pos, tmp, 0);
-						out.addBytes(tmp, 0, r.write);
-						buf.pos += r.read;
-						buf.available -= r.read;
-						if (r.done)
-							break;
-					}
-					e.data = out.getBytes();
-					#else
-					var bufSize = 65536;
-					if (tmp == null)
-						tmp = haxe.io.Bytes.alloc(bufSize);
-					var out = new haxe.io.BytesBuffer();
-					var z = new InflateImpl(i, false, false);
-					while (true) {
-						var n = z.readBytes(tmp, 0, bufSize);
-						out.addBytes(tmp, 0, n);
-						if (n < bufSize)
-							break;
+			if (e.compressed) {
+				#if neko
+				// enter progressive mode : we use a different input which has
+				// a temporary buffer, this is necessary since we have to uncompress
+				// progressively, and after that we might have pending read data
+				// that needs to be processed
+				var bufSize = 65536;
+				if (buf == null) {
+					buf = new haxe.io.BufferInput(i, haxe.io.Bytes.alloc(bufSize));
+					tmp = haxe.io.Bytes.alloc(bufSize);
+					i = buf;
+				}
+				var out = new haxe.io.BytesBuffer();
+				var z = new neko.zip.Uncompress(-15);
+				z.setFlushMode(neko.zip.Flush.SYNC);
+				while (true) {
+					if (buf.available == 0)
+						buf.refill();
+					var p = bufSize - buf.available;
+					if (p != buf.pos) {
+						// because of lack of "srcLen" in zip api, we need to always be stuck to the buffer end
+						buf.buf.blit(p, buf.buf, buf.pos, buf.available);
+						buf.pos = p;
 					}
-					e.data = out.getBytes();
-					#end
-				} else
-					e.data = i.read(e.dataSize);
+					var r = z.execute(buf.buf, buf.pos, tmp, 0);
+					out.addBytes(tmp, 0, r.write);
+					buf.pos += r.read;
+					buf.available -= r.read;
+					if (r.done)
+						break;
+				}
+				e.data = out.getBytes();
+				e.compressed = false;
+				#else
+				var bufSize = 65536;
+				if (tmp == null)
+					tmp = haxe.io.Bytes.alloc(bufSize);
+				var out = new haxe.io.BytesBuffer();
+				var z = new InflateImpl(i, false, false);
+				while (true) {
+					var n = z.readBytes(tmp, 0, bufSize);
+					out.addBytes(tmp, 0, n);
+					if (n < bufSize)
+						break;
+				}
+				e.data = out.getBytes();
+				#end
+			} else
+				e.data = i.read(e.dataSize);
+
+			// If CRC32 is not defined in the header,
+			// it's defined in a data descriptor after the compressed data.
+			if (e.crc32 == null) {
 				e.crc32 = i.readInt32();
 				if (e.crc32 == 0x08074b50)
 					e.crc32 = i.readInt32();
@@ -184,8 +188,7 @@ class Reader {
 				// set data to uncompressed
 				e.dataSize = e.fileSize;
 				e.compressed = false;
-			} else
-				e.data = i.read(e.dataSize);
+			}
 			l.add(e);
 		}
 		return l;
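
Background on the descriptor handling above: when a zip entry is written with
general-purpose flag bit 3 set, its local header carries no CRC-32 (which is
why e.crc32 is null here), and the CRC-32 plus the compressed and uncompressed
sizes follow the file data instead, optionally prefixed by the signature
0x08074b50 ("PK\x07\x08"). A minimal Haxe sketch of that descriptor parsing,
using a hypothetical class and function name rather than anything from
Reader.hx:

	// Illustrative sketch only; not part of the patch or of haxe.zip.
	class DataDescriptorSketch {
		// Reads a ZIP data descriptor from an input positioned right after
		// the file data. Assumes i.bigEndian == false, the haxe.io.Input
		// default, since zip fields are little-endian.
		public static function read(i:haxe.io.Input):{crc32:Int, compressedSize:Int, uncompressedSize:Int} {
			var crc32 = i.readInt32();
			// The descriptor may or may not start with the optional
			// signature 0x08074b50 ("PK\x07\x08"); both forms are valid,
			// so treat the first word as the CRC-32 unless it matches.
			if (crc32 == 0x08074b50)
				crc32 = i.readInt32();
			var compressedSize = i.readInt32();
			var uncompressedSize = i.readInt32();
			return {crc32: crc32, compressedSize: compressedSize, uncompressedSize: uncompressedSize};
		}
	}

The first two reads mirror the patched code path visible in the hunk
(e.crc32 = i.readInt32(); followed by the signature check); the two size
words are the remaining descriptor fields defined by the zip specification.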