-
Notifications
You must be signed in to change notification settings - Fork 2
/
Copy pathreader.go
463 lines (414 loc) · 12.2 KB
/
reader.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
package zipstream
import (
"archive/zip"
"bytes"
"compress/flate"
"encoding/binary"
"errors"
"fmt"
"hash"
"hash/crc32"
"io"
"sync"
"time"
)
const (
	// Fixed sizes of the on-disk ZIP record layouts.
	headerIdentifierLen = 4  // every record begins with a 4-byte signature
	fileHeaderLen       = 26 // local file header after the signature, before name/extra
	dataDescriptorLen   = 16 // four uint32: descriptor signature, crc32, compressed size, size
	// Record signatures (little-endian on disk).
	fileHeaderSignature      = 0x04034b50 // local file header
	directoryHeaderSignature = 0x02014b50 // central directory file header
	directoryEndSignature    = 0x06054b50 // end of central directory
	dataDescriptorSignature  = 0x08074b50 // de-facto data descriptor signature
	// Extra header IDs.
	// See http://mdfs.net/Docs/Comp/Archiving/Zip/ExtraField
	Zip64ExtraID       = 0x0001 // Zip64 extended information
	NtfsExtraID        = 0x000a // NTFS
	UnixExtraID        = 0x000d // UNIX
	ExtTimeExtraID     = 0x5455 // Extended timestamp
	InfoZipUnixExtraID = 0x5855 // Info-ZIP Unix extension
)
// Compression method IDs from the ZIP specification that this package
// recognizes (see the init registration of decompressors below).
const (
	CompressMethodStored   = 0 // stored as-is, no compression
	CompressMethodDeflated = 8 // DEFLATE
)
// Entry represents one file entry parsed from a local file header of a
// streamed ZIP archive. It embeds zip.FileHeader so callers get the
// familiar metadata fields (Name, Method, Modified, sizes, ...).
type Entry struct {
	zip.FileHeader
	r  io.Reader // the underlying archive stream (shared with the Reader)
	lr io.Reader // LimitReader capped at CompressedSize64, set by readEntry
	// zip64 records that a Zip64 extended-information extra field was present.
	zip64 bool
	// hasReadNum counts uncompressed payload bytes delivered to the caller
	// plus any data-descriptor bytes already consumed for this entry.
	hasReadNum uint64
	// hasDataDescriptorSignature is set when the trailing data descriptor
	// carried the optional 0x08074b50 signature.
	hasDataDescriptorSignature bool
	// eof marks the entry (payload and descriptor) as fully consumed.
	eof bool
}
// hasDataDescriptor reports whether general-purpose flag bit 3 is set,
// meaning the CRC32 and sizes trail the payload in a data descriptor.
func (e *Entry) hasDataDescriptor() bool {
	const descriptorFlag = 0x8 // general-purpose bit 3
	return e.Flags&descriptorFlag != 0
}
// IsDir just simply check whether the entry name ends with "/", the
// conventional directory marker in ZIP archives.
func (e *Entry) IsDir() bool {
	if n := len(e.Name); n > 0 {
		return e.Name[n-1] == '/'
	}
	return false
}
// Open returns a ReadCloser that yields the entry's decompressed
// content while verifying its CRC32 checksum. It fails when the entry
// has already been fully consumed or when no decompressor is registered
// for the entry's compression method.
func (e *Entry) Open() (io.ReadCloser, error) {
	if e.eof {
		return nil, errors.New("this file has read to end")
	}
	dcomp := decompressor(e.Method)
	if dcomp == nil {
		return nil, zip.ErrAlgorithm
	}
	return &checksumReader{
		rc:    dcomp(e.lr),
		hash:  crc32.NewIEEE(),
		entry: e,
	}, nil
}
// Reader parses ZIP entries sequentially from a forward-only stream,
// without needing the central directory or random access.
type Reader struct {
	r            io.Reader // underlying archive stream
	localFileEnd bool      // set once a central-directory record is seen; no more entries follow
	curEntry     *Entry    // most recently returned entry; drained before reading the next header
}
// NewReader wraps r in a streaming ZIP reader that walks local file
// headers one after another as they appear in the stream.
func NewReader(r io.Reader) *Reader {
	z := new(Reader)
	z.r = r
	return z
}
// readEntry parses one local file header — whose 4-byte signature has
// already been consumed by GetNextEntry — and returns the resulting
// Entry with lr positioned over the compressed payload. It rejects
// encrypted entries and non-DEFLATE entries that declare a data
// descriptor, and resolves Zip64 sizes and extended timestamps from the
// extra field.
func (z *Reader) readEntry() (*Entry, error) {
	buf := make([]byte, fileHeaderLen)
	if _, err := io.ReadFull(z.r, buf); err != nil {
		return nil, fmt.Errorf("unable to read local file header: %w", err)
	}
	lr := readBuf(buf)
	// Fixed header fields, in on-disk order (little-endian).
	readerVersion := lr.uint16()
	flags := lr.uint16()
	method := lr.uint16()
	modifiedTime := lr.uint16()
	modifiedDate := lr.uint16()
	crc32Sum := lr.uint32()
	compressedSize := lr.uint32()
	uncompressedSize := lr.uint32()
	filenameLen := int(lr.uint16())
	extraAreaLen := int(lr.uint16())
	entry := &Entry{
		FileHeader: zip.FileHeader{
			ReaderVersion:      readerVersion,
			Flags:              flags,
			Method:             method,
			ModifiedTime:       modifiedTime,
			ModifiedDate:       modifiedDate,
			CRC32:              crc32Sum,
			CompressedSize:     compressedSize,
			UncompressedSize:   uncompressedSize,
			CompressedSize64:   uint64(compressedSize),
			UncompressedSize64: uint64(uncompressedSize),
		},
		r:          z.r,
		hasReadNum: 0,
		eof:        false,
	}
	// The file name and extra area follow the fixed header back to back.
	nameAndExtraBuf := make([]byte, filenameLen+extraAreaLen)
	if _, err := io.ReadFull(z.r, nameAndExtraBuf); err != nil {
		return nil, fmt.Errorf("unable to read entry name and extra area: %w", err)
	}
	entry.Name = string(nameAndExtraBuf[:filenameLen])
	entry.Extra = nameAndExtraBuf[filenameLen:]
	entry.NonUTF8 = flags&0x800 == 0 // bit 11 marks UTF-8-encoded name/comment
	if flags&1 == 1 {
		return nil, fmt.Errorf("encrypted ZIP entry not supported")
	}
	// In a forward-only stream we cannot seek back, so a data descriptor
	// (flag bit 3) is only tolerated for DEFLATE, whose decompressor can
	// detect the end of the payload on its own.
	if flags&8 == 8 && method != CompressMethodDeflated {
		return nil, fmt.Errorf("only DEFLATED entries can have data descriptor")
	}
	// All-ones sizes signal that the real values live in a Zip64 extra field.
	needCSize := entry.CompressedSize == ^uint32(0)
	needUSize := entry.UncompressedSize == ^uint32(0)
	ler := readBuf(entry.Extra)
	var modified time.Time
parseExtras:
	for len(ler) >= 4 { // need at least tag and size
		fieldTag := ler.uint16()
		fieldSize := int(ler.uint16())
		if len(ler) < fieldSize {
			break
		}
		fieldBuf := ler.sub(fieldSize)
		switch fieldTag {
		case Zip64ExtraID:
			entry.zip64 = true
			// update directory values from the zip64 extra block.
			// They should only be consulted if the sizes read earlier
			// are maxed out.
			// See golang.org/issue/13367.
			if needUSize {
				needUSize = false
				if len(fieldBuf) < 8 {
					return nil, zip.ErrFormat
				}
				entry.UncompressedSize64 = fieldBuf.uint64()
			}
			if needCSize {
				needCSize = false
				if len(fieldBuf) < 8 {
					return nil, zip.ErrFormat
				}
				entry.CompressedSize64 = fieldBuf.uint64()
			}
		case NtfsExtraID:
			if len(fieldBuf) < 4 {
				continue parseExtras
			}
			fieldBuf.uint32() // reserved (ignored)
			for len(fieldBuf) >= 4 { // need at least tag and size
				attrTag := fieldBuf.uint16()
				attrSize := int(fieldBuf.uint16())
				if len(fieldBuf) < attrSize {
					continue parseExtras
				}
				attrBuf := fieldBuf.sub(attrSize)
				if attrTag != 1 || attrSize != 24 {
					continue // Ignore irrelevant attributes
				}
				const ticksPerSecond = 1e7    // Windows timestamp resolution
				ts := int64(attrBuf.uint64()) // ModTime since Windows epoch
				secs := ts / ticksPerSecond
				nsecs := (1e9 / ticksPerSecond) * int64(ts%ticksPerSecond)
				epoch := time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
				modified = time.Unix(epoch.Unix()+secs, nsecs)
			}
		case UnixExtraID, InfoZipUnixExtraID:
			if len(fieldBuf) < 8 {
				continue parseExtras
			}
			fieldBuf.uint32()              // AcTime (ignored)
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		case ExtTimeExtraID:
			// Flags byte bit 0 indicates a modification time is present.
			if len(fieldBuf) < 5 || fieldBuf.uint8()&1 == 0 {
				continue parseExtras
			}
			ts := int64(fieldBuf.uint32()) // ModTime since Unix epoch
			modified = time.Unix(ts, 0)
		}
	}
	// Prefer an extended timestamp from the extra field; otherwise keep
	// the legacy MS-DOS timestamp.
	msDosModified := MSDosTimeToTime(entry.ModifiedDate, entry.ModifiedTime)
	entry.Modified = msDosModified
	if !modified.IsZero() {
		entry.Modified = modified.UTC()
		// If legacy MS-DOS timestamps are set, we can use the delta between
		// the legacy and extended versions to estimate timezone offset.
		//
		// A non-UTC timezone is always used (even if offset is zero).
		// Thus, FileHeader.Modified.Location() == time.UTC is useful for
		// determining whether extended timestamps are present.
		// This is necessary for users that need to do additional time
		// calculations when dealing with legacy ZIP formats.
		if entry.ModifiedTime != 0 || entry.ModifiedDate != 0 {
			entry.Modified = modified.In(timeZone(msDosModified.Sub(modified)))
		}
	}
	// Without a Zip64 field the compressed size must be known up front;
	// a stream cannot recover it later.
	// NOTE(review): when a data descriptor is used, writers commonly put
	// zero sizes in the local header — lr would then be a zero-length
	// reader; verify how such streams are handled upstream.
	if needCSize {
		return nil, zip.ErrFormat
	}
	// lr hands Open exactly the compressed payload bytes.
	entry.lr = io.LimitReader(z.r, int64(entry.CompressedSize64))
	return entry, nil
}
// GetNextEntry drains whatever remains of the current entry — payload
// and trailing data descriptor, if any — then parses the next local
// file header. It returns io.EOF once a central-directory record is
// reached, after which the archive holds no more file entries.
func (z *Reader) GetNextEntry() (*Entry, error) {
	if z.localFileEnd {
		return nil, io.EOF
	}
	// The previous entry may have been read partially or not at all;
	// advance z.r so it sits exactly on the next record signature.
	if z.curEntry != nil && !z.curEntry.eof {
		if z.curEntry.hasReadNum <= z.curEntry.UncompressedSize64 {
			// Payload not fully consumed: discard the remaining
			// compressed bytes, then the data descriptor if present.
			if _, err := io.Copy(io.Discard, z.curEntry.lr); err != nil {
				return nil, fmt.Errorf("read previous file data fail: %w", err)
			}
			if z.curEntry.hasDataDescriptor() {
				if err := readDataDescriptor(z.r, z.curEntry); err != nil {
					return nil, fmt.Errorf("read previous entry's data descriptor fail: %w", err)
				}
			}
		} else {
			// hasReadNum exceeds the uncompressed size, which is only
			// consistent when part of a data descriptor was consumed.
			if !z.curEntry.hasDataDescriptor() {
				return nil, errors.New("parse error, read position exceed entry")
			}
			// readDataLen is how many descriptor bytes were already read.
			readDataLen := z.curEntry.hasReadNum - z.curEntry.UncompressedSize64
			if readDataLen > dataDescriptorLen {
				return nil, errors.New("parse error, read position exceed entry")
			} else if readDataLen > dataDescriptorLen-4 {
				// More than 12 bytes consumed is only possible when the
				// optional 4-byte signature was present.
				if z.curEntry.hasDataDescriptorSignature {
					if _, err := io.Copy(io.Discard, io.LimitReader(z.r, int64(dataDescriptorLen-readDataLen))); err != nil {
						return nil, fmt.Errorf("read previous entry's data descriptor fail: %w", err)
					}
				} else {
					return nil, errors.New("parse error, read position exceed entry")
				}
			} else {
				// Read up to a full 16-byte descriptor. If the descriptor
				// had no signature it is only 12 bytes long, so the last
				// 4 bytes read may actually be the next record's
				// signature — push them back in that case.
				buf := make([]byte, dataDescriptorLen-readDataLen)
				if _, err := io.ReadFull(z.r, buf); err != nil {
					return nil, fmt.Errorf("read previous entry's data descriptor fail: %w", err)
				}
				buf = buf[len(buf)-4:]
				headerID := binary.LittleEndian.Uint32(buf)
				// read to next record head
				if headerID == fileHeaderSignature ||
					headerID == directoryHeaderSignature ||
					headerID == directoryEndSignature {
					z.r = io.MultiReader(bytes.NewReader(buf), z.r)
				}
			}
		}
		z.curEntry.eof = true
	}
	headerIDBuf := make([]byte, headerIdentifierLen)
	if _, err := io.ReadFull(z.r, headerIDBuf); err != nil {
		return nil, fmt.Errorf("unable to read header identifier: %w", err)
	}
	headerID := binary.LittleEndian.Uint32(headerIDBuf)
	if headerID != fileHeaderSignature {
		// A central-directory header or end record means the local file
		// entries are exhausted.
		if headerID == directoryHeaderSignature || headerID == directoryEndSignature {
			z.localFileEnd = true
			return nil, io.EOF
		}
		return nil, zip.ErrFormat
	}
	entry, err := z.readEntry()
	if err != nil {
		return nil, fmt.Errorf("unable to read zip file header: %w", err)
	}
	z.curEntry = entry
	return entry, nil
}
// decompressors maps a compression method ID (uint16) to its
// zip.Decompressor. A sync.Map keeps lookups goroutine-safe.
var (
	decompressors sync.Map // map[uint16]Decompressor
)

// init registers the two methods every ZIP reader must support:
// Store (pass-through) and Deflate.
func init() {
	decompressors.Store(zip.Store, zip.Decompressor(io.NopCloser))
	decompressors.Store(zip.Deflate, zip.Decompressor(newFlateReader))
}
// decompressor looks up the registered zip.Decompressor for the given
// compression method; it returns nil when the method is unknown.
func decompressor(method uint16) zip.Decompressor {
	if d, ok := decompressors.Load(method); ok {
		return d.(zip.Decompressor)
	}
	return nil
}
var flateReaderPool sync.Pool
func newFlateReader(r io.Reader) io.ReadCloser {
fr, ok := flateReaderPool.Get().(io.ReadCloser)
if ok {
fr.(flate.Resetter).Reset(r, nil)
} else {
fr = flate.NewReader(r)
}
return &pooledFlateReader{fr: fr}
}
type pooledFlateReader struct {
mu sync.Mutex // guards Close and Read
fr io.ReadCloser
}
func (r *pooledFlateReader) Read(p []byte) (n int, err error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.fr == nil {
return 0, errors.New("Read after Close")
}
return r.fr.Read(p)
}
func (r *pooledFlateReader) Close() error {
r.mu.Lock()
defer r.mu.Unlock()
var err error
if r.fr != nil {
err = r.fr.Close()
flateReaderPool.Put(r.fr)
r.fr = nil
}
return err
}
// readDataDescriptor consumes the data descriptor that trails an
// entry's payload, handling both the signatured (16-byte) and
// signature-less (12-byte) layouts, and checks the recorded CRC32
// against entry.CRC32. All consumed bytes are added to entry.hasReadNum
// so GetNextEntry can account for them.
func readDataDescriptor(r io.Reader, entry *Entry) error {
	var buf [dataDescriptorLen]byte
	// The spec says: "Although not originally assigned a
	// signature, the value 0x08074b50 has commonly been adopted
	// as a signature value for the data descriptor record.
	// Implementers should be aware that ZIP files may be
	// encountered with or without this signature marking data
	// descriptors and should account for either case when reading
	// ZIP files to ensure compatibility."
	//
	// dataDescriptorLen includes the size of the signature but
	// first read just those 4 bytes to see if it exists.
	n, err := io.ReadFull(r, buf[:4])
	entry.hasReadNum += uint64(n)
	if err != nil {
		return err
	}
	off := 0
	maybeSig := readBuf(buf[:4])
	if maybeSig.uint32() != dataDescriptorSignature {
		// No data descriptor signature. Keep these four
		// bytes.
		off += 4
	} else {
		entry.hasDataDescriptorSignature = true
	}
	// Read the remainder so buf[:12] holds crc32, compressed size and
	// uncompressed size in either layout (with a signature the whole
	// buffer is overwritten; without one the first 4 bytes are kept).
	n, err = io.ReadFull(r, buf[off:12])
	entry.hasReadNum += uint64(n)
	if err != nil {
		return err
	}
	// The descriptor is the last piece of the entry on the wire.
	entry.eof = true
	b := readBuf(buf[:12])
	if b.uint32() != entry.CRC32 {
		return zip.ErrChecksum
	}
	// The two sizes that follow here can be either 32 bits or 64 bits
	// but the spec is not very clear on this and different
	// interpretations has been made causing incompatibilities. We
	// already have the sizes from the central directory so we can
	// just ignore these.
	return nil
}
// checksumReader decompresses an entry's payload while hashing the
// plaintext, verifying it against the expected CRC32 once the stream
// is exhausted.
type checksumReader struct {
	rc    io.ReadCloser // decompressor over the entry's limited payload reader
	hash  hash.Hash32   // running CRC32 (IEEE) of the decompressed bytes
	nread uint64        // number of bytes read so far
	entry *Entry
	err   error // sticky error
}
// Read decompresses into b, folding everything read into the CRC32
// hash. At EOF it verifies the byte count, consumes the trailing data
// descriptor when present, and validates the checksum; any failure
// becomes a sticky error returned by all later calls.
func (r *checksumReader) Read(b []byte) (n int, err error) {
	if r.err != nil {
		return 0, r.err
	}
	n, err = r.rc.Read(b)
	r.hash.Write(b[:n])
	r.nread += uint64(n)
	r.entry.hasReadNum += uint64(n)
	if err == nil {
		return
	}
	if err == io.EOF {
		// Decompressor finished early: the stream was truncated.
		if r.nread != r.entry.UncompressedSize64 {
			return 0, io.ErrUnexpectedEOF
		}
		if r.entry.hasDataDescriptor() {
			// The CRC32 trails the payload; readDataDescriptor also
			// marks the entry as fully consumed.
			if err1 := readDataDescriptor(r.entry.r, r.entry); err1 != nil {
				if err1 == io.EOF {
					err = io.ErrUnexpectedEOF
				} else {
					err = err1
				}
			} else if r.hash.Sum32() != r.entry.CRC32 {
				err = zip.ErrChecksum
			}
		} else {
			// If there's not a data descriptor, we still compare
			// the CRC32 of what we've read against the file header
			// or TOC's CRC32, if it seems like it was set.
			r.entry.eof = true
			if r.entry.CRC32 != 0 && r.hash.Sum32() != r.entry.CRC32 {
				err = zip.ErrChecksum
			}
		}
	}
	r.err = err
	return
}
// Close closes the underlying decompressor, releasing any pooled
// resources it holds.
func (r *checksumReader) Close() error {
	return r.rc.Close()
}