package zip

import (
	"bufio"
	"encoding/binary"
	"errors"
	"hash"
	"hash/crc32"
	"io"
	"strings"
	"unicode/utf8"
)

var (
	errLongName  = errors.New("zip: FileHeader.Name too long")
	errLongExtra = errors.New("zip: FileHeader.Extra too long")
)

// Writer implements a zip file writer.
type Writer struct {
	cw          *countWriter
	dir         []*header
	last        *fileWriter
	closed      bool
	compressors map[uint16]Compressor
	comment     string

	// testHookCloseSizeOffset optionally observes the size and offset of
	// the central directory computed in Close; it is used by tests.
	testHookCloseSizeOffset func(size, offset uint64)
}

// header pairs a FileHeader with the offset of its local header in the archive.
type header struct {
	*FileHeader
	offset uint64
	raw    bool
}

// NewWriter returns a new Writer writing a zip file to w.
func NewWriter(w io.Writer) *Writer {
	return &Writer{cw: &countWriter{w: bufio.NewWriter(w)}}
}

// SetOffset sets the offset of the beginning of the zip data within the
// underlying writer. It should be used when the zip data is appended to an
// existing file, such as a binary executable.
// It must be called before any data is written.
func (w *Writer) SetOffset(n int64) {
	if w.cw.count != 0 {
		panic("zip: SetOffset called after data was written")
	}
	w.cw.count = n
}

// Flush flushes any buffered data to the underlying writer.
// Calling Flush is not normally necessary; calling Close is sufficient.
func (w *Writer) Flush() error {
	return w.cw.w.(*bufio.Writer).Flush()
}

// SetComment sets the end-of-central-directory comment field.
// It can only be called before Close.
func (w *Writer) SetComment(comment string) error {
	if len(comment) > uint16max {
		return errors.New("zip: Writer.Comment too long")
	}
	w.comment = comment
	return nil
}

// Close finishes writing the zip file by writing the central directory.
// It does not close the underlying writer.
func (w *Writer) Close() error {
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return err
		}
		w.last = nil
	}
	if w.closed {
		return errors.New("zip: writer closed twice")
	}
	w.closed = true

	// write central directory
	start := w.cw.count
	for _, h := range w.dir {
		var buf [directoryHeaderLen]byte
		b := writeBuf(buf[:])
		b.uint32(uint32(directoryHeaderSignature))
		b.uint16(h.CreatorVersion)
		b.uint16(h.ReaderVersion)
		b.uint16(h.Flags)
		b.uint16(h.Method)
		b.uint16(h.ModifiedTime)
		b.uint16(h.ModifiedDate)
		b.uint32(h.CRC32)
		if h.isZip64() || h.offset >= uint32max {
			// The entry needs zip64 treatment: store the maximum 32-bit
			// value in both size fields to signal that the real values
			// live in the zip64 extra block appended below.
			b.uint32(uint32max) // compressed size
			b.uint32(uint32max) // uncompressed size

			// Append a zip64 extra block to Extra.
			var buf [28]byte // 2x uint16 + 3x uint64
			eb := writeBuf(buf[:])
			eb.uint16(zip64ExtraID)
			eb.uint16(24) // size = 3x uint64
			eb.uint64(h.UncompressedSize64)
			eb.uint64(h.CompressedSize64)
			eb.uint64(h.offset)
			h.Extra = append(h.Extra, buf[:]...)
		} else {
			b.uint32(h.CompressedSize)
			b.uint32(h.UncompressedSize)
		}

		b.uint16(uint16(len(h.Name)))
		b.uint16(uint16(len(h.Extra)))
		b.uint16(uint16(len(h.Comment)))
		b = b[4:] // skip disk number start and internal file attributes (2x uint16)
		b.uint32(h.ExternalAttrs)
		if h.isZip64() || h.offset > uint32max {
			// The offset does not fit in 32 bits; the real value is in
			// the zip64 extra block above.
			b.uint32(uint32max)
		} else {
			b.uint32(uint32(h.offset))
		}
		if _, err := w.cw.Write(buf[:]); err != nil {
			return err
		}
		if _, err := io.WriteString(w.cw, h.Name); err != nil {
			return err
		}
		if _, err := w.cw.Write(h.Extra); err != nil {
			return err
		}
		if _, err := io.WriteString(w.cw, h.Comment); err != nil {
			return err
		}
	}
	end := w.cw.count

	records := uint64(len(w.dir))
	size := uint64(end - start)
	offset := uint64(start)

	if f := w.testHookCloseSizeOffset; f != nil {
		f(size, offset)
	}

	if records >= uint16max || size >= uint32max || offset >= uint32max {
		var buf [directory64EndLen + directory64LocLen]byte
		b := writeBuf(buf[:])

		// zip64 end of central directory record
		b.uint32(directory64EndSignature)
		b.uint64(directory64EndLen - 12) // length minus signature (uint32) and length fields (uint64)
		b.uint16(zipVersion45)           // version made by
		b.uint16(zipVersion45)           // version needed to extract
		b.uint32(0)                      // number of this disk
		b.uint32(0)                      // number of the disk with the start of the central directory
		b.uint64(records)                // total number of entries in the central directory on this disk
		b.uint64(records)                // total number of entries in the central directory
		b.uint64(size)                   // size of the central directory
		b.uint64(offset)                 // offset of the start of the central directory

		// zip64 end of central directory locator
		b.uint32(directory64LocSignature)
		b.uint32(0)           // number of the disk with the start of the zip64 end of central directory
		b.uint64(uint64(end)) // relative offset of the zip64 end of central directory record
		b.uint32(1)           // total number of disks

		if _, err := w.cw.Write(buf[:]); err != nil {
			return err
		}

		// Store the maximum values in the regular end record to signal
		// that the zip64 values should be used instead.
		records = uint16max
		size = uint32max
		offset = uint32max
	}

	// write end record
	var buf [directoryEndLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(directoryEndSignature))
	b = b[4:]                        // skip over disk number and first disk number (2x uint16)
	b.uint16(uint16(records))        // number of entries this disk
	b.uint16(uint16(records))        // number of entries total
	b.uint32(uint32(size))           // size of directory
	b.uint32(uint32(offset))         // start of directory
	b.uint16(uint16(len(w.comment))) // byte size of EOCD comment
	if _, err := w.cw.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w.cw, w.comment); err != nil {
		return err
	}

	return w.cw.w.(*bufio.Writer).Flush()
}

// Create adds a file to the zip file using the provided name.
// It returns a Writer to which the file contents should be written.
// The file contents will be compressed using the Deflate method.
// To create a directory instead of a file, add a trailing slash to the name.
// The file's contents must be written to the io.Writer before the next
// call to Create, CreateHeader, CreateRaw, or Close.
func (w *Writer) Create(name string) (io.Writer, error) {
	header := &FileHeader{
		Name:   name,
		Method: Deflate,
	}
	return w.CreateHeader(header)
}
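
// A minimal usage sketch (illustrative only, not part of the original file):
// the helper name writeArchive is hypothetical. It creates one deflated entry
// and finishes the archive.
//
//	func writeArchive(dst io.Writer) error {
//		zw := NewWriter(dst)
//		f, err := zw.Create("hello.txt")
//		if err != nil {
//			return err
//		}
//		if _, err := f.Write([]byte("hello, zip")); err != nil {
//			return err
//		}
//		return zw.Close()
//	}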

// detectUTF8 reports whether s is a valid UTF-8 string, and whether the string
// must be considered UTF-8 encoding (i.e., not compatible with CP-437, ASCII,
// or any other common encoding).
func detectUTF8(s string) (valid, require bool) {
	for i := 0; i < len(s); {
		r, size := utf8.DecodeRuneInString(s[i:])
		i += size
		// Officially, ZIP uses CP-437, but many readers use the system's
		// local character encoding, most of which share a printable ASCII
		// subset with CP-437. Any rune outside that subset (or the
		// backslash, 0x5c) forces the UTF-8 flag; invalid UTF-8 fails both
		// checks.
		if r < 0x20 || r > 0x7d || r == 0x5c {
			if !utf8.ValidRune(r) || (r == utf8.RuneError && size == 1) {
				return false, false
			}
			require = true
		}
	}
	return true, require
}

// prepare performs the bookkeeping operations required at the start of
// CreateHeader and CreateRaw.
func (w *Writer) prepare(fh *FileHeader) error {
	if w.last != nil && !w.last.closed {
		if err := w.last.close(); err != nil {
			return err
		}
	}
	if len(w.dir) > 0 && w.dir[len(w.dir)-1].FileHeader == fh {
		// Reusing the same FileHeader for consecutive entries is almost
		// certainly a mistake, so reject it.
		return errors.New("archive/zip: invalid duplicate FileHeader")
	}
	return nil
}

// CreateHeader adds a file to the zip archive using the provided FileHeader
// for the file metadata. Writer takes ownership of fh and may mutate its
// fields; the caller must not modify fh after calling CreateHeader.
//
// It returns a Writer to which the file contents should be written. The
// file's contents must be written to the io.Writer before the next call to
// Create, CreateHeader, CreateRaw, or Close.
func (w *Writer) CreateHeader(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	// The ZIP format officially encodes names and comments in CP-437 unless
	// the UTF-8 flag bit (0x800) is set, but many readers either do not
	// support UTF-8 or interpret the fields using the local system encoding.
	// To avoid breaking such readers, only set the UTF-8 flag when the name
	// or comment actually requires UTF-8 and both are valid UTF-8, and clear
	// it when the caller explicitly asked for a non-UTF-8 encoding.
	utf8Valid1, utf8Require1 := detectUTF8(fh.Name)
	utf8Valid2, utf8Require2 := detectUTF8(fh.Comment)
	switch {
	case fh.NonUTF8:
		fh.Flags &^= 0x800
	case (utf8Require1 || utf8Require2) && (utf8Valid1 && utf8Valid2):
		fh.Flags |= 0x800
	}

	fh.CreatorVersion = fh.CreatorVersion&0xff00 | zipVersion20 // preserve compatibility byte
	fh.ReaderVersion = zipVersion20

	// If Modified is set, it takes precedence over the MS-DOS timestamp fields.
	if !fh.Modified.IsZero() {
		// The legacy MS-DOS date/time fields encode the timestamp in the
		// time's own location; it is intentionally not converted to UTC
		// here, so the caller keeps control of the encoded timezone.
		fh.ModifiedDate, fh.ModifiedTime = timeToMsDosTime(fh.Modified)

		// Also record the modification time in an "extended timestamp"
		// extra field, which stores it as 32-bit Unix seconds and is
		// understood by most ZIP implementations.
		var mbuf [9]byte // 2x uint16 + uint8 + uint32
		mt := uint32(fh.Modified.Unix())
		eb := writeBuf(mbuf[:])
		eb.uint16(extTimeExtraID)
		eb.uint16(5)  // size: uint8 + uint32
		eb.uint8(1)   // flags: modification time present
		eb.uint32(mt) // modification time
		fh.Extra = append(fh.Extra, mbuf[:]...)
	}

	var (
		ow io.Writer
		fw *fileWriter
	)
	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
	}

	if strings.HasSuffix(fh.Name, "/") {
		// Set the compression method to Store to ensure the data length is
		// truly zero, which writeHeader always encodes for the size fields.
		// This is necessary as most compression formats produce non-zero
		// output even for empty input.
		fh.Method = Store
		fh.Flags &^= 0x8 // we will not write a data descriptor

		// Explicitly clear sizes as they have no meaning for directories.
		fh.CompressedSize = 0
		fh.CompressedSize64 = 0
		fh.UncompressedSize = 0
		fh.UncompressedSize64 = 0

		ow = dirWriter{}
	} else {
		fh.Flags |= 0x8 // we will write a data descriptor

		fw = &fileWriter{
			zipw:      w.cw,
			compCount: &countWriter{w: w.cw},
			crc32:     crc32.NewIEEE(),
		}
		comp := w.compressor(fh.Method)
		if comp == nil {
			return nil, ErrAlgorithm
		}
		var err error
		fw.comp, err = comp(fw.compCount)
		if err != nil {
			return nil, err
		}
		fw.rawCount = &countWriter{w: fw.comp}
		fw.header = h
		ow = fw
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}
	// If we're creating a directory, fw is nil.
	w.last = fw
	return ow, nil
}

// writeHeader writes the local file header for h to w.
func writeHeader(w io.Writer, h *header) error {
	const maxUint16 = 1<<16 - 1
	if len(h.Name) > maxUint16 {
		return errLongName
	}
	if len(h.Extra) > maxUint16 {
		return errLongExtra
	}

	var buf [fileHeaderLen]byte
	b := writeBuf(buf[:])
	b.uint32(uint32(fileHeaderSignature))
	b.uint16(h.ReaderVersion)
	b.uint16(h.Flags)
	b.uint16(h.Method)
	b.uint16(h.ModifiedTime)
	b.uint16(h.ModifiedDate)
	// In raw mode (the caller did the compression), the CRC and sizes are
	// written here unless the flags request a trailing data descriptor.
	if h.raw && !h.hasDataDescriptor() {
		b.uint32(h.CRC32)
		b.uint32(uint32(min64(h.CompressedSize64, uint32max)))
		b.uint32(uint32(min64(h.UncompressedSize64, uint32max)))
	} else {
		// When this package handles the compression, these values are
		// always written to the trailing data descriptor instead.
		b.uint32(0) // crc32
		b.uint32(0) // compressed size
		b.uint32(0) // uncompressed size
	}
	b.uint16(uint16(len(h.Name)))
	b.uint16(uint16(len(h.Extra)))
	if _, err := w.Write(buf[:]); err != nil {
		return err
	}
	if _, err := io.WriteString(w, h.Name); err != nil {
		return err
	}
	_, err := w.Write(h.Extra)
	return err
}

// min64 returns the smaller of x and y.
func min64(x, y uint64) uint64 {
	if x < y {
		return x
	}
	return y
}

// CreateHeaderRaw adds a file to the zip archive in raw (pre-compressed)
// form; it is equivalent to calling CreateRaw.
func (w *Writer) CreateHeaderRaw(fh *FileHeader) (io.Writer, error) {
	return w.CreateRaw(fh)
}

// CreateRaw adds a file to the zip archive using the provided FileHeader and
// returns a Writer to which the file contents should be written. The file's
// contents must be written to the io.Writer before the next call to Create,
// CreateHeader, CreateRaw, or Close.
//
// In contrast to CreateHeader, the bytes passed to the returned Writer are
// not compressed.
func (w *Writer) CreateRaw(fh *FileHeader) (io.Writer, error) {
	if err := w.prepare(fh); err != nil {
		return nil, err
	}

	fh.CompressedSize = uint32(min64(fh.CompressedSize64, uint32max))
	fh.UncompressedSize = uint32(min64(fh.UncompressedSize64, uint32max))

	h := &header{
		FileHeader: fh,
		offset:     uint64(w.cw.count),
		raw:        true,
	}
	w.dir = append(w.dir, h)
	if err := writeHeader(w.cw, h); err != nil {
		return nil, err
	}

	if strings.HasSuffix(fh.Name, "/") {
		w.last = nil
		return dirWriter{}, nil
	}

	fw := &fileWriter{
		header: h,
		zipw:   w.cw,
	}
	w.last = fw
	return fw, nil
}

// Copy copies the file f (obtained from a Reader) into w. It copies the raw
// form directly, bypassing decompression, compression, and validation.
func (w *Writer) Copy(f *File) error {
	r, err := f.OpenRaw()
	if err != nil {
		return err
	}
	fw, err := w.CreateRaw(&f.FileHeader)
	if err != nil {
		return err
	}
	_, err = io.Copy(fw, r)
	return err
}
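
// An illustrative sketch (not part of the original file) of using Copy to
// re-pack entries from an existing archive without recompressing them; the
// helper name repack and the variables src and dst are hypothetical.
//
//	func repack(dst *Writer, src *Reader) error {
//		for _, f := range src.File {
//			if err := dst.Copy(f); err != nil {
//				return err
//			}
//		}
//		return dst.Close()
//	}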

// RegisterCompressor registers or overrides a custom compressor for a
// specific method ID. If a compressor for a given method is not found,
// Writer will default to looking up the compressor at the package level.
func (w *Writer) RegisterCompressor(method uint16, comp Compressor) {
	if w.compressors == nil {
		w.compressors = make(map[uint16]Compressor)
	}
	w.compressors[method] = comp
}
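
// Illustrative sketch (not part of the original file): registering a Deflate
// compressor backed by compress/flate at its best-compression level. The
// variable zw is a hypothetical *Writer and the import of compress/flate is
// assumed.
//
//	zw.RegisterCompressor(Deflate, func(out io.Writer) (io.WriteCloser, error) {
//		return flate.NewWriter(out, flate.BestCompression)
//	})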

// compressor returns the compressor registered on w for method, falling back
// to the package-level registry.
func (w *Writer) compressor(method uint16) Compressor {
	comp := w.compressors[method]
	if comp == nil {
		comp = compressor(method)
	}
	return comp
}

// dirWriter is returned for directory entries; it rejects any writes.
type dirWriter struct{}

func (dirWriter) Write(b []byte) (int, error) {
	if len(b) == 0 {
		return 0, nil
	}
	return 0, errors.New("zip: write to directory")
}

// fileWriter writes a single file entry: it tracks the uncompressed and
// compressed byte counts and the running CRC-32 while forwarding data to the
// registered compressor.
type fileWriter struct {
	*header
	zipw      io.Writer
	rawCount  *countWriter
	comp      io.WriteCloser
	compCount *countWriter
	crc32     hash.Hash32
	closed    bool
}

func (w *fileWriter) Write(p []byte) (int, error) {
	if w.closed {
		return 0, errors.New("zip: write to closed file")
	}
	if w.raw {
		return w.zipw.Write(p)
	}
	w.crc32.Write(p)
	return w.rawCount.Write(p)
}

func (w *fileWriter) close() error {
	if w.closed {
		return errors.New("zip: file closed twice")
	}
	w.closed = true
	if w.raw {
		return w.writeDataDescriptor()
	}
	if err := w.comp.Close(); err != nil {
		return err
	}

	// update FileHeader
	fh := w.header.FileHeader
	fh.CRC32 = w.crc32.Sum32()
	fh.CompressedSize64 = uint64(w.compCount.count)
	fh.UncompressedSize64 = uint64(w.rawCount.count)

	if fh.isZip64() {
		fh.CompressedSize = uint32max
		fh.UncompressedSize = uint32max
		fh.ReaderVersion = zipVersion45
	} else {
		fh.CompressedSize = uint32(fh.CompressedSize64)
		fh.UncompressedSize = uint32(fh.UncompressedSize64)
	}

	return w.writeDataDescriptor()
}

func (w *fileWriter) writeDataDescriptor() error {
	if !w.hasDataDescriptor() {
		return nil
	}
	// Write the data descriptor: the (technically optional) signature, the
	// CRC-32, and the sizes. The sizes are written as 8 bytes when the entry
	// needs zip64, without emitting a zip64 extra field in the local header.
	var buf []byte
	if w.isZip64() {
		buf = make([]byte, dataDescriptor64Len)
	} else {
		buf = make([]byte, dataDescriptorLen)
	}
	b := writeBuf(buf)
	b.uint32(dataDescriptorSignature)
	b.uint32(w.CRC32)
	if w.isZip64() {
		b.uint64(w.CompressedSize64)
		b.uint64(w.UncompressedSize64)
	} else {
		b.uint32(w.CompressedSize)
		b.uint32(w.UncompressedSize)
	}
	_, err := w.zipw.Write(buf)
	return err
}

// countWriter counts the number of bytes written to the underlying writer.
type countWriter struct {
	w     io.Writer
	count int64
}

func (w *countWriter) Write(p []byte) (int, error) {
	n, err := w.w.Write(p)
	w.count += int64(n)
	return n, err
}

// nopCloser wraps an io.Writer with a no-op Close method.
type nopCloser struct {
	io.Writer
}

func (w nopCloser) Close() error {
	return nil
}

// writeBuf is a byte slice that encodes little-endian values and advances
// past the bytes it has written.
type writeBuf []byte

func (b *writeBuf) uint8(v uint8) {
	(*b)[0] = v
	*b = (*b)[1:]
}

func (b *writeBuf) uint16(v uint16) {
	binary.LittleEndian.PutUint16(*b, v)
	*b = (*b)[2:]
}

func (b *writeBuf) uint32(v uint32) {
	binary.LittleEndian.PutUint32(*b, v)
	*b = (*b)[4:]
}

func (b *writeBuf) uint64(v uint64) {
	binary.LittleEndian.PutUint64(*b, v)
	*b = (*b)[8:]
}