...

Source file src/oras.land/oras-go/pkg/content/decompress.go

Documentation: oras.land/oras-go/pkg/content

/*
Copyright The ORAS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package content

import (
	"context"
	"errors"
	"strings"

	ctrcontent "github.com/containerd/containerd/content"
	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// Decompress is a store that decompresses content and extracts from tar, if needed, wrapping
// another store. By default, a FileStore will simply take each artifact and write it to
// a file, just as a MemoryStore will write it into memory. If the artifact is gzipped or tarred,
// you might want to store the actual object that is inside the tar or gzip. Wrap your Store
// with Decompress, and it will check the media-type and, if relevant,
// gunzip and/or untar.
//
// For example:
//
//        fileStore := NewFileStore(rootPath)
//        decompress := store.NewDecompress(fileStore, WithBlocksize(blocksize))
//
// The above example works if there is no tar, i.e. each artifact is just a single file, perhaps gzipped,
// or if there is only one file in each tar archive. In other words, it works when each content.Writer has only one target output stream.
// However, if you have multiple files in each tar archive, where each archive is an artifact layer, then
// you need a way to select how to handle each file in the tar archive; that is, each content.Writer has more than one
// target output stream. In that case, use the following example:
//
//        multiStore := NewMultiStore(rootPath) // some store that can handle different filenames
//        decompress := store.NewDecompress(multiStore, WithBlocksize(blocksize), WithMultiWriterIngester())
//
type Decompress struct {
	pusher              remotes.Pusher
	blocksize           int
	multiWriterIngester bool
}

func NewDecompress(pusher remotes.Pusher, opts ...WriterOpt) Decompress {
	// we have to reprocess the opts to find the blocksize
	var wOpts WriterOpts
	for _, opt := range opts {
		if err := opt(&wOpts); err != nil {
			// TODO: we probably should handle errors here
			continue
		}
	}

	return Decompress{pusher, wOpts.Blocksize, wOpts.MultiWriterIngester}
}

// Push gets a content.Writer
func (d Decompress) Push(ctx context.Context, desc ocispec.Descriptor) (ctrcontent.Writer, error) {
	// the logic is straightforward:
	// - if the mediatype of desc is tar or tar+gzip, then pass the correct decompress writer
	// - else, pass the regular writer
	var (
		writer        ctrcontent.Writer
		err           error
		multiIngester MultiWriterPusher
		ok            bool
	)

	// check to see if we are supposed to use a MultiWriterIngester
	if d.multiWriterIngester {
		multiIngester, ok = d.pusher.(MultiWriterPusher)
		if !ok {
			return nil, errors.New("configured to use multiwriter ingester, but ingester does not implement multiwriter")
		}
	}

	// figure out if compression and/or archive exists
	// before we pass it down, we need to strip anything we are removing here
	// and possibly update the digest, since the store indexes things by digest
	hasGzip, hasTar, modifiedMediaType := checkCompression(desc.MediaType)
	desc.MediaType = modifiedMediaType
	// pass the blocksize down, but only if it is positive
	writerOpts := []WriterOpt{}
	if d.blocksize > 0 {
		writerOpts = append(writerOpts, WithBlocksize(d.blocksize))
	}

	writer, err = d.pusher.Push(ctx, desc)
	if err != nil {
		return nil, err
	}

	// do we need to wrap with an untar writer?
	if hasTar {
		// if not a multi-ingester, get a regular writer
		if multiIngester == nil {
			writer = NewUntarWriter(writer, writerOpts...)
		} else {
			writers, err := multiIngester.Pushers(ctx, desc)
			if err != nil {
				return nil, err
			}
			writer = NewUntarWriterByName(writers, writerOpts...)
		}
	}
	if hasGzip {
		if writer == nil {
			writer, err = d.pusher.Push(ctx, desc)
			if err != nil {
				return nil, err
			}
		}
		writer = NewGunzipWriter(writer, writerOpts...)
	}
	return writer, nil
}

// checkCompression checks whether the mediatype uses gzip compression and/or tar.
// It reports whether gzip and tar are present, as well as the base media type without
// those suffixes.
func checkCompression(mediaType string) (gzip, tar bool, mt string) {
	mt = mediaType
	gzipSuffix := "+gzip"
	gzipAltSuffix := ".gzip"
	tarSuffix := ".tar"
	switch {
	case strings.HasSuffix(mt, gzipSuffix):
		mt = mt[:len(mt)-len(gzipSuffix)]
		gzip = true
	case strings.HasSuffix(mt, gzipAltSuffix):
		mt = mt[:len(mt)-len(gzipAltSuffix)]
		gzip = true
	}

	if strings.HasSuffix(mt, tarSuffix) {
		mt = mt[:len(mt)-len(tarSuffix)]
		tar = true
	}
	return
}
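
Tying the pieces together: a caller wraps an existing pusher (per the doc comment above, typically a FileStore created with NewFileStore(rootPath)) and then pushes descriptors through the wrapper. The helper below is a hypothetical sketch, assuming it sits in the same content package; pushDecompressed and its parameters are illustrative names, not part of this file.

// pushDecompressed wraps pusher with Decompress and asks it for a writer for
// desc. Anything written to the returned writer is gunzipped and/or untarred,
// as dictated by desc.MediaType, before it reaches the wrapped pusher.
func pushDecompressed(ctx context.Context, pusher remotes.Pusher, blocksize int, desc ocispec.Descriptor) (ctrcontent.Writer, error) {
	decompress := NewDecompress(pusher, WithBlocksize(blocksize))

	// For example, with desc.MediaType set to
	// "application/vnd.oci.image.layer.v1.tar+gzip", Push strips the
	// ".tar"/"+gzip" suffixes and returns a gunzip+untar writer chain.
	return decompress.Push(ctx, desc)
}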
   152  
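For such a gzipped tar layer, the single-writer path of Push reduces to a fixed wrapping order: the gunzip wrapper is outermost, so bytes written to the returned writer are decompressed first, then untarred, and only the extracted content reaches the inner writer. The function below is a hypothetical illustration of that chain, assuming it also lives in the content package; it is not code from this file.

// manualDecompressWriter shows the chain Push builds for a ".tar"/"+gzip"
// media type on the non-multi-ingester path: gunzip on the outside,
// untar on the inside, inner writer last.
func manualDecompressWriter(inner ctrcontent.Writer, opts ...WriterOpt) ctrcontent.Writer {
	return NewGunzipWriter(NewUntarWriter(inner, opts...), opts...)
}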

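checkCompression only strips suffixes from the media type: OCI-style layer types such as application/vnd.oci.image.layer.v1.tar+gzip use the "+gzip" suffix, while legacy Docker types such as application/vnd.docker.image.rootfs.diff.tar.gzip use ".gzip". A minimal table-driven test sketch of that behavior, assuming it lives alongside this file in the content package (the cases and file placement are only illustrative):

package content

import "testing"

func TestCheckCompression(t *testing.T) {
	tests := []struct {
		mediaType string
		gzip, tar bool
		base      string
	}{
		{"application/vnd.oci.image.layer.v1.tar+gzip", true, true, "application/vnd.oci.image.layer.v1"},
		{"application/vnd.docker.image.rootfs.diff.tar.gzip", true, true, "application/vnd.docker.image.rootfs.diff"},
		{"application/vnd.oci.image.layer.v1.tar", false, true, "application/vnd.oci.image.layer.v1"},
		{"application/vnd.oci.image.config.v1+json", false, false, "application/vnd.oci.image.config.v1+json"},
	}
	for _, tt := range tests {
		gzip, tar, base := checkCompression(tt.mediaType)
		if gzip != tt.gzip || tar != tt.tar || base != tt.base {
			t.Errorf("checkCompression(%q) = %v, %v, %q; want %v, %v, %q",
				tt.mediaType, gzip, tar, base, tt.gzip, tt.tar, tt.base)
		}
	}
}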