Source file src/archive/tar/reader.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package tar
     6  
     7  import (
     8  	"bytes"
     9  	"io"
    10  	"path/filepath"
    11  	"strconv"
    12  	"strings"
    13  	"time"
    14  )
    15  
    16  // Reader provides sequential access to the contents of a tar archive.
    17  // Reader.Next advances to the next file in the archive (including the first),
    18  // and then Reader can be treated as an io.Reader to access the file's data.
    19  type Reader struct {
    20  	r    io.Reader
    21  	pad  int64      // Amount of padding (ignored) after current file entry
    22  	curr fileReader // Reader for current file entry
    23  	blk  block      // Buffer to use as temporary local storage
    24  
    25  	// err is a persistent error.
    26  	// Only the exported methods of Reader are responsible for ensuring
    27  	// that this error is sticky.
    28  	err error
    29  }
    30  
    31  type fileReader interface {
    32  	io.Reader
    33  	fileState
    34  
    35  	WriteTo(io.Writer) (int64, error)
    36  }
    37  
    38  // NewReader creates a new Reader reading from r.
    39  func NewReader(r io.Reader) *Reader {
    40  	return &Reader{r: r, curr: &regFileReader{r, 0}}
    41  }
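
        // A minimal sketch of typical use (illustrative only; the reader f,
        // the extra imports, and the error handling are assumptions, not part
        // of this package):
        //
        //	tr := tar.NewReader(f)
        //	for {
        //		hdr, err := tr.Next()
        //		if err == io.EOF {
        //			break // end of archive
        //		}
        //		if err != nil {
        //			log.Fatal(err)
        //		}
        //		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
        //		if _, err := io.Copy(io.Discard, tr); err != nil {
        //			log.Fatal(err)
        //		}
        //	}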
    42  
        var tarinsecurepath = godebug.New("tarinsecurepath")

    43  // Next advances to the next entry in the tar archive.
    44  // The Header.Size determines how many bytes can be read for the next file.
    45  // Any remaining data in the current file is automatically discarded.
    46  // At the end of the archive, Next returns the error io.EOF.
    47  //
    48  // If Next encounters a non-local name (as defined by [filepath.IsLocal])
    49  // and the GODEBUG environment variable contains `tarinsecurepath=0`,
    50  // Next returns the header with an ErrInsecurePath error.
    51  // A future version of Go may introduce this behavior by default.
    52  // Programs that want to accept non-local names can ignore
    53  // the ErrInsecurePath error and use the returned header.
    54  func (tr *Reader) Next() (*Header, error) {
    55  	if tr.err != nil {
    56  		return nil, tr.err
    57  	}
    58  	hdr, err := tr.next()
    59  	tr.err = err
    60  	if err == nil && tarinsecurepath.Value() == "0" && !filepath.IsLocal(hdr.Name) {
    61  		err = ErrInsecurePath
    62  	}
    63  	return hdr, err
    64  }
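
        // As a sketch (not part of this package), a caller that wants to accept
        // non-local names, per the doc comment above, might do:
        //
        //	hdr, err := tr.Next()
        //	if errors.Is(err, tar.ErrInsecurePath) {
        //		err = nil // accept the entry despite its non-local name
        //	}
        //	if err != nil {
        //		return err
        //	}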
    65  
    66  func (tr *Reader) next() (*Header, error) {
    67  	var paxHdrs map[string]string
    68  	var gnuLongName, gnuLongLink string
    69  
    70  	// Externally, Next iterates through the tar archive as if it is a series of
    71  	// files. Internally, the tar format often uses fake "files" to add
    72  	// metadata that describes the next file. These metadata "files" should
    73  	// not normally be visible to the outside. As such, this loop iterates
    74  	// through one or more "header files" until it finds a "normal file".
    75  	format := FormatUSTAR | FormatPAX | FormatGNU
    76  	for {
    77  		// Discard the remainder of the file and any padding.
    78  		if err := discard(tr.r, tr.curr.physicalRemaining()); err != nil {
    79  			return nil, err
    80  		}
    81  		if _, err := tryReadFull(tr.r, tr.blk[:tr.pad]); err != nil {
    82  			return nil, err
    83  		}
    84  		tr.pad = 0
    85  
    86  		hdr, rawHdr, err := tr.readHeader()
    87  		if err != nil {
    88  			return nil, err
    89  		}
    90  		if err := tr.handleRegularFile(hdr); err != nil {
    91  			return nil, err
    92  		}
    93  		format.mayOnlyBe(hdr.Format)
    94  
    95  		// Check for PAX/GNU special headers and files.
    96  		switch hdr.Typeflag {
    97  		case TypeXHeader, TypeXGlobalHeader:
    98  			format.mayOnlyBe(FormatPAX)
    99  			paxHdrs, err = parsePAX(tr)
   100  			if err != nil {
   101  				return nil, err
   102  			}
   103  			if hdr.Typeflag == TypeXGlobalHeader {
   104  				mergePAX(hdr, paxHdrs)
   105  				return &Header{
   106  					Name:       hdr.Name,
   107  					Typeflag:   hdr.Typeflag,
   108  					Xattrs:     hdr.Xattrs,
   109  					PAXRecords: hdr.PAXRecords,
   110  					Format:     format,
   111  				}, nil
   112  			}
   113  			continue // This is a meta header affecting the next header
   114  		case TypeGNULongName, TypeGNULongLink:
   115  			format.mayOnlyBe(FormatGNU)
   116  			realname, err := readSpecialFile(tr)
   117  			if err != nil {
   118  				return nil, err
   119  			}
   120  
   121  			var p parser
   122  			switch hdr.Typeflag {
   123  			case TypeGNULongName:
   124  				gnuLongName = p.parseString(realname)
   125  			case TypeGNULongLink:
   126  				gnuLongLink = p.parseString(realname)
   127  			}
   128  			continue // This is a meta header affecting the next header
   129  		default:
   130  			// The old GNU sparse format is handled here since it is technically
   131  			// just a regular file with additional attributes.
   132  
   133  			if err := mergePAX(hdr, paxHdrs); err != nil {
   134  				return nil, err
   135  			}
   136  			if gnuLongName != "" {
   137  				hdr.Name = gnuLongName
   138  			}
   139  			if gnuLongLink != "" {
   140  				hdr.Linkname = gnuLongLink
   141  			}
   142  			if hdr.Typeflag == TypeRegA {
   143  				if strings.HasSuffix(hdr.Name, "/") {
   144  					hdr.Typeflag = TypeDir // Legacy archives use trailing slash for directories
   145  				} else {
   146  					hdr.Typeflag = TypeReg
   147  				}
   148  			}
   149  
   150  			// The extended headers may have updated the size.
   151  			// Thus, setup the regFileReader again after merging PAX headers.
   152  			if err := tr.handleRegularFile(hdr); err != nil {
   153  				return nil, err
   154  			}
   155  
   156  			// Sparse formats rely on being able to read from the logical data
   157  			// section; there must be a preceding call to handleRegularFile.
   158  			if err := tr.handleSparseFile(hdr, rawHdr); err != nil {
   159  				return nil, err
   160  			}
   161  
   162  			// Set the final guess at the format.
   163  			if format.has(FormatUSTAR) && format.has(FormatPAX) {
   164  				format.mayOnlyBe(FormatUSTAR)
   165  			}
   166  			hdr.Format = format
   167  			return hdr, nil // This is a file, so stop
   168  		}
   169  	}
   170  }
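
        // For reference, a sketch of how a PAX entry consumed by the loop above
        // is laid out in the archive (each region padded to a 512-byte boundary):
        //
        //	[ 'x' header block ][ PAX records ][ file header block ][ file data ]
        //
        // The first two regions form the meta "file"; only the last two are
        // surfaced to the caller as a single Header and its data.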
   171  
   172  // handleRegularFile sets up the current file reader and padding such that it
   173  // can only read the following logical data section. It will properly handle
   174  // special headers that contain no data section.
   175  func (tr *Reader) handleRegularFile(hdr *Header) error {
   176  	nb := hdr.Size
   177  	if isHeaderOnlyType(hdr.Typeflag) {
   178  		nb = 0
   179  	}
   180  	if nb < 0 {
   181  		return ErrHeader
   182  	}
   183  
   184  	tr.pad = blockPadding(nb)
   185  	tr.curr = &regFileReader{r: tr.r, nb: nb}
   186  	return nil
   187  }
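
        // As a worked example (numbers are illustrative): an entry with Size 1234
        // stores its data in three 512-byte blocks (1536 bytes), so tr.pad is set
        // to 302 and those trailing bytes are skipped before the next header is
        // read.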
   188  
   189  // handleSparseFile checks if the current file is a sparse format of any type
   190  // and sets the curr reader appropriately.
   191  func (tr *Reader) handleSparseFile(hdr *Header, rawHdr *block) error {
   192  	var spd sparseDatas
   193  	var err error
   194  	if hdr.Typeflag == TypeGNUSparse {
   195  		spd, err = tr.readOldGNUSparseMap(hdr, rawHdr)
   196  	} else {
   197  		spd, err = tr.readGNUSparsePAXHeaders(hdr)
   198  	}
   199  
    200  	// If spd is non-nil, then this is a sparse file.
    201  	// Note that it is possible for len(spd) == 0.
   202  	if err == nil && spd != nil {
   203  		if isHeaderOnlyType(hdr.Typeflag) || !validateSparseEntries(spd, hdr.Size) {
   204  			return ErrHeader
   205  		}
   206  		sph := invertSparseEntries(spd, hdr.Size)
   207  		tr.curr = &sparseFileReader{tr.curr, sph, 0}
   208  	}
   209  	return err
   210  }
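
        // As a sketch with illustrative numbers: for a sparse entry of Size 10
        // whose data regions (sparseDatas) are {Offset: 0, Length: 2} and
        // {Offset: 5, Length: 3}, invertSparseEntries yields the holes
        // {Offset: 2, Length: 3} and {Offset: 8, Length: 2}, and reads of the
        // hole regions come back as NUL bytes.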
   211  
   212  // readGNUSparsePAXHeaders checks the PAX headers for GNU sparse headers.
   213  // If they are found, then this function reads the sparse map and returns it.
   214  // This assumes that 0.0 headers have already been converted to 0.1 headers
   215  // by the PAX header parsing logic.
   216  func (tr *Reader) readGNUSparsePAXHeaders(hdr *Header) (sparseDatas, error) {
   217  	// Identify the version of GNU headers.
   218  	var is1x0 bool
   219  	major, minor := hdr.PAXRecords[paxGNUSparseMajor], hdr.PAXRecords[paxGNUSparseMinor]
   220  	switch {
   221  	case major == "0" && (minor == "0" || minor == "1"):
   222  		is1x0 = false
   223  	case major == "1" && minor == "0":
   224  		is1x0 = true
   225  	case major != "" || minor != "":
   226  		return nil, nil // Unknown GNU sparse PAX version
   227  	case hdr.PAXRecords[paxGNUSparseMap] != "":
   228  		is1x0 = false // 0.0 and 0.1 did not have explicit version records, so guess
   229  	default:
   230  		return nil, nil // Not a PAX format GNU sparse file.
   231  	}
   232  	hdr.Format.mayOnlyBe(FormatPAX)
   233  
   234  	// Update hdr from GNU sparse PAX headers.
   235  	if name := hdr.PAXRecords[paxGNUSparseName]; name != "" {
   236  		hdr.Name = name
   237  	}
   238  	size := hdr.PAXRecords[paxGNUSparseSize]
   239  	if size == "" {
   240  		size = hdr.PAXRecords[paxGNUSparseRealSize]
   241  	}
   242  	if size != "" {
   243  		n, err := strconv.ParseInt(size, 10, 64)
   244  		if err != nil {
   245  			return nil, ErrHeader
   246  		}
   247  		hdr.Size = n
   248  	}
   249  
   250  	// Read the sparse map according to the appropriate format.
   251  	if is1x0 {
   252  		return readGNUSparseMap1x0(tr.curr)
   253  	}
   254  	return readGNUSparseMap0x1(hdr.PAXRecords)
   255  }
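
        // For reference, a GNU sparse 1.0 entry typically carries PAX records
        // like the following (values are illustrative):
        //
        //	GNU.sparse.major    = "1"
        //	GNU.sparse.minor    = "0"
        //	GNU.sparse.name     = "real-name.dat"
        //	GNU.sparse.realsize = "1048576"
        //
        // while 0.0/0.1 entries are instead described by GNU.sparse.numblocks
        // and GNU.sparse.map, which readGNUSparseMap0x1 decodes.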
   256  
   257  // mergePAX merges paxHdrs into hdr for all relevant fields of Header.
   258  func mergePAX(hdr *Header, paxHdrs map[string]string) (err error) {
   259  	for k, v := range paxHdrs {
   260  		if v == "" {
   261  			continue // Keep the original USTAR value
   262  		}
   263  		var id64 int64
   264  		switch k {
   265  		case paxPath:
   266  			hdr.Name = v
   267  		case paxLinkpath:
   268  			hdr.Linkname = v
   269  		case paxUname:
   270  			hdr.Uname = v
   271  		case paxGname:
   272  			hdr.Gname = v
   273  		case paxUid:
   274  			id64, err = strconv.ParseInt(v, 10, 64)
   275  			hdr.Uid = int(id64) // Integer overflow possible
   276  		case paxGid:
   277  			id64, err = strconv.ParseInt(v, 10, 64)
   278  			hdr.Gid = int(id64) // Integer overflow possible
   279  		case paxAtime:
   280  			hdr.AccessTime, err = parsePAXTime(v)
   281  		case paxMtime:
   282  			hdr.ModTime, err = parsePAXTime(v)
   283  		case paxCtime:
   284  			hdr.ChangeTime, err = parsePAXTime(v)
   285  		case paxSize:
   286  			hdr.Size, err = strconv.ParseInt(v, 10, 64)
   287  		default:
   288  			if strings.HasPrefix(k, paxSchilyXattr) {
   289  				if hdr.Xattrs == nil {
   290  					hdr.Xattrs = make(map[string]string)
   291  				}
   292  				hdr.Xattrs[k[len(paxSchilyXattr):]] = v
   293  			}
   294  		}
   295  		if err != nil {
   296  			return ErrHeader
   297  		}
   298  	}
   299  	hdr.PAXRecords = paxHdrs
   300  	return nil
   301  }
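
        // As a sketch: given paxHdrs such as
        //
        //	map[string]string{"path": "very/long/name.txt", "size": "1500"}
        //
        // mergePAX overrides hdr.Name and hdr.Size with those values and records
        // the full map in hdr.PAXRecords.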
   302  
   303  // parsePAX parses PAX headers.
   304  // If an extended header (type 'x') is invalid, ErrHeader is returned.
   305  func parsePAX(r io.Reader) (map[string]string, error) {
   306  	buf, err := readSpecialFile(r)
   307  	if err != nil {
   308  		return nil, err
   309  	}
   310  	sbuf := string(buf)
   311  
   312  	// For GNU PAX sparse format 0.0 support.
   313  	// This function transforms the sparse format 0.0 headers into format 0.1
   314  	// headers since 0.0 headers were not PAX compliant.
   315  	var sparseMap []string
   316  
   317  	paxHdrs := make(map[string]string)
   318  	for len(sbuf) > 0 {
   319  		key, value, residual, err := parsePAXRecord(sbuf)
   320  		if err != nil {
   321  			return nil, ErrHeader
   322  		}
   323  		sbuf = residual
   324  
   325  		switch key {
   326  		case paxGNUSparseOffset, paxGNUSparseNumBytes:
   327  			// Validate sparse header order and value.
   328  			if (len(sparseMap)%2 == 0 && key != paxGNUSparseOffset) ||
   329  				(len(sparseMap)%2 == 1 && key != paxGNUSparseNumBytes) ||
   330  				strings.Contains(value, ",") {
   331  				return nil, ErrHeader
   332  			}
   333  			sparseMap = append(sparseMap, value)
   334  		default:
   335  			paxHdrs[key] = value
   336  		}
   337  	}
   338  	if len(sparseMap) > 0 {
   339  		paxHdrs[paxGNUSparseMap] = strings.Join(sparseMap, ",")
   340  	}
   341  	return paxHdrs, nil
   342  }
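
        // Each PAX record parsed above is a length-prefixed line of the form
        // "%d %s=%s\n", where the leading decimal counts the bytes of the whole
        // record, including the count itself, the space, and the trailing
        // newline, e.g.:
        //
        //	30 mtime=1350244992.023960108\n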
   343  
   344  // readHeader reads the next block header and assumes that the underlying reader
   345  // is already aligned to a block boundary. It returns the raw block of the
   346  // header in case further processing is required.
   347  //
   348  // The err will be set to io.EOF only when one of the following occurs:
   349  //   - Exactly 0 bytes are read and EOF is hit.
   350  //   - Exactly 1 block of zeros is read and EOF is hit.
   351  //   - At least 2 blocks of zeros are read.
   352  func (tr *Reader) readHeader() (*Header, *block, error) {
   353  	// Two blocks of zero bytes marks the end of the archive.
   354  	if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
   355  		return nil, nil, err // EOF is okay here; exactly 0 bytes read
   356  	}
   357  	if bytes.Equal(tr.blk[:], zeroBlock[:]) {
   358  		if _, err := io.ReadFull(tr.r, tr.blk[:]); err != nil {
   359  			return nil, nil, err // EOF is okay here; exactly 1 block of zeros read
   360  		}
   361  		if bytes.Equal(tr.blk[:], zeroBlock[:]) {
   362  			return nil, nil, io.EOF // normal EOF; exactly 2 blocks of zeros read
   363  		}
   364  		return nil, nil, ErrHeader // Zero block and then non-zero block
   365  	}
   366  
   367  	// Verify the header matches a known format.
   368  	format := tr.blk.getFormat()
   369  	if format == FormatUnknown {
   370  		return nil, nil, ErrHeader
   371  	}
   372  
   373  	var p parser
   374  	hdr := new(Header)
   375  
   376  	// Unpack the V7 header.
   377  	v7 := tr.blk.toV7()
   378  	hdr.Typeflag = v7.typeFlag()[0]
   379  	hdr.Name = p.parseString(v7.name())
   380  	hdr.Linkname = p.parseString(v7.linkName())
   381  	hdr.Size = p.parseNumeric(v7.size())
   382  	hdr.Mode = p.parseNumeric(v7.mode())
   383  	hdr.Uid = int(p.parseNumeric(v7.uid()))
   384  	hdr.Gid = int(p.parseNumeric(v7.gid()))
   385  	hdr.ModTime = time.Unix(p.parseNumeric(v7.modTime()), 0)
   386  
   387  	// Unpack format specific fields.
   388  	if format > formatV7 {
   389  		ustar := tr.blk.toUSTAR()
   390  		hdr.Uname = p.parseString(ustar.userName())
   391  		hdr.Gname = p.parseString(ustar.groupName())
   392  		hdr.Devmajor = p.parseNumeric(ustar.devMajor())
   393  		hdr.Devminor = p.parseNumeric(ustar.devMinor())
   394  
   395  		var prefix string
   396  		switch {
   397  		case format.has(FormatUSTAR | FormatPAX):
   398  			hdr.Format = format
   399  			ustar := tr.blk.toUSTAR()
   400  			prefix = p.parseString(ustar.prefix())
   401  
   402  			// For Format detection, check if block is properly formatted since
   403  			// the parser is more liberal than what USTAR actually permits.
   404  			notASCII := func(r rune) bool { return r >= 0x80 }
   405  			if bytes.IndexFunc(tr.blk[:], notASCII) >= 0 {
   406  				hdr.Format = FormatUnknown // Non-ASCII characters in block.
   407  			}
   408  			nul := func(b []byte) bool { return int(b[len(b)-1]) == 0 }
   409  			if !(nul(v7.size()) && nul(v7.mode()) && nul(v7.uid()) && nul(v7.gid()) &&
   410  				nul(v7.modTime()) && nul(ustar.devMajor()) && nul(ustar.devMinor())) {
   411  				hdr.Format = FormatUnknown // Numeric fields must end in NUL
   412  			}
   413  		case format.has(formatSTAR):
   414  			star := tr.blk.toSTAR()
   415  			prefix = p.parseString(star.prefix())
   416  			hdr.AccessTime = time.Unix(p.parseNumeric(star.accessTime()), 0)
   417  			hdr.ChangeTime = time.Unix(p.parseNumeric(star.changeTime()), 0)
   418  		case format.has(FormatGNU):
   419  			hdr.Format = format
   420  			var p2 parser
   421  			gnu := tr.blk.toGNU()
   422  			if b := gnu.accessTime(); b[0] != 0 {
   423  				hdr.AccessTime = time.Unix(p2.parseNumeric(b), 0)
   424  			}
   425  			if b := gnu.changeTime(); b[0] != 0 {
   426  				hdr.ChangeTime = time.Unix(p2.parseNumeric(b), 0)
   427  			}
   428  
   429  			// Prior to Go1.8, the Writer had a bug where it would output
   430  			// an invalid tar file in certain rare situations because the logic
   431  			// incorrectly believed that the old GNU format had a prefix field.
   432  			// This is wrong and leads to an output file that mangles the
   433  			// atime and ctime fields, which are often left unused.
   434  			//
   435  			// In order to continue reading tar files created by former, buggy
   436  			// versions of Go, we skeptically parse the atime and ctime fields.
   437  			// If we are unable to parse them and the prefix field looks like
   438  			// an ASCII string, then we fallback on the pre-Go1.8 behavior
   439  			// of treating these fields as the USTAR prefix field.
   440  			//
   441  			// Note that this will not use the fallback logic for all possible
   442  			// files generated by a pre-Go1.8 toolchain. If the generated file
   443  			// happened to have a prefix field that parses as valid
   444  			// atime and ctime fields (e.g., when they are valid octal strings),
   445  			// then it is impossible to distinguish between a valid GNU file
   446  			// and an invalid pre-Go1.8 file.
   447  			//
   448  			// See https://golang.org/issues/12594
   449  			// See https://golang.org/issues/21005
   450  			if p2.err != nil {
   451  				hdr.AccessTime, hdr.ChangeTime = time.Time{}, time.Time{}
   452  				ustar := tr.blk.toUSTAR()
   453  				if s := p.parseString(ustar.prefix()); isASCII(s) {
   454  					prefix = s
   455  				}
   456  				hdr.Format = FormatUnknown // Buggy file is not GNU
   457  			}
   458  		}
   459  		if len(prefix) > 0 {
   460  			hdr.Name = prefix + "/" + hdr.Name
   461  		}
   462  	}
   463  	return hdr, &tr.blk, p.err
   464  }
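
        // For reference, the V7 fields unpacked above live at these fixed byte
        // offsets within the 512-byte header block:
        //
        //	name     [  0:100]
        //	mode     [100:108]
        //	uid      [108:116]
        //	gid      [116:124]
        //	size     [124:136]
        //	modTime  [136:148]
        //	chksum   [148:156]
        //	typeflag [156:157]
        //	linkName [157:257]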
   465  
   466  // readOldGNUSparseMap reads the sparse map from the old GNU sparse format.
   467  // The sparse map is stored in the tar header if it's small enough.
   468  // If it's larger than four entries, then one or more extension headers are used
   469  // to store the rest of the sparse map.
   470  //
   471  // The Header.Size does not reflect the size of any extended headers used.
   472  // Thus, this function will read from the raw io.Reader to fetch extra headers.
   473  // This method mutates blk in the process.
   474  func (tr *Reader) readOldGNUSparseMap(hdr *Header, blk *block) (sparseDatas, error) {
   475  	// Make sure that the input format is GNU.
   476  	// Unfortunately, the STAR format also has a sparse header format that uses
   477  	// the same type flag but has a completely different layout.
   478  	if blk.getFormat() != FormatGNU {
   479  		return nil, ErrHeader
   480  	}
   481  	hdr.Format.mayOnlyBe(FormatGNU)
   482  
   483  	var p parser
   484  	hdr.Size = p.parseNumeric(blk.toGNU().realSize())
   485  	if p.err != nil {
   486  		return nil, p.err
   487  	}
   488  	s := blk.toGNU().sparse()
   489  	spd := make(sparseDatas, 0, s.maxEntries())
   490  	for {
   491  		for i := 0; i < s.maxEntries(); i++ {
   492  			// This termination condition is identical to GNU and BSD tar.
   493  			if s.entry(i).offset()[0] == 0x00 {
   494  				break // Don't return, need to process extended headers (even if empty)
   495  			}
   496  			offset := p.parseNumeric(s.entry(i).offset())
   497  			length := p.parseNumeric(s.entry(i).length())
   498  			if p.err != nil {
   499  				return nil, p.err
   500  			}
   501  			spd = append(spd, sparseEntry{Offset: offset, Length: length})
   502  		}
   503  
   504  		if s.isExtended()[0] > 0 {
   505  			// There are more entries. Read an extension header and parse its entries.
   506  			if _, err := mustReadFull(tr.r, blk[:]); err != nil {
   507  				return nil, err
   508  			}
   509  			s = blk.toSparse()
   510  			continue
   511  		}
   512  		return spd, nil // Done
   513  	}
   514  }
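
        // For reference, the old GNU sparse region of the header block holds up
        // to four inline entries, each a 12-byte numeric offset followed by a
        // 12-byte numeric length, starting at byte 386, followed by a one-byte
        // isExtended flag at byte 482 and a 12-byte realSize field at byte 483.
        // Extension blocks are filled with further 24-byte entries plus their
        // own isExtended flag.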
   515  
   516  // readGNUSparseMap1x0 reads the sparse map as stored in GNU's PAX sparse format
   517  // version 1.0. The format of the sparse map consists of a series of
   518  // newline-terminated numeric fields. The first field is the number of entries
   519  // and is always present. Following this are the entries, consisting of two
   520  // fields (offset, length). This function must stop reading at the end
   521  // boundary of the block containing the last newline.
   522  //
   523  // Note that the GNU manual says that numeric values should be encoded in octal
   524  // format. However, the GNU tar utility itself outputs these values in decimal.
   525  // As such, this library treats values as being encoded in decimal.
   526  func readGNUSparseMap1x0(r io.Reader) (sparseDatas, error) {
   527  	var (
   528  		cntNewline int64
   529  		buf        bytes.Buffer
   530  		blk        block
   531  	)
   532  
    533  	// feedTokens copies data in blocks from r into buf until there are
    534  	// at least n newlines in buf. It will not read more blocks than needed.
   535  	feedTokens := func(n int64) error {
   536  		for cntNewline < n {
   537  			if _, err := mustReadFull(r, blk[:]); err != nil {
   538  				return err
   539  			}
   540  			buf.Write(blk[:])
   541  			for _, c := range blk {
   542  				if c == '\n' {
   543  					cntNewline++
   544  				}
   545  			}
   546  		}
   547  		return nil
   548  	}
   549  
   550  	// nextToken gets the next token delimited by a newline. This assumes that
   551  	// at least one newline exists in the buffer.
   552  	nextToken := func() string {
   553  		cntNewline--
   554  		tok, _ := buf.ReadString('\n')
   555  		return strings.TrimRight(tok, "\n")
   556  	}
   557  
   558  	// Parse for the number of entries.
   559  	// Use integer overflow resistant math to check this.
   560  	if err := feedTokens(1); err != nil {
   561  		return nil, err
   562  	}
   563  	numEntries, err := strconv.ParseInt(nextToken(), 10, 0) // Intentionally parse as native int
   564  	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
   565  		return nil, ErrHeader
   566  	}
   567  
   568  	// Parse for all member entries.
   569  	// numEntries is trusted after this since a potential attacker must have
   570  	// committed resources proportional to what this library used.
   571  	if err := feedTokens(2 * numEntries); err != nil {
   572  		return nil, err
   573  	}
   574  	spd := make(sparseDatas, 0, numEntries)
   575  	for i := int64(0); i < numEntries; i++ {
   576  		offset, err1 := strconv.ParseInt(nextToken(), 10, 64)
   577  		length, err2 := strconv.ParseInt(nextToken(), 10, 64)
   578  		if err1 != nil || err2 != nil {
   579  			return nil, ErrHeader
   580  		}
   581  		spd = append(spd, sparseEntry{Offset: offset, Length: length})
   582  	}
   583  	return spd, nil
   584  }
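
        // As a sketch with illustrative values: a 1.0 sparse map describing a
        // single data region at offset 0 of length 512 is stored in the data
        // section as the text
        //
        //	"1\n0\n512\n"
        //
        // padded with NULs to the end of its 512-byte block, which is exactly
        // where this function stops reading.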
   585  
   586  // readGNUSparseMap0x1 reads the sparse map as stored in GNU's PAX sparse format
   587  // version 0.1. The sparse map is stored in the PAX headers.
   588  func readGNUSparseMap0x1(paxHdrs map[string]string) (sparseDatas, error) {
   589  	// Get number of entries.
   590  	// Use integer overflow resistant math to check this.
   591  	numEntriesStr := paxHdrs[paxGNUSparseNumBlocks]
   592  	numEntries, err := strconv.ParseInt(numEntriesStr, 10, 0) // Intentionally parse as native int
   593  	if err != nil || numEntries < 0 || int(2*numEntries) < int(numEntries) {
   594  		return nil, ErrHeader
   595  	}
   596  
   597  	// There should be two numbers in sparseMap for each entry.
   598  	sparseMap := strings.Split(paxHdrs[paxGNUSparseMap], ",")
   599  	if len(sparseMap) == 1 && sparseMap[0] == "" {
   600  		sparseMap = sparseMap[:0]
   601  	}
   602  	if int64(len(sparseMap)) != 2*numEntries {
   603  		return nil, ErrHeader
   604  	}
   605  
   606  	// Loop through the entries in the sparse map.
   607  	// numEntries is trusted now.
   608  	spd := make(sparseDatas, 0, numEntries)
   609  	for len(sparseMap) >= 2 {
   610  		offset, err1 := strconv.ParseInt(sparseMap[0], 10, 64)
   611  		length, err2 := strconv.ParseInt(sparseMap[1], 10, 64)
   612  		if err1 != nil || err2 != nil {
   613  			return nil, ErrHeader
   614  		}
   615  		spd = append(spd, sparseEntry{Offset: offset, Length: length})
   616  		sparseMap = sparseMap[2:]
   617  	}
   618  	return spd, nil
   619  }
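
        // As a sketch with illustrative values: PAX records
        //
        //	GNU.sparse.numblocks = "2"
        //	GNU.sparse.map       = "0,512,1024,512"
        //
        // decode to the sparse data entries {Offset: 0, Length: 512} and
        // {Offset: 1024, Length: 512}.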
   620  
   621  // Read reads from the current file in the tar archive.
   622  // It returns (0, io.EOF) when it reaches the end of that file,
   623  // until Next is called to advance to the next file.
   624  //
   625  // If the current file is sparse, then the regions marked as a hole
   626  // are read back as NUL-bytes.
   627  //
   628  // Calling Read on special types like TypeLink, TypeSymlink, TypeChar,
   629  // TypeBlock, TypeDir, and TypeFifo returns (0, io.EOF) regardless of what
   630  // the Header.Size claims.
   631  func (tr *Reader) Read(b []byte) (int, error) {
   632  	if tr.err != nil {
   633  		return 0, tr.err
   634  	}
   635  	n, err := tr.curr.Read(b)
   636  	if err != nil && err != io.EOF {
   637  		tr.err = err
   638  	}
   639  	return n, err
   640  }
   641  
   642  // writeTo writes the content of the current file to w.
    643  // The number of bytes written matches the number of bytes remaining in the current file.
   644  //
   645  // If the current file is sparse and w is an io.WriteSeeker,
   646  // then writeTo uses Seek to skip past holes defined in Header.SparseHoles,
   647  // assuming that skipped regions are filled with NULs.
   648  // This always writes the last byte to ensure w is the right size.
   649  //
   650  // TODO(dsnet): Re-export this when adding sparse file support.
   651  // See https://golang.org/issue/22735
   652  func (tr *Reader) writeTo(w io.Writer) (int64, error) {
   653  	if tr.err != nil {
   654  		return 0, tr.err
   655  	}
   656  	n, err := tr.curr.WriteTo(w)
   657  	if err != nil {
   658  		tr.err = err
   659  	}
   660  	return n, err
   661  }
   662  
   663  // regFileReader is a fileReader for reading data from a regular file entry.
   664  type regFileReader struct {
   665  	r  io.Reader // Underlying Reader
   666  	nb int64     // Number of remaining bytes to read
   667  }
   668  
   669  func (fr *regFileReader) Read(b []byte) (n int, err error) {
   670  	if int64(len(b)) > fr.nb {
   671  		b = b[:fr.nb]
   672  	}
   673  	if len(b) > 0 {
   674  		n, err = fr.r.Read(b)
   675  		fr.nb -= int64(n)
   676  	}
   677  	switch {
   678  	case err == io.EOF && fr.nb > 0:
   679  		return n, io.ErrUnexpectedEOF
   680  	case err == nil && fr.nb == 0:
   681  		return n, io.EOF
   682  	default:
   683  		return n, err
   684  	}
   685  }
   686  
   687  func (fr *regFileReader) WriteTo(w io.Writer) (int64, error) {
   688  	return io.Copy(w, struct{ io.Reader }{fr})
   689  }
   690  
   691  // logicalRemaining implements fileState.logicalRemaining.
   692  func (fr regFileReader) logicalRemaining() int64 {
   693  	return fr.nb
   694  }
   695  
   696  // physicalRemaining implements fileState.physicalRemaining.
   697  func (fr regFileReader) physicalRemaining() int64 {
   698  	return fr.nb
   699  }
   700  
   701  // sparseFileReader is a fileReader for reading data from a sparse file entry.
   702  type sparseFileReader struct {
   703  	fr  fileReader  // Underlying fileReader
   704  	sp  sparseHoles // Normalized list of sparse holes
   705  	pos int64       // Current position in sparse file
   706  }
   707  
   708  func (sr *sparseFileReader) Read(b []byte) (n int, err error) {
   709  	finished := int64(len(b)) >= sr.logicalRemaining()
   710  	if finished {
   711  		b = b[:sr.logicalRemaining()]
   712  	}
   713  
   714  	b0 := b
   715  	endPos := sr.pos + int64(len(b))
   716  	for endPos > sr.pos && err == nil {
   717  		var nf int // Bytes read in fragment
   718  		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
   719  		if sr.pos < holeStart { // In a data fragment
   720  			bf := b[:min(int64(len(b)), holeStart-sr.pos)]
   721  			nf, err = tryReadFull(sr.fr, bf)
   722  		} else { // In a hole fragment
   723  			bf := b[:min(int64(len(b)), holeEnd-sr.pos)]
   724  			nf, err = tryReadFull(zeroReader{}, bf)
   725  		}
   726  		b = b[nf:]
   727  		sr.pos += int64(nf)
   728  		if sr.pos >= holeEnd && len(sr.sp) > 1 {
   729  			sr.sp = sr.sp[1:] // Ensure last fragment always remains
   730  		}
   731  	}
   732  
   733  	n = len(b0) - len(b)
   734  	switch {
   735  	case err == io.EOF:
   736  		return n, errMissData // Less data in dense file than sparse file
   737  	case err != nil:
   738  		return n, err
   739  	case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
   740  		return n, errUnrefData // More data in dense file than sparse file
   741  	case finished:
   742  		return n, io.EOF
   743  	default:
   744  		return n, nil
   745  	}
   746  }
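
        // As a sketch with illustrative numbers: with holes {Offset: 2, Length: 3}
        // and {Offset: 8, Length: 2} over a 10-byte sparse file, a single 10-byte
        // Read returns the data fragments [0:2) and [5:8) from the underlying
        // reader and NUL bytes for [2:5) and [8:10).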
   747  
   748  func (sr *sparseFileReader) WriteTo(w io.Writer) (n int64, err error) {
   749  	ws, ok := w.(io.WriteSeeker)
   750  	if ok {
   751  		if _, err := ws.Seek(0, io.SeekCurrent); err != nil {
   752  			ok = false // Not all io.Seeker can really seek
   753  		}
   754  	}
   755  	if !ok {
   756  		return io.Copy(w, struct{ io.Reader }{sr})
   757  	}
   758  
   759  	var writeLastByte bool
   760  	pos0 := sr.pos
   761  	for sr.logicalRemaining() > 0 && !writeLastByte && err == nil {
   762  		var nf int64 // Size of fragment
   763  		holeStart, holeEnd := sr.sp[0].Offset, sr.sp[0].endOffset()
   764  		if sr.pos < holeStart { // In a data fragment
   765  			nf = holeStart - sr.pos
   766  			nf, err = io.CopyN(ws, sr.fr, nf)
   767  		} else { // In a hole fragment
   768  			nf = holeEnd - sr.pos
   769  			if sr.physicalRemaining() == 0 {
   770  				writeLastByte = true
   771  				nf--
   772  			}
   773  			_, err = ws.Seek(nf, io.SeekCurrent)
   774  		}
   775  		sr.pos += nf
   776  		if sr.pos >= holeEnd && len(sr.sp) > 1 {
   777  			sr.sp = sr.sp[1:] // Ensure last fragment always remains
   778  		}
   779  	}
   780  
   781  	// If the last fragment is a hole, then seek to 1-byte before EOF, and
   782  	// write a single byte to ensure the file is the right size.
   783  	if writeLastByte && err == nil {
   784  		_, err = ws.Write([]byte{0})
   785  		sr.pos++
   786  	}
   787  
   788  	n = sr.pos - pos0
   789  	switch {
   790  	case err == io.EOF:
   791  		return n, errMissData // Less data in dense file than sparse file
   792  	case err != nil:
   793  		return n, err
   794  	case sr.logicalRemaining() == 0 && sr.physicalRemaining() > 0:
   795  		return n, errUnrefData // More data in dense file than sparse file
   796  	default:
   797  		return n, nil
   798  	}
   799  }
   800  
   801  func (sr sparseFileReader) logicalRemaining() int64 {
   802  	return sr.sp[len(sr.sp)-1].endOffset() - sr.pos
   803  }
   804  func (sr sparseFileReader) physicalRemaining() int64 {
   805  	return sr.fr.physicalRemaining()
   806  }
   807  
   808  type zeroReader struct{}
   809  
   810  func (zeroReader) Read(b []byte) (int, error) {
   811  	for i := range b {
   812  		b[i] = 0
   813  	}
   814  	return len(b), nil
   815  }
   816  
   817  // mustReadFull is like io.ReadFull except it returns
   818  // io.ErrUnexpectedEOF when io.EOF is hit before len(b) bytes are read.
   819  func mustReadFull(r io.Reader, b []byte) (int, error) {
   820  	n, err := tryReadFull(r, b)
   821  	if err == io.EOF {
   822  		err = io.ErrUnexpectedEOF
   823  	}
   824  	return n, err
   825  }
   826  
   827  // tryReadFull is like io.ReadFull except it returns
   828  // io.EOF when it is hit before len(b) bytes are read.
   829  func tryReadFull(r io.Reader, b []byte) (n int, err error) {
   830  	for len(b) > n && err == nil {
   831  		var nn int
   832  		nn, err = r.Read(b[n:])
   833  		n += nn
   834  	}
   835  	if len(b) == n && err == io.EOF {
   836  		err = nil
   837  	}
   838  	return n, err
   839  }
   840  
   841  // readSpecialFile is like io.ReadAll except it returns
   842  // ErrFieldTooLong if more than maxSpecialFileSize is read.
   843  func readSpecialFile(r io.Reader) ([]byte, error) {
   844  	buf, err := io.ReadAll(io.LimitReader(r, maxSpecialFileSize+1))
   845  	if len(buf) > maxSpecialFileSize {
   846  		return nil, ErrFieldTooLong
   847  	}
   848  	return buf, err
   849  }
   850  
   851  // discard skips n bytes in r, reporting an error if unable to do so.
   852  func discard(r io.Reader, n int64) error {
   853  	// If possible, Seek to the last byte before the end of the data section.
   854  	// Do this because Seek is often lazy about reporting errors; this will mask
   855  	// the fact that the stream may be truncated. We can rely on the
   856  	// io.CopyN done shortly afterwards to trigger any IO errors.
   857  	var seekSkipped int64 // Number of bytes skipped via Seek
   858  	if sr, ok := r.(io.Seeker); ok && n > 1 {
   859  		// Not all io.Seeker can actually Seek. For example, os.Stdin implements
   860  		// io.Seeker, but calling Seek always returns an error and performs
   861  		// no action. Thus, we try an innocent seek to the current position
   862  		// to see if Seek is really supported.
   863  		pos1, err := sr.Seek(0, io.SeekCurrent)
   864  		if pos1 >= 0 && err == nil {
   865  			// Seek seems supported, so perform the real Seek.
   866  			pos2, err := sr.Seek(n-1, io.SeekCurrent)
   867  			if pos2 < 0 || err != nil {
   868  				return err
   869  			}
   870  			seekSkipped = pos2 - pos1
   871  		}
   872  	}
   873  
   874  	copySkipped, err := io.CopyN(io.Discard, r, n-seekSkipped)
   875  	if err == io.EOF && seekSkipped+copySkipped < n {
   876  		err = io.ErrUnexpectedEOF
   877  	}
   878  	return err
   879  }
   880  
