system/docker: Updated for version 1.8.2.

Signed-off-by: Willy Sudiarto Raharjo <willysr@slackbuilds.org>
Author: Vincent Batts (2015-09-30 22:48:46 +07:00); committed by Willy Sudiarto Raharjo
parent 3a424a9d7c
commit 47dfc8d36e
7 changed files with 869 additions and 5 deletions

diff --git a/system/docker/docker.SlackBuild b/system/docker/docker.SlackBuild
@@ -5,11 +5,11 @@
# Written by Vincent Batts <vbatts@hashbangbash.com>
PRGNAM=docker
-VERSION=${VERSION:-1.6.2}
+VERSION=${VERSION:-1.8.2}
BUILD=${BUILD:-1}
TAG=${TAG:-_SBo}
-GITHASH=${GITHASH:-7c8fca2}
+GITHASH=${GITHASH:-0a8c2e3}
if [ -z "$ARCH" ]; then
case "$( uname -m )" in
@@ -61,6 +61,12 @@ unset GOPATH
# https://github.com/docker/docker/commit/6922f1be08111d889b0585b763b08f92d7a55e05
patch -p1 -R < $CWD/docker-btrfs.patch
+# a couple of patches that missed the 1.8.2 release window, but are essential
+for patch in ${CWD}/patches/*.patch
+do
+  patch -p1 < ${patch}
+done
mkdir -p ${PKG}/usr/share/gocode/src/github.com/docker/docker
cp -a . ${PKG}/usr/share/gocode/src/github.com/docker/docker/

diff --git a/system/docker/docker.info b/system/docker/docker.info
@@ -1,10 +1,10 @@
PRGNAM="docker"
VERSION="1.6.2"
VERSION="1.8.2"
HOMEPAGE="https://docker.io/"
DOWNLOAD="UNSUPPORTED"
MD5SUM=""
-DOWNLOAD_x86_64="https://github.com/docker/docker/archive/v1.6.2.tar.gz"
-MD5SUM_x86_64="81a1a015ec0520d739ec721f8295d94f"
+DOWNLOAD_x86_64="https://github.com/docker/docker/archive/v1.8.2.tar.gz"
+MD5SUM_x86_64="4faf25b356900f3e7599783ad4565e69"
REQUIRES="google-go-lang"
MAINTAINER="Vincent Batts"
EMAIL="vbatts@hashbangbash.com"

@@ -0,0 +1,88 @@
From f7236a195c84687edb74fec28b6c4cc98e34185c Mon Sep 17 00:00:00 2001
From: Vincent Batts <vbatts@redhat.com>
Date: Fri, 7 Aug 2015 10:18:20 -0400
Subject: [PATCH 1/4] devicemapper: fix zero-sized field access
Fixes: #15279
Due to
https://github.com/golang/go/commit/7904946eeb35faece61bbf6f5b3cc8be2f519c17
the devices field is dropped.
This solution works on go1.4 and go1.5
Signed-off-by: Vincent Batts <vbatts@redhat.com>
---
daemon/graphdriver/devmapper/deviceset.go | 14 +++++++++-----
pkg/devicemapper/devmapper_wrapper.go | 18 +++++++++++++++---
2 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/daemon/graphdriver/devmapper/deviceset.go b/daemon/graphdriver/devmapper/deviceset.go
index 2eee330..a80736a 100644
--- a/daemon/graphdriver/devmapper/deviceset.go
+++ b/daemon/graphdriver/devmapper/deviceset.go
@@ -1482,12 +1482,16 @@ func (devices *DeviceSet) deactivatePool() error {
if err != nil {
return err
}
- if d, err := devicemapper.GetDeps(devname); err == nil {
- // Access to more Debug output
- logrus.Debugf("[devmapper] devicemapper.GetDeps() %s: %#v", devname, d)
+
+ if devinfo.Exists == 0 {
+ return nil
}
- if devinfo.Exists != 0 {
- return devicemapper.RemoveDevice(devname)
+ if err := devicemapper.RemoveDevice(devname); err != nil {
+ return err
+ }
+
+ if d, err := devicemapper.GetDeps(devname); err == nil {
+ logrus.Warnf("[devmapper] device %s still has %d active dependents", devname, d.Count)
}
return nil
diff --git a/pkg/devicemapper/devmapper_wrapper.go b/pkg/devicemapper/devmapper_wrapper.go
index 87c2003..44ca772 100644
--- a/pkg/devicemapper/devmapper_wrapper.go
+++ b/pkg/devicemapper/devmapper_wrapper.go
@@ -38,7 +38,10 @@ static void log_with_errno_init()
*/
import "C"
-import "unsafe"
+import (
+ "reflect"
+ "unsafe"
+)
type (
CDmTask C.struct_dm_task
@@ -184,12 +187,21 @@ func dmTaskGetDepsFct(task *CDmTask) *Deps {
if Cdeps == nil {
return nil
}
+
+ // golang issue: https://github.com/golang/go/issues/11925
+ hdr := reflect.SliceHeader{
+ Data: uintptr(unsafe.Pointer(uintptr(unsafe.Pointer(Cdeps)) + unsafe.Sizeof(*Cdeps))),
+ Len: int(Cdeps.count),
+ Cap: int(Cdeps.count),
+ }
+ devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
+
deps := &Deps{
Count: uint32(Cdeps.count),
Filler: uint32(Cdeps.filler),
}
- for _, device := range Cdeps.device {
- deps.Device = append(deps.Device, (uint64)(device))
+ for _, device := range devices {
+ deps.Device = append(deps.Device, uint64(device))
}
return deps
}
--
2.4.3
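
The patch above works around a cgo change in go1.5 (golang commit 7904946): a zero-length trailing array in a C struct no longer produces a usable field on the Go side, so the list of dependent devices has to be read through a hand-built slice header over the memory that follows the struct. A minimal standalone sketch of the same technique, using a stand-in C struct rather than libdevmapper's real dm_deps:

    package main

    /*
    #include <stdint.h>
    #include <stdlib.h>

    struct deps {
        uint32_t count;
        uint32_t filler;
        uint64_t device[0]; // flexible tail: dropped by cgo as of go1.5
    };

    static struct deps *make_deps(void) {
        struct deps *d = malloc(sizeof(*d) + 3 * sizeof(uint64_t));
        d->count = 3;
        d->filler = 0;
        for (int i = 0; i < 3; i++)
            d->device[i] = (uint64_t)(i + 1) * 10;
        return d;
    }
    */
    import "C"

    import (
        "fmt"
        "reflect"
        "unsafe"
    )

    func main() {
        cdeps := C.make_deps()
        defer C.free(unsafe.Pointer(cdeps))

        // The 'device' field is unusable from Go, so build a slice
        // over the memory immediately after the struct, exactly as the
        // patch does with reflect.SliceHeader.
        hdr := reflect.SliceHeader{
            Data: uintptr(unsafe.Pointer(cdeps)) + unsafe.Sizeof(*cdeps),
            Len:  int(cdeps.count),
            Cap:  int(cdeps.count),
        }
        devices := *(*[]C.uint64_t)(unsafe.Pointer(&hdr))
        for _, dev := range devices {
            fmt.Println(uint64(dev)) // 10, 20, 30
        }
    }

reflect.SliceHeader is what the patch itself uses and keeps the code working on both go1.4 and go1.5; much later Go releases would favor unsafe.Slice for the same conversion.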

@@ -0,0 +1,220 @@
From cccc745d93a59fdbb4dd7d7562ee8dd684a00786 Mon Sep 17 00:00:00 2001
From: Stephen J Day <stephen.day@docker.com>
Date: Tue, 11 Aug 2015 13:47:08 -0700
Subject: [PATCH 2/4] Avoid buffering to tempfile when pushing with V2
The practice of buffering to a tempfile during a push contributes massively
to slow V2 push performance perception. The protocol was actually designed to
avoid precalculation, supporting cut-through data push. This means we can
assemble the layer, calculate its digest and push to the remote endpoint, all
at the same time.
This should increase performance massively on systems with slow disks or IO
bottlenecks.
Signed-off-by: Stephen J Day <stephen.day@docker.com>
---
graph/graph.go | 21 --------------
graph/push_v2.go | 51 ++++++++++++++-------------------
integration-cli/docker_cli_push_test.go | 2 +-
pkg/jsonmessage/jsonmessage.go | 6 ++++
pkg/jsonmessage/jsonmessage_test.go | 4 +--
5 files changed, 31 insertions(+), 53 deletions(-)
diff --git a/graph/graph.go b/graph/graph.go
index be911b0..885de87 100644
--- a/graph/graph.go
+++ b/graph/graph.go
@@ -2,7 +2,6 @@ package graph
import (
"compress/gzip"
- "crypto/sha256"
"encoding/json"
"errors"
"fmt"
@@ -329,26 +328,6 @@ func (graph *Graph) newTempFile() (*os.File, error) {
return ioutil.TempFile(tmp, "")
}
-func bufferToFile(f *os.File, src io.Reader) (int64, digest.Digest, error) {
- var (
- h = sha256.New()
- w = gzip.NewWriter(io.MultiWriter(f, h))
- )
- _, err := io.Copy(w, src)
- w.Close()
- if err != nil {
- return 0, "", err
- }
- n, err := f.Seek(0, os.SEEK_CUR)
- if err != nil {
- return 0, "", err
- }
- if _, err := f.Seek(0, 0); err != nil {
- return 0, "", err
- }
- return n, digest.NewDigest("sha256", h), nil
-}
-
// Delete atomically removes an image from the graph.
func (graph *Graph) Delete(name string) error {
id, err := graph.idIndex.Get(name)
diff --git a/graph/push_v2.go b/graph/push_v2.go
index 92d63ca..0ec8cfd 100644
--- a/graph/push_v2.go
+++ b/graph/push_v2.go
@@ -2,8 +2,8 @@ package graph
import (
"fmt"
+ "io"
"io/ioutil"
- "os"
"github.com/Sirupsen/logrus"
"github.com/docker/distribution"
@@ -199,7 +199,7 @@ func (p *v2Pusher) pushV2Tag(tag string) error {
func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (digest.Digest, error) {
out := p.config.OutStream
- out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Buffering to Disk", nil))
+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Preparing", nil))
image, err := p.graph.Get(img.ID)
if err != nil {
@@ -209,52 +209,45 @@ func (p *v2Pusher) pushV2Image(bs distribution.BlobService, img *image.Image) (d
if err != nil {
return "", err
}
-
- tf, err := p.graph.newTempFile()
- if err != nil {
- return "", err
- }
- defer func() {
- tf.Close()
- os.Remove(tf.Name())
- }()
-
- size, dgst, err := bufferToFile(tf, arch)
- if err != nil {
- return "", err
- }
+ defer arch.Close()
// Send the layer
- logrus.Debugf("rendered layer for %s of [%d] size", img.ID, size)
layerUpload, err := bs.Create(context.Background())
if err != nil {
return "", err
}
defer layerUpload.Close()
+ digester := digest.Canonical.New()
+ tee := io.TeeReader(arch, digester.Hash())
+
reader := progressreader.New(progressreader.Config{
- In: ioutil.NopCloser(tf),
+ In: ioutil.NopCloser(tee), // we'll take care of close here.
Out: out,
Formatter: p.sf,
- Size: int(size),
- NewLines: false,
- ID: stringid.TruncateID(img.ID),
- Action: "Pushing",
+ // TODO(stevvooe): This may cause a size reporting error. Try to get
+ // this from tar-split or elsewhere. The main issue here is that we
+ // don't want to buffer to disk *just* to calculate the size.
+ Size: int(img.Size),
+
+ NewLines: false,
+ ID: stringid.TruncateID(img.ID),
+ Action: "Pushing",
})
- n, err := layerUpload.ReadFrom(reader)
+
+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushing", nil))
+ nn, err := io.Copy(layerUpload, reader)
if err != nil {
return "", err
}
- if n != size {
- return "", fmt.Errorf("short upload: only wrote %d of %d", n, size)
- }
- desc := distribution.Descriptor{Digest: dgst}
- if _, err := layerUpload.Commit(context.Background(), desc); err != nil {
+ dgst := digester.Digest()
+ if _, err := layerUpload.Commit(context.Background(), distribution.Descriptor{Digest: dgst}); err != nil {
return "", err
}
- out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Image successfully pushed", nil))
+ logrus.Debugf("uploaded layer %s (%s), %d bytes", img.ID, dgst, nn)
+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pushed", nil))
return dgst, nil
}
diff --git a/integration-cli/docker_cli_push_test.go b/integration-cli/docker_cli_push_test.go
index 111e9f3..c17a0ea 100644
--- a/integration-cli/docker_cli_push_test.go
+++ b/integration-cli/docker_cli_push_test.go
@@ -108,7 +108,7 @@ func (s *DockerRegistrySuite) TestPushInterrupt(c *check.C) {
}
// Interrupt push (yes, we have no idea at what point it will get killed).
- time.Sleep(200 * time.Millisecond)
+ time.Sleep(50 * time.Millisecond) // dependent on race condition.
if err := pushCmd.Process.Kill(); err != nil {
c.Fatalf("Failed to kill push process: %v", err)
}
diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
index 7db1626..c4b311e 100644
--- a/pkg/jsonmessage/jsonmessage.go
+++ b/pkg/jsonmessage/jsonmessage.go
@@ -61,8 +61,14 @@ func (p *JSONProgress) String() string {
}
pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
}
+
numbersBox = fmt.Sprintf("%8v/%v", current, total)
+ if p.Current > p.Total {
+ // remove total display if the reported current is wonky.
+ numbersBox = fmt.Sprintf("%8v", current)
+ }
+
if p.Current > 0 && p.Start > 0 && percentage < 50 {
fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
perEntry := fromStart / time.Duration(p.Current)
diff --git a/pkg/jsonmessage/jsonmessage_test.go b/pkg/jsonmessage/jsonmessage_test.go
index 2e78fa7..889b0ba 100644
--- a/pkg/jsonmessage/jsonmessage_test.go
+++ b/pkg/jsonmessage/jsonmessage_test.go
@@ -3,12 +3,12 @@ package jsonmessage
import (
"bytes"
"fmt"
+ "strings"
"testing"
"time"
"github.com/docker/docker/pkg/term"
"github.com/docker/docker/pkg/timeutils"
- "strings"
)
func TestError(t *testing.T) {
@@ -45,7 +45,7 @@ func TestProgress(t *testing.T) {
}
// this number can't be negative gh#7136
- expected = "[==================================================>] 50 B/40 B"
+ expected = "[==================================================>] 50 B"
jp5 := JSONProgress{Current: 50, Total: 40}
if jp5.String() != expected {
t.Fatalf("Expected %q, got %q", expected, jp5.String())
--
2.4.3
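
The heart of this patch is hashing the layer while it uploads instead of spooling it to a tempfile first: the tar stream (arch in the patch) is teed into a digester on its way to the registry, and the digest is committed once the copy finishes. A self-contained sketch of the pattern, with a strings.Builder standing in for the registry's blob writer:

    package main

    import (
        "crypto/sha256"
        "encoding/hex"
        "fmt"
        "io"
        "strings"
    )

    func main() {
        // Stand-in for the layer tar stream (arch in the patch).
        layer := strings.NewReader("pretend this is a tar'd layer")

        h := sha256.New()
        tee := io.TeeReader(layer, h) // every byte copied out is also hashed

        // Stand-in for the remote blob upload (layerUpload in the patch).
        var upload strings.Builder
        nn, err := io.Copy(&upload, tee)
        if err != nil {
            panic(err)
        }
        fmt.Printf("pushed %d bytes, digest sha256:%s\n",
            nn, hex.EncodeToString(h.Sum(nil)))
    }

Because nothing is buffered, the byte count is only known after the copy, which is why the progress bar falls back to img.Size (the TODO in the hunk) and why jsonmessage learns to hide a total that the current count has overrun.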

@@ -0,0 +1,189 @@
From e12038fb4a2f74314cf23860ea97528418832ba5 Mon Sep 17 00:00:00 2001
From: Alexander Morozov <lk4d4@docker.com>
Date: Wed, 12 Aug 2015 20:23:56 -0700
Subject: [PATCH 3/4] Refactoring of pullV2Tag
* use downloadInfo pointers everywhere
* use downloads slice only for things that we really download
* cleanup tmp files in all cases
Signed-off-by: Alexander Morozov <lk4d4@docker.com>
---
graph/pull_v2.go | 106 ++++++++++++++++++++++++++++++-------------------------
1 file changed, 58 insertions(+), 48 deletions(-)
diff --git a/graph/pull_v2.go b/graph/pull_v2.go
index 1dbb9fe..ba5e8ce 100644
--- a/graph/pull_v2.go
+++ b/graph/pull_v2.go
@@ -139,6 +139,7 @@ func (p *v2Puller) download(di *downloadInfo) {
di.err <- err
return
}
+ di.tmpFile = tmpFile
blobs := p.repo.Blobs(context.Background())
@@ -187,7 +188,6 @@ func (p *v2Puller) download(di *downloadInfo) {
out.Write(p.sf.FormatProgress(stringid.TruncateID(di.img.ID), "Download complete", nil))
logrus.Debugf("Downloaded %s to tempfile %s", di.img.ID, tmpFile.Name())
- di.tmpFile = tmpFile
di.layer = layerDownload
di.err <- nil
@@ -243,9 +243,9 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
out.Write(p.sf.FormatStatus(tag, "Pulling from %s", p.repo.Name()))
- downloads := make([]downloadInfo, len(manifest.FSLayers))
+ var downloads []*downloadInfo
- layerIDs := []string{}
+ var layerIDs []string
defer func() {
p.graph.Release(p.sessionID, layerIDs...)
}()
@@ -256,66 +256,75 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
logrus.Debugf("error getting image v1 json: %v", err)
return false, err
}
- downloads[i].img = img
- downloads[i].digest = manifest.FSLayers[i].BlobSum
-
p.graph.Retain(p.sessionID, img.ID)
layerIDs = append(layerIDs, img.ID)
// Check if exists
if p.graph.Exists(img.ID) {
logrus.Debugf("Image already exists: %s", img.ID)
+ out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Already exists", nil))
continue
}
-
out.Write(p.sf.FormatProgress(stringid.TruncateID(img.ID), "Pulling fs layer", nil))
- downloads[i].err = make(chan error)
- downloads[i].out = pipeWriter
- go p.download(&downloads[i])
+ d := &downloadInfo{
+ img: img,
+ digest: manifest.FSLayers[i].BlobSum,
+ // TODO: seems like this chan buffer solved hanging problem in go1.5,
+ // this can indicate some deeper problem that somehow we never take
+ // error from channel in loop below
+ err: make(chan error, 1),
+ out: pipeWriter,
+ }
+ downloads = append(downloads, d)
+
+ go p.download(d)
}
- var tagUpdated bool
- for i := len(downloads) - 1; i >= 0; i-- {
- d := &downloads[i]
- if d.err != nil {
- if err := <-d.err; err != nil {
- return false, err
- }
- }
- if d.layer != nil {
- // if tmpFile is empty assume download and extracted elsewhere
- defer os.Remove(d.tmpFile.Name())
- defer d.tmpFile.Close()
- d.tmpFile.Seek(0, 0)
+ // run clean for all downloads to prevent leftovers
+ for _, d := range downloads {
+ defer func(d *downloadInfo) {
if d.tmpFile != nil {
-
- reader := progressreader.New(progressreader.Config{
- In: d.tmpFile,
- Out: out,
- Formatter: p.sf,
- Size: int(d.size),
- NewLines: false,
- ID: stringid.TruncateID(d.img.ID),
- Action: "Extracting",
- })
-
- err = p.graph.Register(d.img, reader)
- if err != nil {
- return false, err
+ d.tmpFile.Close()
+ if err := os.RemoveAll(d.tmpFile.Name()); err != nil {
+ logrus.Errorf("Failed to remove temp file: %s", d.tmpFile.Name())
}
+ }
+ }(d)
+ }
- if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
- return false, err
- }
+ var tagUpdated bool
+ for _, d := range downloads {
+ if err := <-d.err; err != nil {
+ return false, err
+ }
+ if d.layer == nil {
+ continue
+ }
+ // if tmpFile is empty assume download and extracted elsewhere
+ d.tmpFile.Seek(0, 0)
+ reader := progressreader.New(progressreader.Config{
+ In: d.tmpFile,
+ Out: out,
+ Formatter: p.sf,
+ Size: int(d.size),
+ NewLines: false,
+ ID: stringid.TruncateID(d.img.ID),
+ Action: "Extracting",
+ })
+
+ err = p.graph.Register(d.img, reader)
+ if err != nil {
+ return false, err
+ }
- // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
- }
- out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
- tagUpdated = true
- } else {
- out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Already exists", nil))
+ if err := p.graph.SetDigest(d.img.ID, d.digest); err != nil {
+ return false, err
}
+
+ // FIXME: Pool release here for parallel tag pull (ensures any downloads block until fully extracted)
+ out.Write(p.sf.FormatProgress(stringid.TruncateID(d.img.ID), "Pull complete", nil))
+ tagUpdated = true
}
manifestDigest, _, err := digestFromManifest(manifest, p.repoInfo.LocalName)
@@ -342,17 +351,18 @@ func (p *v2Puller) pullV2Tag(tag, taggedName string) (verified bool, err error)
out.Write(p.sf.FormatStatus(p.repo.Name()+":"+tag, "The image you are pulling has been verified. Important: image verification is a tech preview feature and should not be relied on to provide security."))
}
+ firstID := layerIDs[len(layerIDs)-1]
if utils.DigestReference(tag) {
// TODO(stevvooe): Ideally, we should always set the digest so we can
// use the digest whether we pull by it or not. Unfortunately, the tag
// store treats the digest as a separate tag, meaning there may be an
// untagged digest image that would seem to be dangling by a user.
- if err = p.SetDigest(p.repoInfo.LocalName, tag, downloads[0].img.ID); err != nil {
+ if err = p.SetDigest(p.repoInfo.LocalName, tag, firstID); err != nil {
return false, err
}
} else {
// only set the repository/tag -> image ID mapping when pulling by tag (i.e. not by digest)
- if err = p.Tag(p.repoInfo.LocalName, tag, downloads[0].img.ID, true); err != nil {
+ if err = p.Tag(p.repoInfo.LocalName, tag, firstID, true); err != nil {
return false, err
}
}
--
2.4.3
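
Two mechanics in this refactor deserve a note. Each download now carries an error channel with a buffer of one, so the downloading goroutine can always complete its send even if the receiving loop has already returned on an earlier failure; and tempfile cleanup is registered for every download before any results are consumed, so partial pulls do not leak files. A reduced sketch of the channel arrangement (toy download function, not the real v2Puller):

    package main

    import (
        "fmt"
        "time"
    )

    type downloadInfo struct {
        id  string
        err chan error
    }

    func download(d *downloadInfo) {
        time.Sleep(10 * time.Millisecond) // pretend to fetch a layer
        // Buffered send: never blocks, even if nobody is left receiving.
        d.err <- nil
    }

    func main() {
        var downloads []*downloadInfo
        for i := 0; i < 3; i++ {
            d := &downloadInfo{
                id:  fmt.Sprintf("layer-%d", i),
                err: make(chan error, 1),
            }
            downloads = append(downloads, d)
            go download(d)
        }
        for _, d := range downloads {
            if err := <-d.err; err != nil {
                return // remaining goroutines still finish; buffered sends can't hang
            }
            fmt.Println(d.id, "pull complete")
        }
    }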

@@ -0,0 +1,72 @@
From 67c185dea42b6d4dc8f53280446488621ab04f7c Mon Sep 17 00:00:00 2001
From: Vincent Batts <vbatts@redhat.com>
Date: Tue, 15 Sep 2015 15:05:17 -0400
Subject: [PATCH 4/4] daemon/events: use UnixNano and no goroutine
Signed-off-by: Vincent Batts <vbatts@redhat.com>
---
daemon/events/events.go | 24 +++++++++++-------------
pkg/jsonmessage/jsonmessage.go | 5 +++--
2 files changed, 14 insertions(+), 15 deletions(-)
diff --git a/daemon/events/events.go b/daemon/events/events.go
index 07ee29a..aeb22e8 100644
--- a/daemon/events/events.go
+++ b/daemon/events/events.go
@@ -45,19 +45,17 @@ func (e *Events) Evict(l chan interface{}) {
// Log broadcasts event to listeners. Each listener has 100 millisecond for
// receiving event or it will be skipped.
func (e *Events) Log(action, id, from string) {
- go func() {
- e.mu.Lock()
- jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, Time: time.Now().UTC().Unix()}
- if len(e.events) == cap(e.events) {
- // discard oldest event
- copy(e.events, e.events[1:])
- e.events[len(e.events)-1] = jm
- } else {
- e.events = append(e.events, jm)
- }
- e.mu.Unlock()
- e.pub.Publish(jm)
- }()
+ e.mu.Lock()
+ jm := &jsonmessage.JSONMessage{Status: action, ID: id, From: from, TimeNano: time.Now().UTC().UnixNano()}
+ if len(e.events) == cap(e.events) {
+ // discard oldest event
+ copy(e.events, e.events[1:])
+ e.events[len(e.events)-1] = jm
+ } else {
+ e.events = append(e.events, jm)
+ }
+ e.mu.Unlock()
+ e.pub.Publish(jm)
}
// SubscribersCount returns number of event listeners
diff --git a/pkg/jsonmessage/jsonmessage.go b/pkg/jsonmessage/jsonmessage.go
index c4b311e..8a24d9c 100644
--- a/pkg/jsonmessage/jsonmessage.go
+++ b/pkg/jsonmessage/jsonmessage.go
@@ -90,6 +90,7 @@ type JSONMessage struct {
ID string `json:"id,omitempty"`
From string `json:"from,omitempty"`
Time int64 `json:"time,omitempty"`
+ TimeNano int64 `json:"timeNano,omitempty"`
Error *JSONError `json:"errorDetail,omitempty"`
ErrorMessage string `json:"error,omitempty"` //deprecated
}
@@ -109,8 +110,8 @@ func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
return nil
}
- if jm.Time != 0 {
- fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
+ if jm.Time != 0 || jm.TimeNano != 0 {
+ fmt.Fprintf(out, "%s ", time.Unix(jm.Time, jm.TimeNano).Format(timeutils.RFC3339NanoFixed))
}
if jm.ID != "" {
fmt.Fprintf(out, "%s: ", jm.ID)
--
2.4.3
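
The Log rewrite drops the per-event goroutine: the event is appended to (or rotated into) the fixed-capacity slice synchronously under the mutex, with the nanosecond timestamp taken at call time. The Display change stays correct because time.Unix normalizes a nanosecond argument outside [0, 1e9), so a full UnixNano value with Time left at zero still formats as the right instant. A sketch of just the buffer discipline, with a hypothetical events type and a capacity of 3 for demonstration:

    package main

    import (
        "fmt"
        "sync"
    )

    type events struct {
        mu  sync.Mutex
        buf []string
    }

    func (e *events) log(msg string) {
        e.mu.Lock()
        if len(e.buf) == cap(e.buf) {
            // buffer full: discard the oldest event, keep insertion order
            copy(e.buf, e.buf[1:])
            e.buf[len(e.buf)-1] = msg
        } else {
            e.buf = append(e.buf, msg)
        }
        e.mu.Unlock()
    }

    func main() {
        e := &events{buf: make([]string, 0, 3)}
        for i := 1; i <= 5; i++ {
            e.log(fmt.Sprintf("event-%d", i))
        }
        fmt.Println(e.buf) // [event-3 event-4 event-5]
    }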

@@ -0,0 +1,289 @@
From d8029ceb202fda8160855c07081dc51aae1ec1ad Mon Sep 17 00:00:00 2001
From: Vincent Batts <vbatts@redhat.com>
Date: Wed, 23 Sep 2015 15:50:23 -0400
Subject: [PATCH 5/5] vendor: update tar-split to v0.9.10
This addresses handling of non-utf8 file names, namely iso-8859-1.
https://github.com/docker/docker/issues/16516
Reported-by: @kwk
Signed-off-by: Vincent Batts <vbatts@redhat.com>
---
hack/vendor.sh | 2 +-
.../vbatts/tar-split/archive/tar/common.go | 28 ++++++++++++++--
.../vbatts/tar-split/archive/tar/reader.go | 15 ++++++++-
.../vbatts/tar-split/archive/tar/writer.go | 2 +-
.../vbatts/tar-split/tar/asm/assemble.go | 4 +--
.../vbatts/tar-split/tar/asm/disassemble.go | 11 +++---
.../vbatts/tar-split/tar/storage/entry.go | 39 ++++++++++++++++++++++
.../vbatts/tar-split/tar/storage/packer.go | 13 ++++++--
8 files changed, 101 insertions(+), 13 deletions(-)
diff --git a/hack/vendor.sh b/hack/vendor.sh
index 68772ef..52ba6ef 100755
--- a/hack/vendor.sh
+++ b/hack/vendor.sh
@@ -36,7 +36,7 @@ clone git github.com/hashicorp/consul v0.5.2
# get graph and distribution packages
clone git github.com/docker/distribution ec87e9b6971d831f0eff752ddb54fb64693e51cd # docker/1.8 branch
-clone git github.com/vbatts/tar-split v0.9.6
+clone git github.com/vbatts/tar-split v0.9.10
clone git github.com/docker/notary 8e8122eb5528f621afcd4e2854c47302f17392f7
clone git github.com/endophage/gotuf a592b03b28b02bb29bb5878308fb1abed63383b5
diff --git a/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go b/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
index e363aa7..c31df06 100644
--- a/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
+++ b/vendor/src/github.com/vbatts/tar-split/archive/tar/common.go
@@ -139,8 +139,8 @@ func (fi headerFileInfo) Mode() (mode os.FileMode) {
}
switch fi.h.Typeflag {
- case TypeLink, TypeSymlink:
- // hard link, symbolic link
+ case TypeSymlink:
+ // symbolic link
mode |= os.ModeSymlink
case TypeChar:
// character device node
@@ -249,6 +249,30 @@ func FileInfoHeader(fi os.FileInfo, link string) (*Header, error) {
if fm&os.ModeSticky != 0 {
h.Mode |= c_ISVTX
}
+ // If possible, populate additional fields from OS-specific
+ // FileInfo fields.
+ if sys, ok := fi.Sys().(*Header); ok {
+ // This FileInfo came from a Header (not the OS). Use the
+ // original Header to populate all remaining fields.
+ h.Uid = sys.Uid
+ h.Gid = sys.Gid
+ h.Uname = sys.Uname
+ h.Gname = sys.Gname
+ h.AccessTime = sys.AccessTime
+ h.ChangeTime = sys.ChangeTime
+ if sys.Xattrs != nil {
+ h.Xattrs = make(map[string]string)
+ for k, v := range sys.Xattrs {
+ h.Xattrs[k] = v
+ }
+ }
+ if sys.Typeflag == TypeLink {
+ // hard link
+ h.Typeflag = TypeLink
+ h.Size = 0
+ h.Linkname = sys.Linkname
+ }
+ }
if sysStat != nil {
return h, sysStat(fi, h)
}
diff --git a/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go b/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
index c72e002..4168ea2 100644
--- a/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
+++ b/vendor/src/github.com/vbatts/tar-split/archive/tar/reader.go
@@ -138,7 +138,13 @@ func (tr *Reader) Next() (*Header, error) {
// We actually read the whole file,
// but this skips alignment padding
tr.skipUnread()
+ if tr.err != nil {
+ return nil, tr.err
+ }
hdr = tr.readHeader()
+ if hdr == nil {
+ return nil, tr.err
+ }
mergePAX(hdr, headers)
// Check for a PAX format sparse file
@@ -397,7 +403,7 @@ func parsePAX(r io.Reader) (map[string]string, error) {
}
// Parse the first token as a decimal integer.
n, err := strconv.ParseInt(string(buf[:sp]), 10, 0)
- if err != nil {
+ if err != nil || n < 5 || int64(len(buf)) < n {
return nil, ErrHeader
}
// Extract everything between the decimal and the n -1 on the
@@ -553,6 +559,10 @@ func (tr *Reader) readHeader() *Header {
hdr.Uid = int(tr.octal(s.next(8)))
hdr.Gid = int(tr.octal(s.next(8)))
hdr.Size = tr.octal(s.next(12))
+ if hdr.Size < 0 {
+ tr.err = ErrHeader
+ return nil
+ }
hdr.ModTime = time.Unix(tr.octal(s.next(12)), 0)
s.next(8) // chksum
hdr.Typeflag = s.next(1)[0]
@@ -895,6 +905,9 @@ func (sfr *sparseFileReader) Read(b []byte) (n int, err error) {
// Otherwise, we're at the end of the file
return 0, io.EOF
}
+ if sfr.tot < sfr.sp[0].offset {
+ return 0, io.ErrUnexpectedEOF
+ }
if sfr.pos < sfr.sp[0].offset {
// We're in a hole
n = sfr.readHole(b, sfr.sp[0].offset)
diff --git a/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go b/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
index dafb2ca..9dbc01a 100644
--- a/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
+++ b/vendor/src/github.com/vbatts/tar-split/archive/tar/writer.go
@@ -355,7 +355,7 @@ func paxHeader(msg string) string {
// hdr.Size bytes are written after WriteHeader.
func (tw *Writer) Write(b []byte) (n int, err error) {
if tw.closed {
- err = ErrWriteTooLong
+ err = ErrWriteAfterClose
return
}
overwrite := false
diff --git a/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go b/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
index 74317cb..83d6426 100644
--- a/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
+++ b/vendor/src/github.com/vbatts/tar-split/tar/asm/assemble.go
@@ -39,7 +39,7 @@ func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadClose
if entry.Size == 0 {
continue
}
- fh, err := fg.Get(entry.Name)
+ fh, err := fg.Get(entry.GetName())
if err != nil {
pw.CloseWithError(err)
return
@@ -56,7 +56,7 @@ func NewOutputTarStream(fg storage.FileGetter, up storage.Unpacker) io.ReadClose
// but since it's coming through the PipeReader, the context of
// _which_ file would be lost...
fh.Close()
- pw.CloseWithError(fmt.Errorf("file integrity checksum failed for %q", entry.Name))
+ pw.CloseWithError(fmt.Errorf("file integrity checksum failed for %q", entry.GetName()))
return
}
fh.Close()
diff --git a/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go b/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
index 7986890..54ef23a 100644
--- a/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
+++ b/vendor/src/github.com/vbatts/tar-split/tar/asm/disassemble.go
@@ -92,13 +92,16 @@ func NewInputTarStream(r io.Reader, p storage.Packer, fp storage.FilePutter) (io
}
}
- // File entries added, regardless of size
- _, err = p.AddEntry(storage.Entry{
+ entry := storage.Entry{
Type: storage.FileType,
- Name: hdr.Name,
Size: hdr.Size,
Payload: csum,
- })
+ }
+ // For proper marshalling of non-utf8 characters
+ entry.SetName(hdr.Name)
+
+ // File entries added, regardless of size
+ _, err = p.AddEntry(entry)
if err != nil {
pW.CloseWithError(err)
return
diff --git a/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go b/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
index 38fe7ba..c91e7ea 100644
--- a/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
+++ b/vendor/src/github.com/vbatts/tar-split/tar/storage/entry.go
@@ -1,5 +1,7 @@
package storage
+import "unicode/utf8"
+
// Entries is for sorting by Position
type Entries []Entry
@@ -33,7 +35,44 @@ const (
type Entry struct {
Type Type `json:"type"`
Name string `json:"name,omitempty"`
+ NameRaw []byte `json:"name_raw,omitempty"`
Size int64 `json:"size,omitempty"`
Payload []byte `json:"payload"` // SegmentType stores payload here; FileType stores crc64 checksum here;
Position int `json:"position"`
}
+
+// SetName will check name for valid UTF-8 string, and set the appropriate
+// field. See https://github.com/vbatts/tar-split/issues/17
+func (e *Entry) SetName(name string) {
+ if utf8.ValidString(name) {
+ e.Name = name
+ } else {
+ e.NameRaw = []byte(name)
+ }
+}
+
+// SetNameBytes will check name for valid UTF-8 string, and set the appropriate
+// field
+func (e *Entry) SetNameBytes(name []byte) {
+ if utf8.Valid(name) {
+ e.Name = string(name)
+ } else {
+ e.NameRaw = name
+ }
+}
+
+// GetName returns the string for the entry's name, regardless of the field stored in
+func (e *Entry) GetName() string {
+ if len(e.NameRaw) > 0 {
+ return string(e.NameRaw)
+ }
+ return e.Name
+}
+
+// GetNameBytes returns the bytes for the entry's name, regardless of the field stored in
+func (e *Entry) GetNameBytes() []byte {
+ if len(e.NameRaw) > 0 {
+ return e.NameRaw
+ }
+ return []byte(e.Name)
+}
diff --git a/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go b/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
index a02a19a..0c9d99b 100644
--- a/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
+++ b/vendor/src/github.com/vbatts/tar-split/tar/storage/packer.go
@@ -6,6 +6,7 @@ import (
"errors"
"io"
"path/filepath"
+ "unicode/utf8"
)
// ErrDuplicatePath occurs when a tar archive has more than one entry for the
@@ -61,7 +62,7 @@ func (jup *jsonUnpacker) Next() (*Entry, error) {
// check for dup name
if e.Type == FileType {
- cName := filepath.Clean(e.Name)
+ cName := filepath.Clean(e.GetName())
if _, ok := jup.seen[cName]; ok {
return nil, ErrDuplicatePath
}
@@ -93,9 +94,17 @@ type jsonPacker struct {
type seenNames map[string]struct{}
func (jp *jsonPacker) AddEntry(e Entry) (int, error) {
+ // if Name is not valid utf8, switch it to raw first.
+ if e.Name != "" {
+ if !utf8.ValidString(e.Name) {
+ e.NameRaw = []byte(e.Name)
+ e.Name = ""
+ }
+ }
+
// check early for dup name
if e.Type == FileType {
- cName := filepath.Clean(e.Name)
+ cName := filepath.Clean(e.GetName())
if _, ok := jp.seen[cName]; ok {
return -1, ErrDuplicatePath
}
--
2.4.3
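
The tar-split bump matters mostly for the name handling: encoding/json replaces invalid UTF-8 in a Go string with U+FFFD, so a name that is really ISO-8859-1 bytes would be silently corrupted on marshal. The new Entry therefore keeps valid UTF-8 in Name and everything else in NameRaw, which JSON encodes as base64. A minimal sketch of the split, with field names mirroring the vendored storage.Entry:

    package main

    import (
        "encoding/json"
        "fmt"
        "unicode/utf8"
    )

    type entry struct {
        Name    string `json:"name,omitempty"`
        NameRaw []byte `json:"name_raw,omitempty"` // base64 in JSON, lossless
    }

    func (e *entry) setName(name string) {
        if utf8.ValidString(name) {
            e.Name = name
            return
        }
        // Non-UTF-8 names (e.g. ISO-8859-1) would be mangled by the
        // JSON encoder, so keep the raw bytes instead.
        e.NameRaw = []byte(name)
    }

    func (e *entry) getName() string {
        if len(e.NameRaw) > 0 {
            return string(e.NameRaw)
        }
        return e.Name
    }

    func main() {
        var a, b entry
        a.setName("café.txt")    // valid UTF-8: stored in Name
        b.setName("caf\xe9.txt") // ISO-8859-1 'é': stored in NameRaw
        for _, e := range []entry{a, b} {
            j, _ := json.Marshal(e)
            fmt.Printf("%s -> %q\n", j, e.getName())
        }
    }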