12 "github.com/Sirupsen/logrus"
13 "github.com/docker/distribution"
14 "github.com/docker/distribution/manifest/manifestlist"
15 "github.com/docker/distribution/manifest/schema1"
16 "github.com/docker/distribution/manifest/schema2"
17 "github.com/docker/distribution/reference"
18 "github.com/docker/distribution/registry/api/errcode"
19 "github.com/docker/distribution/registry/client/auth"
20 "github.com/docker/docker/distribution/metadata"
21 "github.com/docker/docker/distribution/xfer"
22 "github.com/docker/docker/image"
23 "github.com/docker/docker/image/v1"
24 "github.com/docker/docker/layer"
25 "github.com/docker/docker/pkg/ioutils"
26 "github.com/docker/docker/pkg/progress"
27 "github.com/docker/docker/pkg/stringid"
28 "github.com/docker/docker/pkg/system"
29 refstore "github.com/docker/docker/reference"
30 "github.com/docker/docker/registry"
31 "github.com/opencontainers/go-digest"
32 "golang.org/x/net/context"
	errRootFSMismatch = errors.New("layers from manifest don't match image configuration")
	errRootFSInvalid  = errors.New("invalid rootfs in image configuration")
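// maxDownloadAttempts is the number of times a layer read is retried after a
// failure before the download of that layer is given up on (see Read below).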
const maxDownloadAttempts = 5
// ImageConfigPullError is an error pulling the image config blob
// (only applies to schema2).
type ImageConfigPullError struct {

// Error returns the error string for ImageConfigPullError.
func (e ImageConfigPullError) Error() string {
	return "error pulling image configuration: " + e.Err.Error()
type v2Puller struct {
	V2MetadataService metadata.V2MetadataService
	endpoint          registry.APIEndpoint
	config            *ImagePullConfig
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	// confirmedV2 is set to true if we confirm we're talking to a v2
	// registry. This is used to limit fallbacks to the v1 protocol.
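// Pull pulls the repository referenced by ref from the v2 registry. Errors
// that still permit falling back to the v1 protocol are wrapped in a
// fallbackError carrying the confirmedV2 flag.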
func (p *v2Puller) Pull(ctx context.Context, ref reference.Named) (err error) {
	// TODO(tiborvass): was ReceiveTimeout
	p.repo, p.confirmedV2, err = NewV2Repository(ctx, p.repoInfo, p.endpoint, p.config.MetaHeaders, p.config.AuthConfig, "pull")
		logrus.Warnf("Error getting v2 registry: %v", err)

	if err = p.pullV2Repository(ctx, ref); err != nil {
		if _, ok := err.(fallbackError); ok {
		if continueOnError(err) {
				confirmedV2: p.confirmedV2,
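// pullV2Repository pulls a single tag or digest when ref carries one, and
// otherwise pulls every tag in the repository.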
func (p *v2Puller) pullV2Repository(ctx context.Context, ref reference.Named) (err error) {
	var layersDownloaded bool
	if !reference.IsNameOnly(ref) {
		layersDownloaded, err = p.pullV2Tag(ctx, ref)

		tags, err := p.repo.Tags(ctx).All(ctx)
			// If this repository doesn't exist on V2, we should
			// permit a fallback to V1.
			return allowV1Fallback(err)

		// The v2 registry knows about this repository, so we will not
		// allow fallback to the v1 protocol even if we encounter an
		// error in pullV2Tag.
		for _, tag := range tags {
			tagRef, err := reference.WithTag(ref, tag)

			pulledNew, err := p.pullV2Tag(ctx, tagRef)
				// Since this is the pull-all-tags case, don't
				// allow an error pulling a particular tag to
				// make the whole pull fall back to v1.
				if fallbackErr, ok := err.(fallbackError); ok {
					return fallbackErr.err

			// pulledNew is true if either new layers were downloaded OR if existing images were newly tagged
			// TODO(tiborvass): should we change the name of `layersDownload`? What about message in WriteStatus?
			layersDownloaded = layersDownloaded || pulledNew

	writeStatus(reference.FamiliarString(ref), p.config.ProgressOutput, layersDownloaded)
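// v2LayerDescriptor describes a single layer blob to download from the
// registry; it is handed to the download manager as an
// xfer.DownloadDescriptor.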
type v2LayerDescriptor struct {
	repoInfo          *registry.RepositoryInfo
	repo              distribution.Repository
	V2MetadataService metadata.V2MetadataService

	verifier digest.Verifier
	src      distribution.Descriptor

	layerDownload    io.ReadCloser
	downloadAttempts uint8

	deltaBase io.ReadSeeker
func (ld *v2LayerDescriptor) Key() string {
	return "v2:" + ld.digest.String()

func (ld *v2LayerDescriptor) ID() string {
	return stringid.TruncateID(ld.digest.String())

func (ld *v2LayerDescriptor) DiffID() (layer.DiffID, error) {
		return ld.diffID, nil

	return ld.V2MetadataService.GetDiffID(ld.digest)
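// reset closes any partially-read blob stream, reopens the blob from the
// registry, seeks to the current download offset and rewires the stream
// through the digest verifier so verification still covers the whole blob.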
func (ld *v2LayerDescriptor) reset() error {
	if ld.layerDownload != nil {
		ld.layerDownload.Close()
		ld.layerDownload = nil

	layer, err := ld.open(ld.ctx)

	if _, err := layer.Seek(ld.downloadOffset, os.SEEK_SET); err != nil {

	ld.layerDownload = ioutils.TeeReadCloser(ioutils.NewCancelReadCloser(ld.ctx, layer), ld.verifier)
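// Read reads from the layer blob, transparently (re)opening the underlying
// download and retrying on failure until downloadAttempts is exhausted. When
// the stream ends, the accumulated content is checked against the expected
// digest.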
func (ld *v2LayerDescriptor) Read(p []byte) (int, error) {
	if ld.downloadAttempts <= 0 {
		return 0, fmt.Errorf("no request retries left")

	if ld.layerDownload == nil {
		if err := ld.reset(); err != nil {
			ld.downloadAttempts--

	n, err := ld.layerDownload.Read(p)
	ld.downloadOffset += int64(n)

		if !ld.verifier.Verified() {
			return n, fmt.Errorf("filesystem layer verification failed for digest %s", ld.digest)
	} else if err != nil {
		logrus.Warnf("failed to download layer: %v; retrying", err)
		ld.downloadAttempts--
		ld.layerDownload = nil
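// DeltaBase returns the seekable tar stream of the delta base image when this
// layer is part of a delta pull (see the io.resin.delta.base handling in
// pullSchema2), or nil otherwise.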
func (ld *v2LayerDescriptor) DeltaBase() io.ReadSeeker {

func (ld *v2LayerDescriptor) Close() {
	if ld.layerDownload != nil {
		ld.layerDownload.Close()
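// Download prepares the descriptor for reading: it arms the retry counter and
// the digest verifier and returns a ReadCloser over the blob along with its
// expected size.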
func (ld *v2LayerDescriptor) Download(ctx context.Context, progressOutput progress.Output) (io.ReadCloser, int64, error) {
	logrus.Debugf("pulling blob %q", ld.digest)

	ld.layerDownload = nil
	ld.downloadAttempts = maxDownloadAttempts
	ld.verifier = ld.digest.Verifier()

	progress.Update(progressOutput, ld.ID(), "Ready to download")

	return ioutils.NewReadCloserWrapper(ld, func() error { return nil }), ld.src.Size, nil
func (ld *v2LayerDescriptor) Registered(diffID layer.DiffID) {
	// Cache mapping from this layer's DiffID to the blobsum
	ld.V2MetadataService.Add(diffID, metadata.V2Metadata{Digest: ld.digest, SourceRepository: ld.repoInfo.Name.Name()})

func (ld *v2LayerDescriptor) Size() int64 {
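// pullV2Tag fetches the manifest for ref, dispatches to the schema1, schema2
// or manifest-list pull path, and records the result in the reference store.
// It reports whether anything new was downloaded or tagged.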
func (p *v2Puller) pullV2Tag(ctx context.Context, ref reference.Named) (tagUpdated bool, err error) {
	manSvc, err := p.repo.Manifests(ctx)

		manifest    distribution.Manifest
		tagOrDigest string // Used for logging/progress only

	if digested, isDigested := ref.(reference.Canonical); isDigested {
		manifest, err = manSvc.Get(ctx, digested.Digest())

		tagOrDigest = digested.Digest().String()
	} else if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
		manifest, err = manSvc.Get(ctx, "", distribution.WithTag(tagged.Tag()))
			return false, allowV1Fallback(err)

		tagOrDigest = tagged.Tag()

		return false, fmt.Errorf("internal error: reference has neither a tag nor a digest: %s", reference.FamiliarString(ref))

		return false, fmt.Errorf("image manifest does not exist for tag or digest %q", tagOrDigest)

	if m, ok := manifest.(*schema2.DeserializedManifest); ok {
		var allowedMediatype bool
		for _, t := range p.config.Schema2Types {
			if m.Manifest.Config.MediaType == t {
				allowedMediatype = true

		if !allowedMediatype {
			configClass := mediaTypeClasses[m.Manifest.Config.MediaType]
			if configClass == "" {
				configClass = "unknown"

			return false, fmt.Errorf("encountered remote %q (%s) when fetching", m.Manifest.Config.MediaType, configClass)

	// If manSvc.Get succeeded, we can be confident that the registry on
	// the other side speaks the v2 protocol.

	logrus.Debugf("Pulling ref from V2 registry: %s", reference.FamiliarString(ref))
	progress.Message(p.config.ProgressOutput, tagOrDigest, "Pulling from "+reference.FamiliarName(p.repo.Named()))

		manifestDigest digest.Digest

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		if p.config.RequireSchema2 {
			return false, fmt.Errorf("invalid manifest: not schema2")

		id, manifestDigest, err = p.pullSchema1(ctx, ref, v)

	case *schema2.DeserializedManifest:
		id, manifestDigest, err = p.pullSchema2(ctx, ref, v)

	case *manifestlist.DeserializedManifestList:
		id, manifestDigest, err = p.pullManifestList(ctx, ref, v)

		return false, errors.New("unsupported manifest format")

	progress.Message(p.config.ProgressOutput, "", "Digest: "+manifestDigest.String())

	if p.config.ReferenceStore != nil {
		oldTagID, err := p.config.ReferenceStore.Get(ref)

				return false, addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id)

		} else if err != refstore.ErrDoesNotExist {

		if canonical, ok := ref.(reference.Canonical); ok {
			if err = p.config.ReferenceStore.AddDigest(canonical, id, true); err != nil {

			if err = addDigestReference(p.config.ReferenceStore, ref, manifestDigest, id); err != nil {

			if err = p.config.ReferenceStore.AddTag(ref, id, true); err != nil {
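// pullSchema1 verifies a schema1 signed manifest, downloads its layers via the
// download manager, and stores the resulting image, returning its ID and the
// manifest digest.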
func (p *v2Puller) pullSchema1(ctx context.Context, ref reference.Named, unverifiedManifest *schema1.SignedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	var verifiedManifest *schema1.Manifest
	verifiedManifest, err = verifySchema1Manifest(unverifiedManifest, ref)

	rootFS := image.NewRootFS()

	// remove duplicate layers and check parent chain validity
	err = fixManifestLayers(verifiedManifest)

	var descriptors []xfer.DownloadDescriptor

	// Image history converted to the new format
	var history []image.History

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for i := len(verifiedManifest.FSLayers) - 1; i >= 0; i-- {
		blobSum := verifiedManifest.FSLayers[i].BlobSum

		var throwAway struct {
			ThrowAway bool `json:"throwaway,omitempty"`
		if err := json.Unmarshal([]byte(verifiedManifest.History[i].V1Compatibility), &throwAway); err != nil {

		h, err := v1.HistoryFromConfig([]byte(verifiedManifest.History[i].V1Compatibility), throwAway.ThrowAway)

		history = append(history, h)

		if throwAway.ThrowAway {

		layerDescriptor := &v2LayerDescriptor{
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,

		descriptors = append(descriptors, layerDescriptor)

	// The v1 manifest itself doesn't directly carry a platform, but each
	// history entry's V1Compatibility JSON may, so search the history for an
	// entry that indicates the OS.
	platform := runtime.GOOS
	if system.LCOWSupported() {
			Os string `json:"os,omitempty"`
		for _, v := range verifiedManifest.History {
			if err := json.Unmarshal([]byte(v.V1Compatibility), &c); err == nil {

	resultRootFS, release, err := p.config.DownloadManager.Download(ctx, *rootFS, layer.Platform(platform), descriptors, p.config.ProgressOutput)

	config, err := v1.MakeConfigFromV1Config([]byte(verifiedManifest.History[0].V1Compatibility), &resultRootFS, history)

	imageID, err := p.config.ImageStore.Put(config)

	manifestDigest = digest.FromBytes(unverifiedManifest.Canonical)

	return imageID, manifestDigest, nil
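// pullSchema2 pulls an image described by a schema2 manifest: it fetches the
// image config, applies the io.resin.delta.* label handling for delta pulls,
// checks that the config's rootfs matches the manifest layers, downloads the
// layers and stores the resulting image.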
func (p *v2Puller) pullSchema2(ctx context.Context, ref reference.Named, mfst *schema2.DeserializedManifest) (id digest.Digest, manifestDigest digest.Digest, err error) {
	manifestDigest, err = schema2ManifestDigest(ref, mfst)

	target := mfst.Target()
	if _, err := p.config.ImageStore.Get(target.Digest); err == nil {
		// If the image already exists locally, no need to pull
		return target.Digest, manifestDigest, nil

	// Pull the image config
	configJSON, err := p.pullSchema2Config(ctx, target.Digest)
		return "", "", ImageConfigPullError{Err: err}

	var deltaBase io.ReadSeeker

	// check for delta config
	img, err := image.NewFromJSON(configJSON)
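	// Delta pulls (balena): a delta image's config carries two labels.
	// "io.resin.delta.base" names the digest of the base image the delta was
	// computed against; when that image is available locally its tar stream
	// is opened and passed to the layer descriptors as deltaBase.
	// "io.resin.delta.config" embeds the config of the image the delta
	// expands to, which replaces the delta's own config below.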
	if img.Config != nil {
		if base, ok := img.Config.Labels["io.resin.delta.base"]; ok {
			digest, err := digest.Parse(base)

			stream, err := p.config.ImageStore.GetTarSeekStream(digest)

		if config, ok := img.Config.Labels["io.resin.delta.config"]; ok {
			digest := digest.FromString(config)

			if _, err := p.config.ImageStore.Get(digest); err == nil {
				// If the image already exists locally, no need to pull
				return digest, manifestDigest, nil

			configJSON = []byte(config)

	configRootFS, platform, err := p.config.ImageStore.RootFSAndPlatformFromConfig(configJSON)
	if err == nil && configRootFS == nil {
		return "", "", errRootFSInvalid
	var descriptors []xfer.DownloadDescriptor

	// Note that the order of this loop is in the direction of bottom-most
	// to top-most, so that the downloads slice gets ordered correctly.
	for _, d := range mfst.Layers {
		layerDescriptor := &v2LayerDescriptor{
			repoInfo:          p.repoInfo,
			V2MetadataService: p.V2MetadataService,
			deltaBase:         deltaBase,

		descriptors = append(descriptors, layerDescriptor)
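	// Layer downloads run in a separate goroutine below: layerErrChan receives
	// the first download error, while downloadsDone is closed once all layers
	// have been downloaded and registered.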
	layerErrChan := make(chan error, 1)
	downloadsDone := make(chan struct{})

	ctx, cancel = context.WithCancel(ctx)

		downloadedRootFS *image.RootFS // rootFS from registered layers
		release          func()        // release resources from rootFS download

	if len(descriptors) != len(configRootFS.DiffIDs) {
		return "", "", errRootFSMismatch

	// Populate diff IDs in descriptors to avoid downloading foreign layers
	// which have been side-loaded
	for i := range descriptors {
		descriptors[i].(*v2LayerDescriptor).diffID = configRootFS.DiffIDs[i]
	if p.config.DownloadManager != nil {

			downloadRootFS := *image.NewRootFS()
			rootFS, release, err = p.config.DownloadManager.Download(ctx, downloadRootFS, platform, descriptors, p.config.ProgressOutput)
				// Intentionally do not cancel the config download here
				// as the error from config download (if there is one)
				// is more interesting than the layer download error

			downloadedRootFS = &rootFS

		// We have nothing to download

	case <-downloadsDone:
	case err = <-layerErrChan:

	if downloadedRootFS != nil {
		// The DiffIDs returned in rootFS MUST match those in the config.
		// Otherwise the image config could be referencing layers that aren't
		// included in the manifest.
		if len(downloadedRootFS.DiffIDs) != len(configRootFS.DiffIDs) {
			return "", "", errRootFSMismatch

		for i := range downloadedRootFS.DiffIDs {
			if downloadedRootFS.DiffIDs[i] != configRootFS.DiffIDs[i] {
				return "", "", errRootFSMismatch

	imageID, err := p.config.ImageStore.Put(configJSON)

	return imageID, manifestDigest, nil
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func (p *v2Puller) pullManifestList(ctx context.Context, ref reference.Named, mfstList *manifestlist.DeserializedManifestList) (id digest.Digest, manifestListDigest digest.Digest, err error) {
	manifestListDigest, err = schema2ManifestDigest(ref, mfstList)

	logrus.Debugf("%s resolved to a manifestList object with %d entries; looking for an os/arch match", ref, len(mfstList.Manifests))
	var manifestDigest digest.Digest
	for _, manifestDescriptor := range mfstList.Manifests {
		// TODO(aaronl): The manifest list spec supports optional
		// "features" and "variant" fields. These are not yet used.
		// Once they are, their values should be interpreted here.
		if manifestDescriptor.Platform.Architecture == runtime.GOARCH && manifestDescriptor.Platform.OS == runtime.GOOS {
			manifestDigest = manifestDescriptor.Digest
			logrus.Debugf("found match for %s/%s with media type %s, digest %s", runtime.GOOS, runtime.GOARCH, manifestDescriptor.MediaType, manifestDigest.String())

	if manifestDigest == "" {
		errMsg := fmt.Sprintf("no matching manifest for %s/%s in the manifest list entries", runtime.GOOS, runtime.GOARCH)
		logrus.Debug(errMsg)
		return "", "", errors.New(errMsg)

	manSvc, err := p.repo.Manifests(ctx)

	manifest, err := manSvc.Get(ctx, manifestDigest)

	manifestRef, err := reference.WithDigest(reference.TrimNamed(ref), manifestDigest)

	switch v := manifest.(type) {
	case *schema1.SignedManifest:
		id, _, err = p.pullSchema1(ctx, manifestRef, v)

	case *schema2.DeserializedManifest:
		id, _, err = p.pullSchema2(ctx, manifestRef, v)

		return "", "", errors.New("unsupported manifest format")

	return id, manifestListDigest, err
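// pullSchema2Config fetches the image configuration blob for dgst from the
// repository's blob store and verifies it against that digest.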
func (p *v2Puller) pullSchema2Config(ctx context.Context, dgst digest.Digest) (configJSON []byte, err error) {
	blobs := p.repo.Blobs(ctx)
	configJSON, err = blobs.Get(ctx, dgst)

	// Verify image config digest
	verifier := dgst.Verifier()
	if _, err := verifier.Write(configJSON); err != nil {

	if !verifier.Verified() {
		err := fmt.Errorf("image config verification failed for digest %s", dgst)

	return configJSON, nil
// schema2ManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func schema2ManifestDigest(ref reference.Named, mfst distribution.Manifest) (digest.Digest, error) {
	_, canonical, err := mfst.Payload()

	// If pull by digest, then verify the manifest digest.
	if digested, isDigested := ref.(reference.Canonical); isDigested {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(canonical); err != nil {

		if !verifier.Verified() {
			err := fmt.Errorf("manifest verification failed for digest %s", digested.Digest())

		return digested.Digest(), nil

	return digest.FromBytes(canonical), nil
// allowV1Fallback checks if the error is a possible reason to fallback to v1
// (even if confirmedV2 has been set already), and if so, wraps the error in
// a fallbackError with confirmedV2 set to false. Otherwise, it returns the
// error unmodified.
func allowV1Fallback(err error) error {
	switch v := err.(type) {
			if v0, ok := v[0].(errcode.Error); ok && shouldV2Fallback(v0) {
				return fallbackError{

		if shouldV2Fallback(v) {
			return fallbackError{

		if v.Err == auth.ErrNoBasicAuthCredentials {
			return fallbackError{err: err, confirmedV2: false}
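// verifySchema1Manifest checks a schema1 signed manifest against the digest in
// ref (when pulling by digest) and performs basic structural validation,
// returning the embedded manifest on success.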
func verifySchema1Manifest(signedManifest *schema1.SignedManifest, ref reference.Named) (m *schema1.Manifest, err error) {
	// If pull by digest, then verify the manifest digest. NOTE: It is
	// important to do this first, before any other content validation. If the
	// digest cannot be verified, don't even bother with those other things.
	if digested, isCanonical := ref.(reference.Canonical); isCanonical {
		verifier := digested.Digest().Verifier()
		if _, err := verifier.Write(signedManifest.Canonical); err != nil {

		if !verifier.Verified() {
			err := fmt.Errorf("image verification failed for digest %s", digested.Digest())

	m = &signedManifest.Manifest

	if m.SchemaVersion != 1 {
		return nil, fmt.Errorf("unsupported schema version %d for %q", m.SchemaVersion, reference.FamiliarString(ref))
	if len(m.FSLayers) != len(m.History) {
		return nil, fmt.Errorf("length of history not equal to number of layers for %q", reference.FamiliarString(ref))
	if len(m.FSLayers) == 0 {
		return nil, fmt.Errorf("no FSLayers in manifest for %q", reference.FamiliarString(ref))
// fixManifestLayers removes repeated layers from the manifest and checks the
// correctness of the parent chain.
func fixManifestLayers(m *schema1.Manifest) error {
	imgs := make([]*image.V1Image, len(m.FSLayers))
	for i := range m.FSLayers {
		img := &image.V1Image{}

		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), img); err != nil {

		if err := v1.ValidateID(img.ID); err != nil {

	if imgs[len(imgs)-1].Parent != "" && runtime.GOOS != "windows" {
		// Windows base layer can point to a base layer parent that is not in manifest.
		return errors.New("invalid parent ID in the base layer of the image")

	// check for duplicate IDs anywhere in the chain, so we return an error
	// instead of deadlocking
	idmap := make(map[string]struct{})

	for _, img := range imgs {
		// skip IDs that appear after each other; we handle those later
		if _, exists := idmap[img.ID]; img.ID != lastID && exists {
			return fmt.Errorf("ID %+v appears multiple times in manifest", img.ID)

		idmap[lastID] = struct{}{}

	// backwards loop so that we keep the remaining indexes after removing items
	for i := len(imgs) - 2; i >= 0; i-- {
		if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
			m.FSLayers = append(m.FSLayers[:i], m.FSLayers[i+1:]...)
			m.History = append(m.History[:i], m.History[i+1:]...)
		} else if imgs[i].Parent != imgs[i+1].ID {
			return fmt.Errorf("invalid parent ID: expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)