// Package daemon exposes the functions that run on the host server
// on which the Docker daemon is running.
//
// In implementing the various functions of the daemon, there is often
// a method-specific struct for configuring the runtime behavior.
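// For example, ContainerRm takes a *types.ContainerRmConfig (see its use
// in restore below) rather than a long list of positional parameters:
//
//	daemon.ContainerRm(id, &types.ContainerRmConfig{
//		ForceRemove:  true,
//		RemoveVolume: true,
//	})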
21 "github.com/Sirupsen/logrus"
22 containerd "github.com/containerd/containerd/api/grpc/types"
23 "github.com/docker/docker/api"
24 "github.com/docker/docker/api/types"
25 containertypes "github.com/docker/docker/api/types/container"
26 "github.com/docker/docker/container"
27 "github.com/docker/docker/daemon/config"
28 "github.com/docker/docker/daemon/discovery"
29 "github.com/docker/docker/daemon/events"
30 "github.com/docker/docker/daemon/exec"
31 "github.com/docker/docker/daemon/logger"
32 // register graph drivers
33 _ "github.com/docker/docker/daemon/graphdriver/register"
34 "github.com/docker/docker/daemon/initlayer"
35 "github.com/docker/docker/daemon/stats"
36 dmetadata "github.com/docker/docker/distribution/metadata"
37 "github.com/docker/docker/distribution/xfer"
38 "github.com/docker/docker/dockerversion"
39 "github.com/docker/docker/image"
40 "github.com/docker/docker/layer"
41 "github.com/docker/docker/libcontainerd"
42 "github.com/docker/docker/migrate/v1"
43 "github.com/docker/docker/pkg/idtools"
44 "github.com/docker/docker/pkg/plugingetter"
45 "github.com/docker/docker/pkg/registrar"
46 "github.com/docker/docker/pkg/signal"
47 "github.com/docker/docker/pkg/sysinfo"
48 "github.com/docker/docker/pkg/system"
49 "github.com/docker/docker/pkg/truncindex"
50 "github.com/docker/docker/plugin"
51 refstore "github.com/docker/docker/reference"
52 "github.com/docker/docker/registry"
53 "github.com/docker/docker/runconfig"
54 volumedrivers "github.com/docker/docker/volume/drivers"
55 "github.com/docker/docker/volume/local"
56 "github.com/docker/docker/volume/store"
57 "github.com/docker/libnetwork"
58 "github.com/docker/libnetwork/cluster"
59 nwconfig "github.com/docker/libnetwork/config"
60 "github.com/docker/libtrust"
61 "github.com/pkg/errors"
var (
    // DefaultRuntimeBinary is the default runtime to be used by
    // containerd if none is specified
    DefaultRuntimeBinary = "docker-runc"

    errSystemNotSupported = errors.New("The Docker daemon is not supported on this platform.")
)
type daemonStore struct {
    graphDriver               string
    imageRoot                 string
    imageStore                image.Store
    layerStore                layer.Store
    distributionMetadataStore dmetadata.Store
    referenceStore            refstore.Store
}
// Daemon holds information about the Docker daemon.
type Daemon struct {
    ID                    string
    repository            string
    containers            container.Store
    containersReplica     container.ViewDB
    execCommands          *exec.Store
    downloadManager       *xfer.LayerDownloadManager
    uploadManager         *xfer.LayerUploadManager
    trustKey              libtrust.PrivateKey
    idIndex               *truncindex.TruncIndex
    configStore           *config.Config
    statsCollector        *stats.Collector
    defaultLogConfig      containertypes.LogConfig
    RegistryService       registry.Service
    EventsService         *events.Events
    netController         libnetwork.NetworkController
    volumes               *store.VolumeStore
    discoveryWatcher      discovery.Reloader
    shutdown              bool
    seccompEnabled        bool
    apparmorEnabled       bool
    idMappings            *idtools.IDMappings
    stores                map[string]daemonStore // By container target platform
    deltaStore            *daemonStore
    PluginStore           *plugin.Store // todo: remove
    pluginManager         *plugin.Manager
    nameIndex             *registrar.Registrar
    linkIndex             *linkIndex
    containerd            libcontainerd.Client
    containerdRemote      libcontainerd.Remote
    defaultIsolation      containertypes.Isolation // Default isolation mode on Windows
    clusterProvider       cluster.Provider
    cluster               Cluster
    metricsPluginListener net.Listener
    seccompProfile        []byte
    seccompProfilePath    string
    diskUsageRunning      int32
    hosts                 map[string]bool // hosts stores the addresses the daemon is listening on
    startupDone           chan struct{}
}
// StoreHosts stores the addresses the daemon is listening on
func (daemon *Daemon) StoreHosts(hosts []string) {
    if daemon.hosts == nil {
        daemon.hosts = make(map[string]bool)
    }
    for _, h := range hosts {
        daemon.hosts[h] = true
    }
}
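// exampleStoreHosts illustrates how a caller might record listener addresses
// before serving begins (hypothetical helper, not part of the daemon; the
// address values are illustrative only).
func exampleStoreHosts(d *Daemon) {
    d.StoreHosts([]string{"unix:///var/run/docker.sock", "tcp://0.0.0.0:2376"})
    // Each address is now tracked in d.hosts.
}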
// HasExperimental returns whether the experimental features of the daemon are enabled or not
func (daemon *Daemon) HasExperimental() bool {
    if daemon.configStore != nil && daemon.configStore.Experimental {
        return true
    }
    return false
}
func (daemon *Daemon) restore() error {
    containers := make(map[string]*container.Container)

    logrus.Info("Loading containers: start.")

    dir, err := ioutil.ReadDir(daemon.repository)
    if err != nil {
        return err
    }

    for _, v := range dir {
        id := v.Name()
        container, err := daemon.load(id)
        if err != nil {
            logrus.Errorf("Failed to load container %v: %v", id, err)
            continue
        }

        // Ignore the container if it was created with a graph driver
        // other than the one currently in use for its platform.
        currentDriverForContainerPlatform := daemon.stores[container.Platform].graphDriver
        if (container.Driver == "" && currentDriverForContainerPlatform == "aufs") || container.Driver == currentDriverForContainerPlatform {
            rwlayer, err := daemon.stores[container.Platform].layerStore.GetRWLayer(container.ID)
            if err != nil {
                logrus.Errorf("Failed to load container mount %v: %v", id, err)
                continue
            }
            container.RWLayer = rwlayer
            logrus.Debugf("Loaded container %v", container.ID)

            containers[container.ID] = container
        } else {
            logrus.Debugf("Cannot load container %s because it was created with another graph driver.", container.ID)
        }
    }
    removeContainers := make(map[string]*container.Container)
    restartContainers := make(map[*container.Container]chan struct{})
    activeSandboxes := make(map[string]interface{})
    for id, c := range containers {
        if err := daemon.registerName(c); err != nil {
            logrus.Errorf("Failed to register container name %s: %s", c.ID, err)
            delete(containers, id)
            continue
        }
        // Verify that all volumes are valid and have been migrated from the pre-1.7 layout.
        if err := daemon.verifyVolumesInfo(c); err != nil {
            // don't skip the container due to error
            logrus.Errorf("Failed to verify volumes for container '%s': %v", c.ID, err)
        }
        if err := daemon.Register(c); err != nil {
            logrus.Errorf("Failed to register container %s: %s", c.ID, err)
            delete(containers, id)
            continue
        }

        // The LogConfig.Type is empty if the container was created before docker 1.12 with the default log driver.
        // We should rewrite it to use the daemon defaults.
        // Fixes https://github.com/docker/docker/issues/22536
        if c.HostConfig.LogConfig.Type == "" {
            if err := daemon.mergeAndVerifyLogConfig(&c.HostConfig.LogConfig); err != nil {
                logrus.Errorf("Failed to verify log config for container %s: %q", c.ID, err)
                continue
            }
        }
    }
    var wg sync.WaitGroup
    var mapLock sync.Mutex
    for _, c := range containers {
        wg.Add(1)
        go func(c *container.Container) {
            defer wg.Done()
            daemon.backportMountSpec(c)
            if err := daemon.checkpointAndSave(c); err != nil {
                logrus.WithError(err).WithField("container", c.ID).Error("error saving backported mountspec to disk")
            }

            daemon.setStateCounter(c)
            if c.IsRunning() || c.IsPaused() {
                c.RestartManager().Cancel() // manually start containers because some need to wait for swarm networking
                if err := daemon.containerd.Restore(c.ID, c.InitializeStdio); err != nil {
                    logrus.Errorf("Failed to restore %s with containerd: %s", c.ID, err)
                    return
                }

                // We call Mount and then Unmount to get the BaseFs of the container.
                if err := daemon.Mount(c); err != nil {
                    // The mount is unlikely to fail. However, in case mount fails
                    // the container should be allowed to restore here. Some functionalities
                    // (like docker exec -u user) might be missing but the container can
                    // still be stopped/restarted/removed.
                    // See #29365 for related information.
                    // The error is only logged here.
                    logrus.Warnf("Failed to mount container %v to get its BaseFs path: %v", c.ID, err)
                } else {
                    if err := daemon.Unmount(c); err != nil {
                        logrus.Warnf("Failed to unmount container %v after getting its BaseFs path: %v", c.ID, err)
                    }
                }

                c.ResetRestartManager(false)
                if !c.HostConfig.NetworkMode.IsContainer() && c.IsRunning() {
                    options, err := daemon.buildSandboxOptions(c)
                    if err != nil {
                        logrus.Warnf("Failed to build sandbox options to restore container %s: %v", c.ID, err)
                    }
                    mapLock.Lock()
                    activeSandboxes[c.NetworkSettings.SandboxID] = options
                    mapLock.Unlock()
                }
            }
            // fixme: only if not running
            // get list of containers we need to restart
            if !c.IsRunning() && !c.IsPaused() {
                // Do not autostart containers that have endpoints in a
                // swarm-scope network, since the cluster is not initialized
                // yet. We will start them after the cluster is initialized.
                if daemon.configStore.AutoRestart && c.ShouldRestart() && !c.NetworkSettings.HasSwarmEndpoint {
                    mapLock.Lock()
                    restartContainers[c] = make(chan struct{})
                    mapLock.Unlock()
                } else if c.HostConfig != nil && c.HostConfig.AutoRemove {
                    mapLock.Lock()
                    removeContainers[c.ID] = c
                    mapLock.Unlock()
                }
            }

            if c.RemovalInProgress {
                // We probably crashed in the middle of a removal, reset
                // the flag.
                //
                // We DO NOT remove the container here as we do not
                // know if the user had requested for either the
                // associated volumes, network links or both to also
                // be removed. So we put the container in the "dead"
                // state and leave further processing up to them.
                logrus.Debugf("Resetting RemovalInProgress flag from %v", c.ID)
                c.RemovalInProgress = false
                c.Dead = true
                if err := c.CheckpointTo(daemon.containersReplica); err != nil {
                    logrus.Errorf("Failed to update container %s state: %v", c.ID, err)
                }
            }
        }(c)
    }
    wg.Wait()
    daemon.netController, err = daemon.initNetworkController(daemon.configStore, activeSandboxes)
    if err != nil {
        return fmt.Errorf("Error initializing network controller: %v", err)
    }

    // Now that all the containers are registered, register the links
    for _, c := range containers {
        if err := daemon.registerLinks(c, c.HostConfig); err != nil {
            logrus.Errorf("failed to register link for container %s: %v", c.ID, err)
        }
    }

    group := sync.WaitGroup{}
    for c, notifier := range restartContainers {
        group.Add(1)

        go func(c *container.Container, chNotify chan struct{}) {
            defer group.Done()

            logrus.Debugf("Starting container %s", c.ID)

            // ignore errors here as this is a best effort to wait for children to be
            // running before we try to start the container
            children := daemon.children(c)
            timeout := time.After(5 * time.Second)
            for _, child := range children {
                if notifier, exists := restartContainers[child]; exists {
                    select {
                    case <-notifier:
                    case <-timeout:
                    }
                }
            }

            // Make sure networks are available before starting
            daemon.waitForNetworks(c)
            if err := daemon.containerStart(c, "", "", true); err != nil {
                logrus.Errorf("Failed to start container %s: %s", c.ID, err)
            }
            close(chNotify)
        }(c, notifier)
    }
    group.Wait()

    removeGroup := sync.WaitGroup{}
    for id := range removeContainers {
        removeGroup.Add(1)
        go func(cid string) {
            if err := daemon.ContainerRm(cid, &types.ContainerRmConfig{ForceRemove: true, RemoveVolume: true}); err != nil {
                logrus.Errorf("Failed to remove container %s: %s", cid, err)
            }
            removeGroup.Done()
        }(id)
    }
    removeGroup.Wait()

    // any containers that were started above would already have had this done,
    // however we need to now prepare the mountpoints for the rest of the containers as well.
    // This shouldn't cause any issue running on the containers that already had this run.
    // This must be run after any containers with a restart policy so that containerized plugins
    // can have a chance to be running before we try to initialize them.
    for _, c := range containers {
        // If the container has a restart policy, do not prepare the mount
        // points, since that was already done when it was restarted above.
        // This speeds up daemon startup when a restarting container has a
        // volume whose driver is unavailable.
        if _, ok := restartContainers[c]; ok {
            continue
        } else if _, ok := removeContainers[c.ID]; ok {
            // container is automatically removed, skip it.
            continue
        }

        group.Add(1)
        go func(c *container.Container) {
            defer group.Done()
            if err := daemon.prepareMountPoints(c); err != nil {
                logrus.Error(err)
            }
        }(c)
    }
    group.Wait()

    logrus.Info("Loading containers: done.")

    return nil
}
// RestartSwarmContainers restarts any autostart container which has a
// swarm endpoint.
func (daemon *Daemon) RestartSwarmContainers() {
    group := sync.WaitGroup{}
    for _, c := range daemon.List() {
        if !c.IsRunning() && !c.IsPaused() {
            // Autostart all the containers that have a swarm endpoint,
            // now that the cluster is initialized.
            if daemon.configStore.AutoRestart && c.ShouldRestart() && c.NetworkSettings.HasSwarmEndpoint {
                group.Add(1)
                go func(c *container.Container) {
                    defer group.Done()
                    if err := daemon.containerStart(c, "", "", true); err != nil {
                        logrus.Error(err)
                    }
                }(c)
            }
        }
    }
    group.Wait()
}
// waitForNetworks is used during daemon initialization when starting up containers.
// It ensures that all of a container's networks are available before the daemon tries to start the container.
// In practice it just makes sure the discovery service is available for containers which use a network that requires discovery.
func (daemon *Daemon) waitForNetworks(c *container.Container) {
    if daemon.discoveryWatcher == nil {
        return
    }
    // Make sure if the container has a network that requires discovery that the discovery service is available before starting
    for netName := range c.NetworkSettings.Networks {
        // If we get `ErrNoSuchNetwork` here, we can assume that it is due to discovery not being ready
        // Most likely this is because the K/V store used for discovery is in a container and needs to be started
        if _, err := daemon.netController.NetworkByName(netName); err != nil {
            if _, ok := err.(libnetwork.ErrNoSuchNetwork); !ok {
                continue
            }
            // use a longish timeout here due to some slowdowns in libnetwork if the k/v store is on anything other than --net=host
            // FIXME: why is this slow???
            logrus.Debugf("Container %s waiting for network to be ready", c.Name)
            select {
            case <-daemon.discoveryWatcher.ReadyCh():
            case <-time.After(60 * time.Second):
            }
            return
        }
    }
}
func (daemon *Daemon) children(c *container.Container) map[string]*container.Container {
    return daemon.linkIndex.children(c)
}

// parents returns the names of the parent containers of the container
// with the given name.
func (daemon *Daemon) parents(c *container.Container) map[string]*container.Container {
    return daemon.linkIndex.parents(c)
}
func (daemon *Daemon) registerLink(parent, child *container.Container, alias string) error {
    fullName := path.Join(parent.Name, alias)
    if err := daemon.nameIndex.Reserve(fullName, child.ID); err != nil {
        if err == registrar.ErrNameReserved {
            logrus.Warnf("error registering link for %s, to %s, as alias %s, ignoring: %v", parent.ID, child.ID, alias, err)
            return nil
        }
        return err
    }
    daemon.linkIndex.link(parent, child, fullName)
    return nil
}
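// For illustration (hypothetical names): a parent named "/web" linking a
// child under alias "db" reserves the full name path.Join("/web", "db"),
// i.e. "/web/db", so the alias resolves uniquely within the parent's scope.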
// DaemonJoinsCluster informs the daemon that it has joined the cluster and provides
// the handler to query the cluster component
func (daemon *Daemon) DaemonJoinsCluster(clusterProvider cluster.Provider) {
    daemon.setClusterProvider(clusterProvider)
}

// DaemonLeavesCluster informs the daemon that it has left the cluster
func (daemon *Daemon) DaemonLeavesCluster() {
    // Daemon is in charge of removing the attachable networks with
    // connected containers when the node leaves the swarm
    daemon.clearAttachableNetworks()
    // We no longer need the cluster provider, stop it now so that
    // the network agent will stop listening to cluster events.
    daemon.setClusterProvider(nil)
    // Wait for the networking cluster agent to stop
    daemon.netController.AgentStopWait()
    // Daemon is in charge of removing the ingress network when the
    // node leaves the swarm. Wait for job to be done or timeout.
    // This is also called on graceful daemon shutdown. We need to
    // wait, because the ingress release has to happen before the
    // network controller is stopped.
    if done, err := daemon.ReleaseIngress(); err == nil {
        select {
        case <-done:
        case <-time.After(5 * time.Second):
            logrus.Warnf("timeout while waiting for ingress network removal")
        }
    } else {
        logrus.Warnf("failed to initiate ingress network removal: %v", err)
    }
}
// setClusterProvider sets a component for querying the current cluster state.
func (daemon *Daemon) setClusterProvider(clusterProvider cluster.Provider) {
    daemon.clusterProvider = clusterProvider
    daemon.netController.SetClusterProvider(clusterProvider)
}

// IsSwarmCompatible verifies if the current daemon
// configuration is compatible with the swarm mode
func (daemon *Daemon) IsSwarmCompatible() error {
    if daemon.configStore == nil {
        return nil
    }
    return daemon.configStore.IsSwarmCompatible()
}
// NewDaemon sets up everything for the daemon to be able to service
// requests from the webserver.
func NewDaemon(config *config.Config, registryService registry.Service, containerdRemote libcontainerd.Remote, pluginStore *plugin.Store) (daemon *Daemon, err error) {
    setDefaultMtu(config)

    // Ensure that we have a correct root key limit for launching containers.
    if err := ModifyRootKeyLimit(); err != nil {
        logrus.Warnf("unable to modify root key limit, number of containers could be limited by this quota: %v", err)
    }

    // Ensure we have compatible and valid configuration options
    if err := verifyDaemonSettings(config); err != nil {
        return nil, err
    }

    // Do we have a disabled network?
    config.DisableBridge = isBridgeNetworkDisabled(config)

    // Verify the platform is supported as a daemon
    if !platformSupported {
        return nil, errSystemNotSupported
    }

    // Validate platform-specific requirements
    if err := checkSystem(); err != nil {
        return nil, err
    }

    idMappings, err := setupRemappedRoot(config)
    if err != nil {
        return nil, err
    }
    rootIDs := idMappings.RootPair()
    if err := setupDaemonProcess(config); err != nil {
        return nil, err
    }

    // set up the tmpDir to use a canonical path
    tmp, err := prepareTempDir(config.Root, rootIDs)
    if err != nil {
        return nil, fmt.Errorf("Unable to get the TempDir under %s: %s", config.Root, err)
    }
    realTmp, err := getRealPath(tmp)
    if err != nil {
        return nil, fmt.Errorf("Unable to get the full path to the TempDir (%s): %s", tmp, err)
    }
    os.Setenv("TMPDIR", realTmp)
    d := &Daemon{
        configStore: config,
        startupDone: make(chan struct{}),
    }
    // Ensure the daemon is properly shutdown if there is a failure during
    // initialization
    defer func() {
        if err != nil {
            if err := d.Shutdown(); err != nil {
                logrus.Error(err)
            }
        }
    }()

    // set up SIGUSR1 handler on Unix-like systems, or a Win32 global event
    // on Windows to dump Go routine stacks
    stackDumpDir := config.Root
    if execRoot := config.GetExecRoot(); execRoot != "" {
        stackDumpDir = execRoot
    }
    d.setupDumpStackTrap(stackDumpDir)

    if err := d.setupSeccompProfile(); err != nil {
        return nil, err
    }

    // Set the default isolation mode (only applicable on Windows)
    if err := d.setDefaultIsolation(); err != nil {
        return nil, fmt.Errorf("error setting default isolation mode: %v", err)
    }

    logrus.Debugf("Using default logging driver %s", config.LogConfig.Type)

    if err := configureMaxThreads(config); err != nil {
        logrus.Warnf("Failed to configure golang's threads limit: %v", err)
    }

    if err := ensureDefaultAppArmorProfile(); err != nil {
        logrus.Errorf(err.Error())
    }
    daemonRepo := filepath.Join(config.Root, "containers")
    if err := idtools.MkdirAllAndChown(daemonRepo, 0700, rootIDs); err != nil && !os.IsExist(err) {
        return nil, err
    }

    if runtime.GOOS == "windows" {
        if err := system.MkdirAll(filepath.Join(config.Root, "credentialspecs"), 0, ""); err != nil && !os.IsExist(err) {
            return nil, err
        }
    }

    // On Windows we don't support the environment variable, or a user supplied graphdriver,
    // as Windows has no choice in terms of which graphdrivers to use. It's a case of
    // running Windows containers on Windows (windowsfilter) or running Linux containers
    // on Windows (lcow). Unix platforms, however, run a single graphdriver for all
    // containers, and it can be set through an environment variable, a daemon start
    // parameter, or chosen through initialization of the layerstore by driver priority
    // order, for example.
    d.stores = make(map[string]daemonStore)
    if runtime.GOOS == "windows" {
        d.stores["windows"] = daemonStore{graphDriver: "windowsfilter"}
        if system.LCOWSupported() {
            d.stores["linux"] = daemonStore{graphDriver: "lcow"}
        }
    } else {
        driverName := os.Getenv("DOCKER_DRIVER")
        if driverName == "" {
            driverName = config.GraphDriver
        }
        d.stores[runtime.GOOS] = daemonStore{graphDriver: driverName} // May still be empty; layer-store init picks the driver in that case.
    }
    d.RegistryService = registryService
    d.PluginStore = pluginStore
    logger.RegisterPluginGetter(d.PluginStore)

    metricsSockPath, err := d.listenMetricsSock()
    if err != nil {
        return nil, err
    }
    registerMetricsPluginCallback(d.PluginStore, metricsSockPath)

    // Plugin system initialization should happen before restore. Do not change order.
    d.pluginManager, err = plugin.NewManager(plugin.ManagerConfig{
        Root:               filepath.Join(config.Root, "plugins"),
        ExecRoot:           getPluginExecRoot(config.Root),
        Store:              d.PluginStore,
        Executor:           containerdRemote,
        RegistryService:    registryService,
        LiveRestoreEnabled: config.LiveRestoreEnabled,
        LogPluginEvent:     d.LogPluginEvent, // todo: make private
        AuthzMiddleware:    config.AuthzMiddleware,
    })
    if err != nil {
        return nil, errors.Wrap(err, "couldn't create plugin manager")
    }
    var graphDrivers []string
    for platform, ds := range d.stores {
        ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
            StorePath:                 config.Root,
            MetadataStorePathTemplate: filepath.Join(config.Root, "image", "%s", "layerdb"),
            GraphDriver:               ds.graphDriver,
            GraphDriverOptions:        config.GraphOptions,
            IDMappings:                idMappings,
            PluginGetter:              d.PluginStore,
            ExperimentalEnabled:       config.Experimental,
            Platform:                  platform,
        })
        if err != nil {
            return nil, err
        }
        ds.graphDriver = ls.DriverName() // As the layer store may determine the driver
        ds.layerStore = ls
        d.stores[platform] = ds
        graphDrivers = append(graphDrivers, ls.DriverName())
    }

    if config.DeltaRoot != "" && config.DeltaGraphDriver != "" {
        ls, err := layer.NewStoreFromOptions(layer.StoreOptions{
            StorePath:                 config.DeltaRoot,
            MetadataStorePathTemplate: filepath.Join(config.DeltaRoot, "image", "%s", "layerdb"),
            GraphDriver:               config.DeltaGraphDriver,
            GraphDriverOptions:        config.DeltaGraphOptions,
            IDMappings:                idMappings,
            ExperimentalEnabled:       false,
            Platform:                  runtime.GOOS,
        })
        if err != nil {
            return nil, err
        }

        imageRoot := filepath.Join(config.DeltaRoot, "image", ls.DriverName())
        ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
        if err != nil {
            return nil, err
        }

        var is image.Store
        is, err = image.NewImageStore(ifs, runtime.GOOS, ls)
        if err != nil {
            return nil, err
        }

        d.deltaStore = &daemonStore{
            graphDriver: ls.DriverName(),
            imageRoot:   imageRoot,
            imageStore:  is,
            layerStore:  ls,
        }

        graphDrivers = append(graphDrivers, ls.DriverName())
    }
    // Configure and validate the kernel's security support
    if err := configureKernelSecuritySupport(config, graphDrivers); err != nil {
        return nil, err
    }

    logrus.Debugf("Max Concurrent Downloads: %d", *config.MaxConcurrentDownloads)
    lsMap := make(map[string]layer.Store)
    for platform, ds := range d.stores {
        lsMap[platform] = ds.layerStore
    }
    d.downloadManager = xfer.NewLayerDownloadManager(lsMap, *config.MaxConcurrentDownloads)
    logrus.Debugf("Max Concurrent Uploads: %d", *config.MaxConcurrentUploads)
    d.uploadManager = xfer.NewLayerUploadManager(*config.MaxConcurrentUploads)

    for platform, ds := range d.stores {
        imageRoot := filepath.Join(config.Root, "image", ds.graphDriver)
        ifs, err := image.NewFSStoreBackend(filepath.Join(imageRoot, "imagedb"))
        if err != nil {
            return nil, err
        }

        var is image.Store
        is, err = image.NewImageStore(ifs, platform, ds.layerStore)
        if err != nil {
            return nil, err
        }
        ds.imageRoot = imageRoot
        ds.imageStore = is
        d.stores[platform] = ds
    }
    // Configure the volumes driver
    volStore, err := d.configureVolumes(rootIDs)
    if err != nil {
        return nil, err
    }

    trustKey, err := api.LoadOrCreateTrustKey(config.TrustKeyPath)
    if err != nil {
        return nil, err
    }

    trustDir := filepath.Join(config.Root, "trust")

    if err := system.MkdirAll(trustDir, 0700, ""); err != nil {
        return nil, err
    }

    eventsService := events.New()

    for platform, ds := range d.stores {
        dms, err := dmetadata.NewFSMetadataStore(filepath.Join(ds.imageRoot, "distribution"), platform)
        if err != nil {
            return nil, err
        }

        rs, err := refstore.NewReferenceStore(filepath.Join(ds.imageRoot, "repositories.json"), platform)
        if err != nil {
            return nil, fmt.Errorf("Couldn't create reference store repositories: %s", err)
        }
        ds.distributionMetadataStore = dms
        ds.referenceStore = rs
        d.stores[platform] = ds

        // No content-addressability migration on Windows as it never supported pre-CA
        if runtime.GOOS != "windows" {
            migrationStart := time.Now()
            if err := v1.Migrate(config.Root, ds.graphDriver, ds.layerStore, ds.imageStore, rs, dms); err != nil {
                logrus.Errorf("Graph migration failed: %q. Your old graph data was found to be too inconsistent for upgrading to content-addressable storage. Some of the old data was probably not upgraded. We recommend starting over with a clean storage directory if possible.", err)
            }
            logrus.Infof("Graph migration to content-addressability took %.2f seconds", time.Since(migrationStart).Seconds())
        }
    }
    // Discovery is only enabled when the daemon is launched with an address to advertise. When
    // initialized, the daemon is registered and we can store the discovery backend as it's read-only.
    if err := d.initDiscovery(config); err != nil {
        return nil, err
    }

    sysInfo := sysinfo.New(false)
    // Check if the Devices cgroup is mounted; it is a hard requirement for
    // container security on Linux.
    if runtime.GOOS == "linux" && !sysInfo.CgroupDevicesEnabled {
        return nil, errors.New("Devices cgroup isn't mounted")
    }
    d.ID = trustKey.PublicKey().KeyID()
    d.repository = daemonRepo
    d.containers = container.NewMemoryStore()
    if d.containersReplica, err = container.NewViewDB(); err != nil {
        return nil, err
    }
    d.execCommands = exec.NewStore()
    d.trustKey = trustKey
    d.idIndex = truncindex.NewTruncIndex([]string{})
    d.statsCollector = d.newStatsCollector(1 * time.Second)
    d.defaultLogConfig = containertypes.LogConfig{
        Type:   config.LogConfig.Type,
        Config: config.LogConfig.Config,
    }
    d.EventsService = eventsService
    d.volumes = volStore
    d.idMappings = idMappings
    d.seccompEnabled = sysInfo.Seccomp
    d.apparmorEnabled = sysInfo.AppArmor

    d.nameIndex = registrar.NewRegistrar()
    d.linkIndex = newLinkIndex()
    d.containerdRemote = containerdRemote
834 if err := d.restore(); err != nil {
839 // FIXME: this method never returns an error
840 info, _ := d.SystemInfo()
842 engineInfo.WithValues(
843 dockerversion.Version,
844 dockerversion.GitCommit,
848 info.OperatingSystem,
852 engineCpus.Set(float64(info.NCPU))
853 engineMemory.Set(float64(info.MemTotal))
856 for platform, ds := range d.stores {
861 if len(d.stores) > 1 {
862 gd = fmt.Sprintf("%s (%s)", gd, platform)
865 logrus.WithFields(logrus.Fields{
866 "version": dockerversion.Version,
867 "commit": dockerversion.GitCommit,
868 "graphdriver(s)": gd,
869 }).Info("Docker daemon")
func (daemon *Daemon) waitForStartupDone() {
    <-daemon.startupDone
}

func (daemon *Daemon) shutdownContainer(c *container.Container) error {
    stopTimeout := c.StopTimeout()
    // TODO(windows): Handle docker restart with paused containers
    if c.IsPaused() {
        // To terminate a process in a freezer cgroup, we should send
        // SIGTERM to the process and then unfreeze it; the process will
        // then be forced to terminate immediately.
        logrus.Debugf("Found container %s is paused, sending SIGTERM before unpausing it", c.ID)
        sig, ok := signal.SignalMap["TERM"]
        if !ok {
            return errors.New("System does not support SIGTERM")
        }
        if err := daemon.kill(c, int(sig)); err != nil {
            return fmt.Errorf("sending SIGTERM to container %s with error: %v", c.ID, err)
        }
        if err := daemon.containerUnpause(c); err != nil {
            return fmt.Errorf("Failed to unpause container %s with error: %v", c.ID, err)
        }

        ctx, cancel := context.WithTimeout(context.Background(), time.Duration(stopTimeout)*time.Second)
        defer cancel()

        // Wait with timeout for container to exit.
        if status := <-c.Wait(ctx, container.WaitConditionNotRunning); status.Err() != nil {
            logrus.Debugf("container %s failed to exit in %d seconds of SIGTERM, sending SIGKILL to force", c.ID, stopTimeout)
            sig, ok := signal.SignalMap["KILL"]
            if !ok {
                return errors.New("System does not support SIGKILL")
            }
            if err := daemon.kill(c, int(sig)); err != nil {
                logrus.Errorf("Failed to SIGKILL container %s", c.ID)
            }
            // Wait for exit again without a timeout.
            // Explicitly ignore the result.
            _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning)
            return status.Err()
        }
    }

    // If the container failed to exit in stopTimeout seconds of SIGTERM, then use the force.
    if err := daemon.containerStop(c, stopTimeout); err != nil {
        return fmt.Errorf("Failed to stop container %s with error: %v", c.ID, err)
    }

    // Wait without timeout for the container to exit.
    // Ignore the result.
    _ = <-c.Wait(context.Background(), container.WaitConditionNotRunning)
    return nil
}
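// The escalation above, in short: SIGTERM first (unpausing if needed), wait
// up to StopTimeout seconds, then SIGKILL and wait without a timeout. For
// example (hypothetical values), a container with a StopTimeout of 10 that
// ignores SIGTERM is killed roughly 10 seconds after shutdown begins.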
// ShutdownTimeout returns the shutdown timeout based on the max stopTimeout of the containers,
// and is limited by daemon's ShutdownTimeout.
func (daemon *Daemon) ShutdownTimeout() int {
    // By default we use daemon's ShutdownTimeout.
    shutdownTimeout := daemon.configStore.ShutdownTimeout

    graceTimeout := 5
    if daemon.containers != nil {
        for _, c := range daemon.containers.List() {
            if shutdownTimeout >= 0 {
                stopTimeout := c.StopTimeout()
                if stopTimeout < 0 {
                    shutdownTimeout = -1
                } else {
                    if stopTimeout+graceTimeout > shutdownTimeout {
                        shutdownTimeout = stopTimeout + graceTimeout
                    }
                }
            }
        }
    }
    return shutdownTimeout
}
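// Worked example (hypothetical numbers): with a configured ShutdownTimeout
// of 15 and two containers whose StopTimeouts are 10 and 25, the loop yields
// max(15, 10+5, 25+5) = 30, so Shutdown waits up to 30 seconds. A negative
// StopTimeout (wait forever) makes the result -1.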
// Shutdown stops the daemon.
func (daemon *Daemon) Shutdown() error {
    daemon.shutdown = true
    // Keep mounts and networking running on daemon shutdown if
    // we are to keep containers running and restore them.

    if daemon.configStore.LiveRestoreEnabled && daemon.containers != nil {
        // check if there are any running containers, if none we should do some cleanup
        if ls, err := daemon.Containers(&types.ContainerListOptions{}); len(ls) != 0 || err != nil {
            // metrics plugins still need some cleanup
            daemon.cleanupMetricsPlugins()
            return nil
        }
    }

    if daemon.containers != nil {
        logrus.Debugf("start clean shutdown of all containers with a %d seconds timeout...", daemon.configStore.ShutdownTimeout)
        daemon.containers.ApplyAll(func(c *container.Container) {
            if !c.IsRunning() {
                return
            }
            logrus.Debugf("stopping %s", c.ID)
            if err := daemon.shutdownContainer(c); err != nil {
                logrus.Errorf("Stop container error: %v", err)
                return
            }
            if mountid, err := daemon.stores[c.Platform].layerStore.GetMountID(c.ID); err == nil {
                daemon.cleanupMountsByID(mountid)
            }
            logrus.Debugf("container stopped %s", c.ID)
        })
    }

    if daemon.volumes != nil {
        if err := daemon.volumes.Shutdown(); err != nil {
            logrus.Errorf("Error shutting down volume store: %v", err)
        }
    }

    for platform, ds := range daemon.stores {
        if ds.layerStore != nil {
            if err := ds.layerStore.Cleanup(); err != nil {
                logrus.Errorf("Error during layer Store.Cleanup(): %v %s", err, platform)
            }
        }
    }

    // If we are part of a cluster, clean up cluster resources.
    if daemon.clusterProvider != nil {
        logrus.Debugf("start clean shutdown of cluster resources...")
        daemon.DaemonLeavesCluster()
    }

    daemon.cleanupMetricsPlugins()

    // Shutdown plugins after containers and layerstore. Don't change the order.
    daemon.pluginShutdown()

    // trigger libnetwork Stop only if it's initialized
    if daemon.netController != nil {
        daemon.netController.Stop()
    }

    if err := daemon.cleanupMounts(); err != nil {
        return err
    }

    return nil
}
// Mount sets container.BaseFS
// (is it not set coming in? why is it unset?)
func (daemon *Daemon) Mount(container *container.Container) error {
    dir, err := container.RWLayer.Mount(container.GetMountLabel())
    if err != nil {
        return err
    }
    logrus.Debugf("container mounted via layerStore: %v", dir)

    if container.BaseFS != dir {
        // The mount path reported by the graph driver should always be trusted on Windows, since the
        // volume path for a given mounted layer may change over time. This should only be an error
        // on non-Windows operating systems.
        if container.BaseFS != "" && runtime.GOOS != "windows" {
            daemon.Unmount(container)
            return fmt.Errorf("Error: driver %s is returning inconsistent paths for container %s ('%s' then '%s')",
                daemon.GraphDriverName(container.Platform), container.ID, container.BaseFS, dir)
        }
    }
    container.BaseFS = dir // TODO: combine these fields
    return nil
}
// Unmount unsets the container base filesystem
func (daemon *Daemon) Unmount(container *container.Container) error {
    if err := container.RWLayer.Unmount(); err != nil {
        logrus.Errorf("Error unmounting container %s: %s", container.ID, err)
        return err
    }

    return nil
}
// Subnets returns the IPv4 and IPv6 subnets of networks that are managed by Docker.
func (daemon *Daemon) Subnets() ([]net.IPNet, []net.IPNet) {
    var v4Subnets []net.IPNet
    var v6Subnets []net.IPNet

    managedNetworks := daemon.netController.Networks()

    for _, managedNetwork := range managedNetworks {
        v4infos, v6infos := managedNetwork.Info().IpamInfo()
        for _, info := range v4infos {
            if info.IPAMData.Pool != nil {
                v4Subnets = append(v4Subnets, *info.IPAMData.Pool)
            }
        }
        for _, info := range v6infos {
            if info.IPAMData.Pool != nil {
                v6Subnets = append(v6Subnets, *info.IPAMData.Pool)
            }
        }
    }

    return v4Subnets, v6Subnets
}
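// exampleLogSubnets shows one way a caller might consume Subnets
// (hypothetical helper, not part of the daemon):
func exampleLogSubnets(d *Daemon) {
    v4, v6 := d.Subnets()
    logrus.Debugf("managing %d IPv4 and %d IPv6 subnets", len(v4), len(v6))
}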
// GraphDriverName returns the name of the graph driver used by the layer.Store
func (daemon *Daemon) GraphDriverName(platform string) string {
    return daemon.stores[platform].layerStore.DriverName()
}
// prepareTempDir prepares and returns the default directory to use
// for temporary files.
// If it doesn't exist, it is created. If it exists, its content is removed.
func prepareTempDir(rootDir string, rootIDs idtools.IDPair) (string, error) {
    var tmpDir string
    if tmpDir = os.Getenv("DOCKER_TMPDIR"); tmpDir == "" {
        tmpDir = filepath.Join(rootDir, "tmp")
        newName := tmpDir + "-old"
        if err := os.Rename(tmpDir, newName); err == nil {
            go func() {
                if err := os.RemoveAll(newName); err != nil {
                    logrus.Warnf("failed to delete old tmp directory: %s", newName)
                }
            }()
        } else {
            logrus.Warnf("failed to rename %s for background deletion: %s. Deleting synchronously", tmpDir, err)
            if err := os.RemoveAll(tmpDir); err != nil {
                logrus.Warnf("failed to delete old tmp directory: %s", tmpDir)
            }
        }
    }
    // We don't remove the content of tmpdir if it's not the default,
    // as it may hold things that do not belong to us.
    return tmpDir, idtools.MkdirAllAndChown(tmpDir, 0700, rootIDs)
}
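// Example (hypothetical path): overriding the temp location via DOCKER_TMPDIR.
// Contents of a non-default directory are preserved, as noted above:
//
//	os.Setenv("DOCKER_TMPDIR", "/mnt/scratch/docker-tmp")
//	tmp, err := prepareTempDir("/var/lib/docker", rootIDs)
//	// tmp == "/mnt/scratch/docker-tmp" (created and chowned, not cleared)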
func (daemon *Daemon) setupInitLayer(initPath string) error {
    rootIDs := daemon.idMappings.RootPair()
    return initlayer.Setup(initPath, rootIDs)
}

func setDefaultMtu(conf *config.Config) {
    // do nothing if the config does not have the default 0 value.
    if conf.Mtu != 0 {
        return
    }
    conf.Mtu = config.DefaultNetworkMtu
}
func (daemon *Daemon) configureVolumes(rootIDs idtools.IDPair) (*store.VolumeStore, error) {
    volumesDriver, err := local.New(daemon.configStore.Root, rootIDs)
    if err != nil {
        return nil, err
    }

    volumedrivers.RegisterPluginGetter(daemon.PluginStore)

    if !volumedrivers.Register(volumesDriver, volumesDriver.Name()) {
        return nil, errors.New("local volume driver could not be registered")
    }
    return store.New(daemon.configStore.Root)
}
// IsShuttingDown tells whether the daemon is shutting down or not
func (daemon *Daemon) IsShuttingDown() bool {
    return daemon.shutdown
}
// initDiscovery initializes the discovery watcher for this daemon.
func (daemon *Daemon) initDiscovery(conf *config.Config) error {
    advertise, err := config.ParseClusterAdvertiseSettings(conf.ClusterStore, conf.ClusterAdvertise)
    if err != nil {
        if err == discovery.ErrDiscoveryDisabled {
            return nil
        }
        return err
    }

    conf.ClusterAdvertise = advertise
    discoveryWatcher, err := discovery.Init(conf.ClusterStore, conf.ClusterAdvertise, conf.ClusterOpts)
    if err != nil {
        return fmt.Errorf("discovery initialization failed (%v)", err)
    }

    daemon.discoveryWatcher = discoveryWatcher
    return nil
}
func isBridgeNetworkDisabled(conf *config.Config) bool {
    return conf.BridgeConfig.Iface == config.DisableNetworkBridge
}
func (daemon *Daemon) networkOptions(dconfig *config.Config, pg plugingetter.PluginGetter, activeSandboxes map[string]interface{}) ([]nwconfig.Option, error) {
    options := []nwconfig.Option{}
    if dconfig == nil {
        return options, nil
    }

    options = append(options, nwconfig.OptionExperimental(dconfig.Experimental))
    options = append(options, nwconfig.OptionDataDir(dconfig.Root))
    options = append(options, nwconfig.OptionExecRoot(dconfig.GetExecRoot()))

    dd := runconfig.DefaultDaemonNetworkMode()
    dn := runconfig.DefaultDaemonNetworkMode().NetworkName()
    options = append(options, nwconfig.OptionDefaultDriver(string(dd)))
    options = append(options, nwconfig.OptionDefaultNetwork(dn))

    if strings.TrimSpace(dconfig.ClusterStore) != "" {
        kv := strings.Split(dconfig.ClusterStore, "://")
        if len(kv) != 2 {
            return nil, errors.New("kv store daemon config must be of the form KV-PROVIDER://KV-URL")
        }
        options = append(options, nwconfig.OptionKVProvider(kv[0]))
        options = append(options, nwconfig.OptionKVProviderURL(kv[1]))
    }
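    // For example (hypothetical value), a ClusterStore of
    // "consul://10.0.0.1:8500" splits into provider "consul" and
    // URL "10.0.0.1:8500" for the two options above.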
    if len(dconfig.ClusterOpts) > 0 {
        options = append(options, nwconfig.OptionKVOpts(dconfig.ClusterOpts))
    }

    if daemon.discoveryWatcher != nil {
        options = append(options, nwconfig.OptionDiscoveryWatcher(daemon.discoveryWatcher))
    }

    if dconfig.ClusterAdvertise != "" {
        options = append(options, nwconfig.OptionDiscoveryAddress(dconfig.ClusterAdvertise))
    }

    options = append(options, nwconfig.OptionLabels(dconfig.Labels))
    options = append(options, driverOptions(dconfig)...)

    if daemon.configStore != nil && daemon.configStore.LiveRestoreEnabled && len(activeSandboxes) != 0 {
        options = append(options, nwconfig.OptionActiveSandboxes(activeSandboxes))
    }

    if pg != nil {
        options = append(options, nwconfig.OptionPluginGetter(pg))
    }

    return options, nil
}
func copyBlkioEntry(entries []*containerd.BlkioStatsEntry) []types.BlkioStatEntry {
    out := make([]types.BlkioStatEntry, len(entries))
    for i, re := range entries {
        out[i] = types.BlkioStatEntry{
            Major: re.Major,
            Minor: re.Minor,
            Op:    re.Op,
            Value: re.Value,
        }
    }
    return out
}
// GetCluster returns the cluster
func (daemon *Daemon) GetCluster() Cluster {
    return daemon.cluster
}

// SetCluster sets the cluster
func (daemon *Daemon) SetCluster(cluster Cluster) {
    daemon.cluster = cluster
}
func (daemon *Daemon) pluginShutdown() {
    manager := daemon.pluginManager
    // Check for a valid manager object. In error conditions, daemon init can fail
    // and Shutdown may be called before the plugin manager is initialized.
    if manager != nil {
        manager.Shutdown()
    }
}

// PluginManager returns the current pluginManager associated with the daemon
func (daemon *Daemon) PluginManager() *plugin.Manager { // set up before daemon to avoid this method
    return daemon.pluginManager
}
// PluginGetter returns the current pluginStore associated with the daemon
func (daemon *Daemon) PluginGetter() *plugin.Store {
    return daemon.PluginStore
}
// CreateDaemonRoot creates the root for the daemon
func CreateDaemonRoot(config *config.Config) error {
    // get the canonical path to the Docker root directory
    var realRoot string
    if _, err := os.Stat(config.Root); err != nil && os.IsNotExist(err) {
        realRoot = config.Root
    } else {
        realRoot, err = getRealPath(config.Root)
        if err != nil {
            return fmt.Errorf("Unable to get the full path to root (%s): %s", config.Root, err)
        }
    }

    idMappings, err := setupRemappedRoot(config)
    if err != nil {
        return err
    }
    return setupDaemonRoot(config, realRoot, idMappings.RootPair())
}
// checkpointAndSave grabs a container lock to safely call container.CheckpointTo
func (daemon *Daemon) checkpointAndSave(container *container.Container) error {
    container.Lock()
    defer container.Unlock()
    if err := container.CheckpointTo(daemon.containersReplica); err != nil {
        return fmt.Errorf("Error saving container state: %v", err)
    }
    return nil
}